/* SPDX-License-Identifier: GPL-2.0 OR MIT */

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#ifndef __XEN_DRM_FRONT_SHBUF_H_
#define __XEN_DRM_FRONT_SHBUF_H_

#include <linux/kernel.h>
#include <linux/scatterlist.h>

#include <xen/grant_table.h>
struct xen_drm_front_shbuf {
	/*
	 * Number of grant references handed to the backend:
	 *  - for allocated/imported dma-bufs this holds the number of grant
	 *    references for the page directory and the pages of the buffer
	 *  - for a buffer provided by the backend this holds the number of
	 *    grant references for the page directory only, as the grant
	 *    references for the buffer itself are provided by the backend
	 */
	int num_grefs;
	/* grant references shared with the backend */
	grant_ref_t *grefs;
	/* contents of the page directory to be shared with the backend */
	unsigned char *directory;

	/* number of the buffer's pages and the pages themselves */
	int num_pages;
	struct page **pages;

	struct xenbus_device *xb_dev;

	/* these are the ops used internally depending on be_alloc mode */
	const struct xen_drm_front_shbuf_ops *ops;

	/* Xen map handles for the buffer allocated by the backend */
	grant_handle_t *backend_map_handles;
};
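
/*
 * Sketch of the num_grefs relation described above (hypothetical names,
 * not the driver's internals): with dir_pages being the number of pages
 * occupied by the page directory itself,
 *
 *	front-allocated buffer:    num_grefs == dir_pages + num_pages
 *	backend-allocated buffer:  num_grefs == dir_pages
 */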

struct xen_drm_front_shbuf_cfg {
	struct xenbus_device *xb_dev;
	size_t size;
	struct page **pages;
	/* if true, the buffer is allocated by the backend, not the frontend */
	bool be_alloc;
};

struct xen_drm_front_shbuf *
xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg);

grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf);

int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf);

int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf);

void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf);

void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf);
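
/*
 * Typical lifecycle, as a hedged sketch (authoritative usage lives in the
 * driver sources; error handling is omitted and xb_dev/size/pages are
 * assumed to come from the caller):
 *
 *	struct xen_drm_front_shbuf_cfg cfg = {
 *		.xb_dev = xb_dev,
 *		.size = size,
 *		.pages = pages,
 *		.be_alloc = false,
 *	};
 *	struct xen_drm_front_shbuf *buf;
 *
 *	buf = xen_drm_front_shbuf_alloc(&cfg);
 *	// hand the directory's first grant reference to the backend
 *	gref = xen_drm_front_shbuf_get_dir_start(buf);
 *	// for a backend-allocated buffer (be_alloc), map it locally
 *	xen_drm_front_shbuf_map(buf);
 *	// after CPU writes, make the contents visible to the backend
 *	xen_drm_front_shbuf_flush(buf);
 *
 *	xen_drm_front_shbuf_unmap(buf);
 *	xen_drm_front_shbuf_free(buf);
 */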
#endif /* __XEN_DRM_FRONT_SHBUF_H_ */