/* exynos_drm_gem.h
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#ifndef _EXYNOS_DRM_GEM_H_
#define _EXYNOS_DRM_GEM_H_
#define to_exynos_gem_obj(x)	container_of(x,\
			struct exynos_drm_gem_obj, base)
#define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG)
/*
 * exynos drm gem buffer structure.
 *
 * @kvaddr: kernel virtual address of the allocated memory region.
 * @userptr: user space address.
 * @dma_addr: bus address (accessed by dma) of the allocated memory region.
 *	- this address could be a physical address without IOMMU and
 *	a device address with IOMMU.
 * @dma_attrs: dma attributes used when the memory region was allocated.
 * @write: whether pages will be written to by the caller.
 * @pages: array of backing pages.
 * @sgt: sg table used to transfer page data.
 * @size: size of the allocated memory region.
 * @pfnmap: indicates whether the memory region obtained from userptr is
 *	mmapped with VM_PFNMAP or not.
 */
struct exynos_drm_gem_buf {
	void __iomem		*kvaddr;
	unsigned long		userptr;
	dma_addr_t		dma_addr;
	struct dma_attrs	dma_attrs;
	unsigned int		write;
	struct page		**pages;
	struct sg_table		*sgt;
	unsigned long		size;
	bool			pfnmap;
};
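
/*
 * Usage sketch (illustrative only, assuming the buffer was allocated as
 * contiguous memory): the CPU accesses the region through @kvaddr while
 * the device is programmed with @dma_addr. "regs" and
 * HYPOTHETICAL_DMA_ADDR_REG are made up for the example.
 *
 *	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
 *
 *	memset_io(buf->kvaddr, 0, buf->size);
 *	writel(buf->dma_addr, regs + HYPOTHETICAL_DMA_ADDR_REG);
 */
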
/*
 * exynos drm gem object structure.
 *
 * @base: a gem object.
 *	- a new handle to this gem object would be created
 *	by drm_gem_handle_create().
 * @buffer: a pointer to the exynos_drm_gem_buf object.
 *	- it describes the memory region (contiguous or not) allocated
 *	by user request or at framebuffer creation.
 * @size: size requested by user, in bytes, aligned to page size.
 * @vma: a pointer to the vm_area.
 * @flags: indicates the memory type of the allocated buffer and its
 *	cache attributes.
 *
 * P.S. this object would be transferred to user as kms_bo.handle so
 *	user can access the buffer through kms_bo.handle.
 */
struct exynos_drm_gem_obj {
	struct drm_gem_object		base;
	struct exynos_drm_gem_buf	*buffer;
	unsigned long			size;
	struct vm_area_struct		*vma;
	unsigned int			flags;
};
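
/*
 * Usage sketch (illustrative only): a driver holding a plain
 * struct drm_gem_object, e.g. one returned by drm_gem_object_lookup(),
 * can recover the exynos object with to_exynos_gem_obj() and check
 * whether the backing storage is contiguous:
 *
 *	struct exynos_drm_gem_obj *exynos_obj = to_exynos_gem_obj(obj);
 *
 *	if (IS_NONCONTIG_BUFFER(exynos_obj->flags))
 *		handle scattered pages through exynos_obj->buffer->sgt
 *	else
 *		use exynos_obj->buffer->dma_addr directly
 */
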
/* get the backing pages of a gem object. */
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
/* destroy a gem object together with its buffer. */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
/* create a private gem object and initialize it. */
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size);
/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
unsigned long size);
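
/*
 * In-kernel allocation sketch (illustrative only, assuming the helper
 * reports failure via an ERR_PTR return): allocate a non-contiguous
 * buffer and destroy it again when it is no longer needed.
 *
 *	struct exynos_drm_gem_obj *exynos_gem;
 *
 *	exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG, size);
 *	if (IS_ERR(exynos_gem))
 *		return PTR_ERR(exynos_gem);
 *	...
 *	exynos_drm_gem_destroy(exynos_gem);
 */
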
/*
 * request gem object creation and buffer allocation; the size is
 * calculated by user space from framebuffer information such as
 * width, height and bpp.
 */
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/*
 * get the dma address from a gem handle. this function can be used by
 * other drivers such as 2d/3d acceleration drivers.
 * with this call, the gem object's reference count is increased.
 */
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *filp);
/*
 * release the dma address looked up from a gem handle. this function can
 * be used by other drivers such as 2d/3d acceleration drivers.
 * with this call, the gem object's reference count is decreased.
 */
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *filp);
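
/*
 * Pairing sketch (illustrative only, assuming the get helper reports
 * failure via ERR_PTR): a 2d/3d acceleration driver resolving a gem
 * handle to a dma address for the duration of a job.
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file_priv);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 *	program the hardware with *addr and run the job
 *
 *	exynos_drm_gem_put_dma_addr(drm_dev, handle, file_priv);
 */
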
/* get the buffer offset needed to map the buffer to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/*
 * mmap the physically contiguous memory that a gem object contains
 * to user space.
 */
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* map user space memory, e.g. allocated by malloc, to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* get buffer information about the memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
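
/*
 * Registration sketch (illustrative only): the ioctl handlers above are
 * meant to be wired into the driver's ioctl table with
 * DRM_IOCTL_DEF_DRV(). The DRM_EXYNOS_GEM_* numbers are assumed to come
 * from the exynos uapi header.
 *
 *	static struct drm_ioctl_desc exynos_ioctls[] = {
 *		DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE,
 *				exynos_drm_gem_create_ioctl,
 *				DRM_UNLOCKED | DRM_AUTH),
 *		DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
 *				exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
 *		...
 *	};
 */
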
/* get the buffer size for a given gem handle. */
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *file_priv);
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
/* create memory region for drm framebuffer. */
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
/* map memory region for drm framebuffer to user space. */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
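
/*
 * Wiring sketch (illustrative only): the object and dumb-buffer hooks
 * above are intended to be plugged into struct drm_driver so that the
 * drm core can call them; field names follow the drm_driver layout of
 * this kernel generation.
 *
 *	static struct drm_driver exynos_drm_driver = {
 *		...
 *		.gem_init_object	= exynos_drm_gem_init_object,
 *		.gem_free_object	= exynos_drm_gem_free_object,
 *		.dumb_create		= exynos_drm_gem_dumb_create,
 *		.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
 *		...
 *	};
 */
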
/* page fault handler: maps the faulting (virtual) address to physical memory. */
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
/* set vm_flags; the vm attributes can also be changed here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
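
/*
 * Wiring sketch (illustrative only): the fault handler is typically
 * installed through a vm_operations_struct, while the mmap entry point
 * goes into the driver's file_operations.
 *
 *	static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
 *		.fault	= exynos_drm_gem_fault,
 *		.open	= drm_gem_vm_open,
 *		.close	= drm_gem_vm_close,
 *	};
 *
 *	static const struct file_operations exynos_drm_driver_fops = {
 *		.owner	= THIS_MODULE,
 *		.mmap	= exynos_drm_gem_mmap,
 *		...
 *	};
 */
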
static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
/* get a copy of a virtual memory region. */
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
/* release a userspace virtual memory area. */
void exynos_gem_put_vma(struct vm_area_struct *vma);
/* get pages from user space. */
int exynos_gem_get_pages_from_userptr(unsigned long start,
unsigned int npages,
struct page **pages,
struct vm_area_struct *vma);
/* drop the reference to pages. */
void exynos_gem_put_pages_to_userptr(struct page **pages,
unsigned int npages,
struct vm_area_struct *vma);
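
/*
 * Pairing sketch (illustrative only, assuming the caller holds the mmap
 * semaphore while looking up and pinning the user range):
 *
 *	vma = find_vma(current->mm, userptr);
 *	ret = exynos_gem_get_pages_from_userptr(userptr, npages, pages, vma);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	exynos_gem_put_pages_to_userptr(pages, npages, vma);
 */
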
/* map sgt with dma region. */
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir);
/* unmap sgt from dma region. */
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir);
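
/*
 * Pairing sketch (illustrative only): map a buffer's scatter/gather
 * table for device access around a dma transfer and unmap it when the
 * transfer is done.
 *
 *	err = exynos_gem_map_sgt_with_dma(drm_dev, buf->sgt,
 *					DMA_BIDIRECTIONAL);
 *	if (err < 0)
 *		return err;
 *
 *	run the dma transfer
 *
 *	exynos_gem_unmap_sgt_from_dma(drm_dev, buf->sgt, DMA_BIDIRECTIONAL);
 */
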
#endif