Diffstat (limited to 'drivers/staging/android/ion/ion.h')
-rw-r--r--	drivers/staging/android/ion/ion.h	387
1 file changed, 293 insertions, 94 deletions
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 93dafb4586e4..ace8416bd509 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -14,38 +14,31 @@
  *
  */
 
-#ifndef _LINUX_ION_H
-#define _LINUX_ION_H
+#ifndef _ION_H
+#define _ION_H
 
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
 #include <linux/types.h>
+#include <linux/miscdevice.h>
 
 #include "../uapi/ion.h"
 
-struct ion_handle;
-struct ion_device;
-struct ion_heap;
-struct ion_mapper;
-struct ion_client;
-struct ion_buffer;
-
-/*
- * This should be removed some day when phys_addr_t's are fully
- * plumbed in the kernel, and all instances of ion_phys_addr_t should
- * be converted to phys_addr_t.  For the time being many kernel interfaces
- * do not accept phys_addr_t's that would have to
- */
-#define ion_phys_addr_t unsigned long
-
 /**
  * struct ion_platform_heap - defines a heap in the given platform
  * @type:	type of the heap from ion_heap_type enum
  * @id:		unique identifier for heap.  When allocating higher numbers
  *		will be allocated from first.  At allocation these are passed
  *		as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
  * @name:	used for debug purposes
  * @base:	base address of heap in physical memory if applicable
  * @size:	size of the heap in bytes if applicable
- * @align:	required alignment in physical memory if applicable
  * @priv:	private info passed from the board file
  *
  * Provided by the board file.
@@ -54,123 +47,329 @@ struct ion_platform_heap {
 	enum ion_heap_type type;
 	unsigned int id;
 	const char *name;
-	ion_phys_addr_t base;
+	phys_addr_t base;
 	size_t size;
-	ion_phys_addr_t align;
+	phys_addr_t align;
 	void *priv;
 };
 
 /**
- * struct ion_platform_data - array of platform heaps passed from board file
- * @nr:		number of structures in the array
- * @heaps:	array of platform_heap structions
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref:		reference count
+ * @node:		node in the ion_device buffers tree
+ * @dev:		back pointer to the ion_device
+ * @heap:		back pointer to the heap the buffer came from
+ * @flags:		buffer specific flags
+ * @private_flags:	internal buffer specific flags
+ * @size:		size of the buffer
+ * @priv_virt:		private data to the buffer representable as
+ *			a void *
+ * @lock:		protects the buffers cnt fields
+ * @kmap_cnt:		number of times the buffer is mapped to the kernel
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
+ * @pages:		flat array of pages in the buffer -- used by fault
+ *			handler and only valid for buffers that are faulted in
+ * @vmas:		list of vma's mapping this buffer
+ * @handle_count:	count of handles referencing this buffer
+ * @task_comm:		taskcomm of last client to reference this buffer in a
+ *			handle, used for debugging
+ * @pid:		pid of last client to reference this buffer in a
+ *			handle, used for debugging
+ */
+struct ion_buffer {
+	union {
+		struct rb_node node;
+		struct list_head list;
+	};
+	struct ion_device *dev;
+	struct ion_heap *heap;
+	unsigned long flags;
+	unsigned long private_flags;
+	size_t size;
+	void *priv_virt;
+	struct mutex lock;
+	int kmap_cnt;
+	void *vaddr;
+	struct sg_table *sg_table;
+	struct page **pages;
+	struct list_head vmas;
+	struct list_head attachments;
+	/* used to track orphaned buffers */
+	int handle_count;
+	char task_comm[TASK_COMM_LEN];
+	pid_t pid;
+};
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:		the actual misc device
+ * @buffers:		an rb tree of all the existing buffers
+ * @buffer_lock:	lock protecting the tree of buffers
+ * @lock:		rwsem protecting the tree of heaps and clients
+ */
+struct ion_device {
+	struct miscdevice dev;
+	struct rb_root buffers;
+	struct mutex buffer_lock;
+	struct rw_semaphore lock;
+	struct plist_head heaps;
+	struct dentry *debug_root;
+	int heap_cnt;
+};
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate:		allocate memory
+ * @free:		free memory
+ * @map_kernel		map memory to the kernel
+ * @unmap_kernel	unmap memory to the kernel
+ * @map_user		map memory to userspace
  *
- * Provided by the board file in the form of platform data to a platform device.
+ * allocate, phys, and map_user return 0 on success, -errno on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being free'd must be truly free'd back to the
+ * system, not put in a page pool or otherwise cached.
  */
-struct ion_platform_data {
-	int nr;
-	struct ion_platform_heap *heaps;
+struct ion_heap_ops {
+	int (*allocate)(struct ion_heap *heap,
+			struct ion_buffer *buffer, unsigned long len,
+			unsigned long flags);
+	void (*free)(struct ion_buffer *buffer);
+	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
+			struct vm_area_struct *vma);
+	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
 };
 
 /**
- * ion_client_create() - allocate a client and returns it
- * @dev:	the global ion device
- * @name:	used for debugging
+ * heap flags - flags between the heaps and core ion code
  */
-struct ion_client *ion_client_create(struct ion_device *dev,
-				     const char *name);
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
 
 /**
- * ion_client_destroy() - free's a client and all it's handles
- * @client:	the client
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node:		rb node to put the heap on the device's tree of heaps
+ * @dev:		back pointer to the ion_device
+ * @type:		type of heap
+ * @ops:		ops struct as above
+ * @flags:		flags
+ * @id:			id of heap, also indicates priority of this heap when
+ *			allocating.  These are specified by platform data and
+ *			MUST be unique
+ * @name:		used for debugging
+ * @shrinker:		a shrinker for the heap
+ * @free_list:		free list head if deferred free is used
+ * @free_list_size	size of the deferred free list in bytes
+ * @lock:		protects the free list
+ * @waitqueue:		queue to wait on from deferred free thread
+ * @task:		task struct of deferred free thread
+ * @debug_show:		called when heap debug file is read to add any
+ *			heap specific debug info to output
  *
- * Free the provided client and all it's resources including
- * any handles it is holding.
+ * Represents a pool of memory from which buffers can be made.  In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
  */
-void ion_client_destroy(struct ion_client *client);
+struct ion_heap {
+	struct plist_node node;
+	struct ion_device *dev;
+	enum ion_heap_type type;
+	struct ion_heap_ops *ops;
+	unsigned long flags;
+	unsigned int id;
+	const char *name;
+	struct shrinker shrinker;
+	struct list_head free_list;
+	size_t free_list_size;
+	spinlock_t free_lock;
+	wait_queue_head_t waitqueue;
+	struct task_struct *task;
+
+	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
 
 /**
- * ion_alloc - allocate ion memory
- * @client:	the client
- * @len:	size of the allocation
- * @align:	requested allocation alignment, lots of hardware blocks
- *		have alignment requirements of some kind
- * @heap_id_mask:	mask of heaps to allocate from, if multiple bits are set
- *			heaps will be tried in order from highest to lowest
- *			id
- * @flags:	heap flags, the low 16 bits are consumed by ion, the
- *		high 16 bits are passed on to the respective heap and
- *		can be heap custom
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer:		buffer
  *
- * Allocate memory in one of the heaps provided in heap mask and return
- * an opaque handle to it.
+ * indicates whether this ion buffer is cached
  */
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
-			     size_t align, unsigned int heap_id_mask,
-			     unsigned int flags);
+bool ion_buffer_cached(struct ion_buffer *buffer);
 
 /**
- * ion_free - free a handle
- * @client:	the client
- * @handle:	the handle to free
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer:		buffer
  *
- * Free the provided handle.
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in, this can affect how buffers are allocated from the heap.
  */
-void ion_free(struct ion_client *client, struct ion_handle *handle);
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
 
 /**
- * ion_map_kernel - create mapping for the given handle
- * @client:	the client
- * @handle:	handle to map
+ * ion_device_add_heap - adds a heap to the ion device
+ * @heap:		the heap to add
+ */
+void ion_device_add_heap(struct ion_heap *heap);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
+void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+		      struct vm_area_struct *vma);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+int ion_alloc(size_t len,
+	      unsigned int heap_id_mask,
+	      unsigned int flags);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap:		the heap
  *
- * Map the given handle into the kernel and return a kernel address that
- * can be used to access this address.
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
  */
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+void ion_heap_init_shrinker(struct ion_heap *heap);
 
 /**
- * ion_unmap_kernel() - destroy a kernel mapping for a handle
- * @client:	the client
- * @handle:	handle to unmap
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
  */
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+int ion_heap_init_deferred_free(struct ion_heap *heap);
 
 /**
- * ion_share_dma_buf() - share buffer as dma-buf
- * @client:	the client
- * @handle:	the handle
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap:		the heap
+ * @buffer:		the buffer
+ *
+ * Adds an item to the deferred freelist.
  */
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
-				  struct ion_handle *handle);
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
 
 /**
- * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
- * @client:	the client
- * @handle:	the handle
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap:		the heap
+ * @size:		amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed.  The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
  */
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
 
 /**
- * ion_import_dma_buf() - get ion_handle from dma-buf
- * @client:	the client
- * @dmabuf:	the dma-buf
+ * ion_heap_freelist_shrink - drain the deferred free
+ *				list, skipping any heap-specific
+ *				pooling or caching mechanisms
+ *
+ * @heap:		the heap
+ * @size:		amount of memory to drain in bytes
  *
- * Get the ion_buffer associated with the dma-buf and return the ion_handle.
- * If no ion_handle exists for this buffer, return newly created ion_handle.
- * If dma-buf from another exporter is passed, return ERR_PTR(-EINVAL)
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed.  The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages.  Everything must be
+ * genuinely free'd back to the system.  If you're free'ing from a
+ * shrinker you probably want to use this.  Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
  */
-struct ion_handle *ion_import_dma_buf(struct ion_client *client,
-				      struct dma_buf *dmabuf);
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+				size_t size);
 
 /**
- * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle
- * @client:	the client
- * @fd:		the dma-buf fd
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap:		the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre allocated memory to use from your heap.  Keeping
+ * a pool of memory that is ready for dma, ie any cached mapping have been
+ * invalidated from the cache, provides a significant performance benefit on
+ * many systems
+ */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count:		number of highmem items in the pool
+ * @low_count:		number of lowmem items in the pool
+ * @high_items:		list of highmem items
+ * @low_items:		list of lowmem items
+ * @mutex:		lock protecting this struct and especially the count
+ *			item list
+ * @gfp_mask:		gfp_mask to use from alloc
+ * @order:		order of pages in the pool
+ * @list:		plist node for list of pools
+ * @cached:		it's cached pool or not
+ *
+ * Allows you to keep a pool of pre allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, ie any cached mapping have
+ * been invalidated from the cache, provides a significant performance benefit
+ * on many systems
+ */
+struct ion_page_pool {
+	int high_count;
+	int low_count;
+	bool cached;
+	struct list_head high_items;
+	struct list_head low_items;
+	struct mutex mutex;
+	gfp_t gfp_mask;
+	unsigned int order;
+	struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+					   bool cached);
+void ion_page_pool_destroy(struct ion_page_pool *pool);
+struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
+
+/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool:		the pool
+ * @gfp_mask:		the memory type to reclaim
+ * @nr_to_scan:		number of items to shrink in pages
  *
- * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf_fd,
- * import that fd and return a handle representing it.  If a dma-buf from
- * another exporter is passed in this function will return ERR_PTR(-EINVAL)
+ * returns the number of items freed in pages
  */
-struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd);
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+			 int nr_to_scan);
+
+long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+int ion_query_heaps(struct ion_heap_query *query);
 
-#endif /* _LINUX_ION_H */
+#endif /* _ION_H */
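
The ion_heap_ops table added above is the contract a heap backend fills in. Below is a minimal sketch of how a hypothetical heap could wire into it: allocate() builds the sg_table the core consumes, free() returns the pages, and the mapping ops reuse the generic ion_heap_map_kernel()/ion_heap_unmap_kernel()/ion_heap_map_user() helpers declared in this header. The example_* names and the single-contiguous-chunk policy are assumptions made for illustration, not part of the header.

/* Hypothetical heap backend; example_* names are illustrative only. */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "ion.h"

static int example_heap_allocate(struct ion_heap *heap,
				 struct ion_buffer *buffer,
				 unsigned long len, unsigned long flags)
{
	struct sg_table *table;
	struct page *page;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	if (sg_alloc_table(table, 1, GFP_KERNEL))
		goto free_table;

	/* One physically contiguous chunk; real heaps may build longer sg lists. */
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(len));
	if (!page)
		goto free_sg;
	sg_set_page(table->sgl, page, PAGE_ALIGN(len), 0);

	/* The core reaches the backing memory through buffer->sg_table. */
	buffer->sg_table = table;
	return 0;

free_sg:
	sg_free_table(table);
free_table:
	kfree(table);
	return -ENOMEM;
}

static void example_heap_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;

	/*
	 * When ION_PRIV_FLAG_SHRINKER_FREE is set the pages must go straight
	 * back to the system; this heap keeps no pool, so both paths agree.
	 */
	__free_pages(sg_page(table->sgl), get_order(buffer->size));
	sg_free_table(table);
	kfree(table);
}

static struct ion_heap_ops example_heap_ops = {
	.allocate	= example_heap_allocate,
	.free		= example_heap_free,
	.map_kernel	= ion_heap_map_kernel,	/* generic helpers from this header */
	.unmap_kernel	= ion_heap_unmap_kernel,
	.map_user	= ion_heap_map_user,
};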
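Registering such a heap goes through the new ion_device_add_heap(), ion_heap_init_deferred_free() and ion_heap_init_shrinker() declarations. The sketch below is one plausible bring-up path under the assumption that the heap sets ION_HEAP_FLAG_DEFER_FREE and reuses example_heap_ops from the previous block; example_heap_init and the chosen heap type are illustrative only.

/* Hypothetical heap bring-up; builds on example_heap_ops above. */
#include <linux/init.h>

static struct ion_heap example_heap = {
	.type	= ION_HEAP_TYPE_SYSTEM_CONTIG,
	.ops	= &example_heap_ops,
	.name	= "example",
	.flags	= ION_HEAP_FLAG_DEFER_FREE,
};

static int __init example_heap_init(void)
{
	int ret;

	/* Frees now only enqueue on free_list; a heap kthread drains it later. */
	ret = ion_heap_init_deferred_free(&example_heap);
	if (ret)
		return ret;

	/* Expected when ION_HEAP_FLAG_DEFER_FREE or a shrink op is present. */
	ion_heap_init_shrinker(&example_heap);

	/* Make the heap selectable through a heap_id_mask bit at allocation time. */
	ion_device_add_heap(&example_heap);
	return 0;
}
device_initcall(example_heap_init);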
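The ion_page_pool declarations at the end of the header are the building block for heaps that keep pre-allocated, dma-ready pages around. A minimal usage sketch follows, assuming an order-0 uncached pool; the example_* wrappers are hypothetical and only show where each call would sit in a heap backend.

/* Hypothetical page-pool usage; example_* names are illustrative only. */
static struct ion_page_pool *example_pool;

static int example_pool_setup(void)
{
	/* order-0, uncached pool; gfp_mask is used when the pool must refill */
	example_pool = ion_page_pool_create(GFP_KERNEL | __GFP_ZERO, 0, false);
	return example_pool ? 0 : -ENOMEM;
}

static struct page *example_get_page(void)
{
	/* Served from the pool's high_items/low_items lists when possible. */
	return ion_page_pool_alloc(example_pool);
}

static void example_put_page(struct page *page, bool shrinker_free)
{
	/*
	 * On the ION_PRIV_FLAG_SHRINKER_FREE path pages must not be cached,
	 * so they bypass the pool and go straight back to the system.
	 */
	if (shrinker_free)
		__free_pages(page, 0);
	else
		ion_page_pool_free(example_pool, page);
}

static void example_pool_teardown(void)
{
	ion_page_pool_destroy(example_pool);
}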
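The shrink op in ion_heap_ops and ion_page_pool_shrink() are meant to fit together along the lines documented above: shrink requests are expressed in pages, and the deferred freelist is reclaimed separately via ion_heap_freelist_shrink(), which relies on .free honoring ION_PRIV_FLAG_SHRINKER_FREE. The sketch below is one plausible shape for a heap that owns example_pool from the previous block; treating nr_to_scan == 0 as a pure "how much could be freed" query is an assumption about ion_page_pool.c, not something this header guarantees.

/* Hypothetical .shrink op for a heap owning example_pool (see above). */
static int example_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
			       int nr_to_scan)
{
	/*
	 * Assumed behavior: with nr_to_scan == 0 the pool only reports how
	 * many pages it could give back; otherwise up to nr_to_scan pages
	 * are released to the system.  Return value is in pages.
	 */
	return ion_page_pool_shrink(example_pool, gfp_mask, nr_to_scan);
}

A heap would wire this in as .shrink = example_heap_shrink in its ion_heap_ops, alongside the ION_HEAP_FLAG_DEFER_FREE handling shown earlier.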