Diffstat (limited to 'drivers/dma-buf')

 drivers/dma-buf/Kconfig              |  11
 drivers/dma-buf/Makefile             |   2
 drivers/dma-buf/dma-buf.c            |  63
 drivers/dma-buf/dma-heap.c           | 297
 drivers/dma-buf/dma-resv.c           |  32
 drivers/dma-buf/heaps/Kconfig        |  14
 drivers/dma-buf/heaps/Makefile       |   4
 drivers/dma-buf/heaps/cma_heap.c     | 177
 drivers/dma-buf/heaps/heap-helpers.c | 271
 drivers/dma-buf/heaps/heap-helpers.h |  53
 drivers/dma-buf/heaps/system_heap.c  | 123
 drivers/dma-buf/udmabuf.c            |  85
 12 files changed, 1049 insertions(+), 83 deletions(-)
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index a23b6752d11a..0613bb7770f5 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -44,4 +44,15 @@ config DMABUF_SELFTESTS
default n
depends on DMA_SHARED_BUFFER
+menuconfig DMABUF_HEAPS
+ bool "DMA-BUF Userland Memory Heaps"
+ select DMA_SHARED_BUFFER
+ help
+ Choose this option to enable the DMA-BUF userland memory heaps.
+ This option creates per-heap chardevs in /dev/dma_heap/ which
+ allow userspace to allocate dma-bufs that can be shared
+ between drivers.
+
+source "drivers/dma-buf/heaps/Kconfig"
+
endmenu
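
(For context: a minimal userspace sketch of the allocation flow this option enables, assuming the uapi header added by this series and a registered heap named "system_heap". alloc_dmabuf() is a hypothetical helper, not part of the patch.)

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/dma-heap.h>

    int alloc_dmabuf(size_t len)
    {
        struct dma_heap_allocation_data data;
        int heap_fd, ret;

        heap_fd = open("/dev/dma_heap/system_heap", O_RDONLY);
        if (heap_fd < 0)
            return -1;

        memset(&data, 0, sizeof(data));     /* data.fd must be zero on entry */
        data.len = len;                     /* rounded up to PAGE_SIZE by the kernel */
        data.fd_flags = O_RDWR | O_CLOEXEC; /* must stay within DMA_HEAP_VALID_FD_FLAGS */

        ret = ioctl(heap_fd, DMA_HEAP_IOC_ALLOC, &data);
        close(heap_fd);
        if (ret < 0)
            return -1;

        return data.fd;                     /* dma-buf fd, shareable across drivers */
    }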
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 03479da06422..9c190026bfab 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
dma-resv.o seqno-fence.o
+obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
+obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ce41cd9b758a..d4097856c86b 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -878,29 +878,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
* access.
*
- * To support dma_buf objects residing in highmem cpu access is page-based
- * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
- * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
- * returns a pointer in kernel virtual address space. Afterwards the chunk
- * needs to be unmapped again. There is no limit on how often a given chunk
- * can be mapped and unmapped, i.e. the importer does not need to call
- * begin_cpu_access again before mapping the same chunk again.
- *
- * Interfaces::
- * void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
- * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
- *
- * Implementing the functions is optional for exporters and for importers all
- * the restrictions of using kmap apply.
- *
- * dma_buf kmap calls outside of the range specified in begin_cpu_access are
- * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
- * the partial chunks at the beginning and end but may return stale or bogus
- * data outside of the range (in these partial chunks).
- *
- * For some cases the overhead of kmap can be too high, a vmap interface
- * is introduced. This interface should be used very carefully, as vmalloc
- * space is a limited resources on many architectures.
+ * Since most kernel-internal dma-buf accesses need the entire buffer, a
+ * vmap interface is introduced. Note that on very old 32-bit architectures
+ * vmalloc space might be limited and result in vmap calls failing.
*
* Interfaces::
* void \*dma_buf_vmap(struct dma_buf \*dmabuf)
@@ -1050,43 +1030,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
-/**
- * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
- * same restrictions as for kmap and friends apply.
- * @dmabuf: [in] buffer to map page from.
- * @page_num: [in] page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
-{
- WARN_ON(!dmabuf);
-
- if (!dmabuf->ops->map)
- return NULL;
- return dmabuf->ops->map(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap);
-
-/**
- * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
- * @dmabuf: [in] buffer to unmap page from.
- * @page_num: [in] page in PAGE_SIZE units to unmap.
- * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
- void *vaddr)
-{
- WARN_ON(!dmabuf);
-
- if (dmabuf->ops->unmap)
- dmabuf->ops->unmap(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap);
-
/**
* dma_buf_mmap - Setup up a userspace mmap with the given vma
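
(For context: a hedged sketch of the vmap access pattern the updated documentation describes. importer_cpu_fill() is a hypothetical importer helper; the dma_buf_vmap()/dma_buf_vunmap() signatures used are the bare-pointer ones of this kernel version.)

    #include <linux/dma-buf.h>
    #include <linux/string.h>

    static int importer_cpu_fill(struct dma_buf *dmabuf, u8 pattern)
    {
        void *vaddr;
        int ret;

        ret = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
        if (ret)
            return ret;

        /* May fail, e.g. when vmalloc space is exhausted on 32-bit. */
        vaddr = dma_buf_vmap(dmabuf);
        if (!vaddr) {
            dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
            return -ENOMEM;
        }

        memset(vaddr, pattern, dmabuf->size);

        dma_buf_vunmap(dmabuf, vaddr);
        return dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
    }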
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
new file mode 100644
index 000000000000..4f04d104ae61
--- /dev/null
+++ b/drivers/dma-buf/dma-heap.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Framework for userspace DMA-BUF allocations
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/xarray.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/dma-heap.h>
+#include <uapi/linux/dma-heap.h>
+
+#define DEVNAME "dma_heap"
+
+#define NUM_HEAP_MINORS 128
+
+/**
+ * struct dma_heap - represents a dmabuf heap in the system
+ * @name: used for debugging/device-node name
+ * @ops: ops struct for this heap
+ * @heap_devt: heap device node
+ * @list: list head connecting to list of heaps
+ * @heap_cdev: heap char device
+ *
+ * Represents a heap of memory from which buffers can be made.
+ */
+struct dma_heap {
+ const char *name;
+ const struct dma_heap_ops *ops;
+ void *priv;
+ dev_t heap_devt;
+ struct list_head list;
+ struct cdev heap_cdev;
+};
+
+static LIST_HEAD(heap_list);
+static DEFINE_MUTEX(heap_list_lock);
+static dev_t dma_heap_devt;
+static struct class *dma_heap_class;
+static DEFINE_XARRAY_ALLOC(dma_heap_minors);
+
+static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+ unsigned int fd_flags,
+ unsigned int heap_flags)
+{
+ /*
+ * Allocations from all heaps have to begin
+ * and end on page boundaries.
+ */
+ len = PAGE_ALIGN(len);
+ if (!len)
+ return -EINVAL;
+
+ return heap->ops->allocate(heap, len, fd_flags, heap_flags);
+}
+
+static int dma_heap_open(struct inode *inode, struct file *file)
+{
+ struct dma_heap *heap;
+
+ heap = xa_load(&dma_heap_minors, iminor(inode));
+ if (!heap) {
+ pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
+ return -ENODEV;
+ }
+
+ /* instance data as context */
+ file->private_data = heap;
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static long dma_heap_ioctl_allocate(struct file *file, void *data)
+{
+ struct dma_heap_allocation_data *heap_allocation = data;
+ struct dma_heap *heap = file->private_data;
+ int fd;
+
+ if (heap_allocation->fd)
+ return -EINVAL;
+
+ if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
+ return -EINVAL;
+
+ if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
+ return -EINVAL;
+
+ fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
+ heap_allocation->fd_flags,
+ heap_allocation->heap_flags);
+ if (fd < 0)
+ return fd;
+
+ heap_allocation->fd = fd;
+
+ return 0;
+}
+
+static unsigned int dma_heap_ioctl_cmds[] = {
+ DMA_HEAP_IOC_ALLOC,
+};
+
+static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
+ unsigned long arg)
+{
+ char stack_kdata[128];
+ char *kdata = stack_kdata;
+ unsigned int kcmd;
+ unsigned int in_size, out_size, drv_size, ksize;
+ int nr = _IOC_NR(ucmd);
+ int ret = 0;
+
+ if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
+ return -EINVAL;
+
+ /* Get the kernel ioctl cmd that matches */
+ kcmd = dma_heap_ioctl_cmds[nr];
+
+ /* Figure out the delta between user cmd size and kernel cmd size */
+ drv_size = _IOC_SIZE(kcmd);
+ out_size = _IOC_SIZE(ucmd);
+ in_size = out_size;
+ if ((ucmd & kcmd & IOC_IN) == 0)
+ in_size = 0;
+ if ((ucmd & kcmd & IOC_OUT) == 0)
+ out_size = 0;
+ ksize = max(max(in_size, out_size), drv_size);
+
+ /* If necessary, allocate buffer for ioctl argument */
+ if (ksize > sizeof(stack_kdata)) {
+ kdata = kmalloc(ksize, GFP_KERNEL);
+ if (!kdata)
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* zero out any difference between the kernel/user structure size */
+ if (ksize > in_size)
+ memset(kdata + in_size, 0, ksize - in_size);
+
+ switch (kcmd) {
+ case DMA_HEAP_IOC_ALLOC:
+ ret = dma_heap_ioctl_allocate(file, kdata);
+ break;
+ default:
+ ret = -ENOTTY;
+ goto err;
+ }
+
+ if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+ ret = -EFAULT;
+err:
+ if (kdata != stack_kdata)
+ kfree(kdata);
+ return ret;
+}
+
+static const struct file_operations dma_heap_fops = {
+ .owner = THIS_MODULE,
+ .open = dma_heap_open,
+ .unlocked_ioctl = dma_heap_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dma_heap_ioctl,
+#endif
+};
+
+/**
+ * dma_heap_get_drvdata() - get per-subdriver data for the heap
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The per-subdriver data for the heap.
+ */
+void *dma_heap_get_drvdata(struct dma_heap *heap)
+{
+ return heap->priv;
+}
+
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
+{
+ struct dma_heap *heap, *h, *err_ret;
+ struct device *dev_ret;
+ unsigned int minor;
+ int ret;
+
+ if (!exp_info->name || !strcmp(exp_info->name, "")) {
+ pr_err("dma_heap: Cannot add heap without a name\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!exp_info->ops || !exp_info->ops->allocate) {
+ pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* check the name is unique */
+ mutex_lock(&heap_list_lock);
+ list_for_each_entry(h, &heap_list, list) {
+ if (!strcmp(h->name, exp_info->name)) {
+ mutex_unlock(&heap_list_lock);
+ pr_err("dma_heap: Already registered heap named %s\n",
+ exp_info->name);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ mutex_unlock(&heap_list_lock);
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+
+ heap->name = exp_info->name;
+ heap->ops = exp_info->ops;
+ heap->priv = exp_info->priv;
+
+ /* Find unused minor number */
+ ret = xa_alloc(&dma_heap_minors, &minor, heap,
+ XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("dma_heap: Unable to get minor number for heap\n");
+ err_ret = ERR_PTR(ret);
+ goto err0;
+ }
+
+ /* Create device */
+ heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor);
+
+ cdev_init(&heap->heap_cdev, &dma_heap_fops);
+ ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
+ if (ret < 0) {
+ pr_err("dma_heap: Unable to add char device\n");
+ err_ret = ERR_PTR(ret);
+ goto err1;
+ }
+
+ dev_ret = device_create(dma_heap_class,
+ NULL,
+ heap->heap_devt,
+ NULL,
+ heap->name);
+ if (IS_ERR(dev_ret)) {
+ pr_err("dma_heap: Unable to create device\n");
+ err_ret = ERR_CAST(dev_ret);
+ goto err2;
+ }
+ /* Add heap to the list */
+ mutex_lock(&heap_list_lock);
+ list_add(&heap->list, &heap_list);
+ mutex_unlock(&heap_list_lock);
+
+ return heap;
+
+err2:
+ cdev_del(&heap->heap_cdev);
+err1:
+ xa_erase(&dma_heap_minors, minor);
+err0:
+ kfree(heap);
+ return err_ret;
+}
+
+static char *dma_heap_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
+}
+
+static int dma_heap_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
+ if (ret)
+ return ret;
+
+ dma_heap_class = class_create(THIS_MODULE, DEVNAME);
+ if (IS_ERR(dma_heap_class)) {
+ unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
+ return PTR_ERR(dma_heap_class);
+ }
+ dma_heap_class->devnode = dma_heap_devnode;
+
+ return 0;
+}
+subsys_initcall(dma_heap_init);
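
(For context: a hedged sketch of the exporter side of the API registered above. "my_heap" and my_heap_allocate() are placeholders; a real heap would allocate backing pages, export a dma-buf and return an fd from its allocate op, as the system and CMA heaps below do.)

    #include <linux/dma-heap.h>
    #include <linux/err.h>
    #include <linux/module.h>

    static int my_heap_allocate(struct dma_heap *heap, unsigned long len,
                                unsigned long fd_flags, unsigned long heap_flags)
    {
        /* allocate backing pages, export a dma-buf, return an fd */
        return -ENOMEM;
    }

    static const struct dma_heap_ops my_heap_ops = {
        .allocate = my_heap_allocate,
    };

    static int __init my_heap_init(void)
    {
        struct dma_heap_export_info exp_info = {
            .name = "my_heap",      /* becomes /dev/dma_heap/my_heap */
            .ops = &my_heap_ops,
            .priv = NULL,           /* retrieved later via dma_heap_get_drvdata() */
        };
        struct dma_heap *heap;

        heap = dma_heap_add(&exp_info);
        return PTR_ERR_OR_ZERO(heap);
    }
    module_init(my_heap_init);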
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 709002515550..4264e64788c4 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -34,6 +34,7 @@
#include <linux/dma-resv.h>
#include <linux/export.h>
+#include <linux/sched/mm.h>
/**
* DOC: Reservation Object Overview
@@ -95,6 +96,37 @@ static void dma_resv_list_free(struct dma_resv_list *list)
kfree_rcu(list, rcu);
}
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+ struct mm_struct *mm = mm_alloc();
+ struct ww_acquire_ctx ctx;
+ struct dma_resv obj;
+ int ret;
+
+ if (!mm)
+ return -ENOMEM;
+
+ dma_resv_init(&obj);
+
+ down_read(&mm->mmap_sem);
+ ww_acquire_init(&ctx, &reservation_ww_class);
+ ret = dma_resv_lock(&obj, &ctx);
+ if (ret == -EDEADLK)
+ dma_resv_lock_slow(&obj, &ctx);
+ fs_reclaim_acquire(GFP_KERNEL);
+ fs_reclaim_release(GFP_KERNEL);
+ ww_mutex_unlock(&obj.lock);
+ ww_acquire_fini(&ctx);
+ up_read(&mm->mmap_sem);
+
+ mmput(mm);
+
+ return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
+
/**
* dma_resv_init - initialize a reservation object
* @obj: the reservation object
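
(For context: the primer above records, once at boot, the canonical nesting mmap_sem -> dma_resv -> fs-reclaim, so that lockdep can flag drivers that invert it. A hedged sketch of what the priming permits; my_driver_update() is hypothetical.)

    #include <linux/dma-resv.h>
    #include <linux/slab.h>

    static void my_driver_update(struct dma_resv *obj)
    {
        void *ptr;

        /*
         * Allowed: the primed ordering permits GFP_KERNEL allocations
         * (and hence direct reclaim) while the reservation lock is held.
         */
        dma_resv_lock(obj, NULL);
        ptr = kmalloc(64, GFP_KERNEL);
        kfree(ptr);
        dma_resv_unlock(obj);
    }

    /*
     * Conversely, acquiring a dma_resv lock from the reclaim path (for
     * example inside a shrinker) inverts dma_resv -> fs-reclaim, and with
     * the priming above lockdep reports that inversion immediately instead
     * of only when the rare allocation-under-lock actually recurses.
     */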
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
new file mode 100644
index 000000000000..a5eef06c4226
--- /dev/null
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -0,0 +1,14 @@
+config DMABUF_HEAPS_SYSTEM
+ bool "DMA-BUF System Heap"
+ depends on DMABUF_HEAPS
+ help
+ Choose this option to enable the system dmabuf heap. The system heap
+ is backed by pages from the buddy allocator. If in doubt, say Y.
+
+config DMABUF_HEAPS_CMA
+ bool "DMA-BUF CMA Heap"
+ depends on DMABUF_HEAPS && DMA_CMA
+ help
+ Choose this option to enable dma-buf CMA heap. This heap is backed
+ by the Contiguous Memory Allocator (CMA). If your system has these
+ regions, you should say Y here.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
new file mode 100644
index 000000000000..6e54cdec3da0
--- /dev/null
+++ b/drivers/dma-buf/heaps/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += heap-helpers.o
+obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
new file mode 100644
index 000000000000..626cf7fd033a
--- /dev/null
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF CMA heap exporter
+ *
+ * Copyright (C) 2012, 2019 Linaro Ltd.
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ */
+
+#include <linux/cma.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-contiguous.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/sched/signal.h>
+
+#include "heap-helpers.h"
+
+struct cma_heap {
+ struct dma_heap *heap;
+ struct cma *cma;
+};
+
+static void cma_heap_free(struct heap_helper_buffer *buffer)
+{
+ struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
+ unsigned long nr_pages = buffer->pagecount;
+ struct page *cma_pages = buffer->priv_virt;
+
+ /* free page list */
+ kfree(buffer->pages);
+ /* release memory */
+ cma_release(cma_heap->cma, cma_pages, nr_pages);
+ kfree(buffer);
+}
+
+/* dmabuf heap CMA operations functions */
+static int cma_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
+ struct heap_helper_buffer *helper_buffer;
+ struct page *cma_pages;
+ size_t size = PAGE_ALIGN(len);
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned long align = get_order(size);
+ struct dma_buf *dmabuf;
+ int ret = -ENOMEM;
+ pgoff_t pg;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+ if (!helper_buffer)
+ return -ENOMEM;
+
+ init_heap_helper_buffer(helper_buffer, cma_heap_free);
+ helper_buffer->heap = heap;
+ helper_buffer->size = len;
+
+ cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
+ if (!cma_pages)
+ goto free_buf;
+
+ if (PageHighMem(cma_pages)) {
+ unsigned long nr_clear_pages = nr_pages;
+ struct page *page = cma_pages;
+
+ while (nr_clear_pages > 0) {
+ void *vaddr = kmap_atomic(page);
+
+ memset(vaddr, 0, PAGE_SIZE);
+ kunmap_atomic(vaddr);
+ /*
+ * Avoid wasting time zeroing memory if the process
+ * has been killed by SIGKILL
+ */
+ if (fatal_signal_pending(current))
+ goto free_cma;
+
+ page++;
+ nr_clear_pages--;
+ }
+ } else {
+ memset(page_address(cma_pages), 0, size);
+ }
+
+ helper_buffer->pagecount = nr_pages;
+ helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
+ sizeof(*helper_buffer->pages),
+ GFP_KERNEL);
+ if (!helper_buffer->pages) {
+ ret = -ENOMEM;
+ goto free_cma;
+ }
+
+ for (pg = 0; pg < helper_buffer->pagecount; pg++)
+ helper_buffer->pages[pg] = &cma_pages[pg];
+
+ /* create the dmabuf */
+ dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto free_pages;
+ }
+
+ helper_buffer->dmabuf = dmabuf;
+ helper_buffer->priv_virt = cma_pages;
+
+ ret = dma_buf_fd(dmabuf, fd_flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ return ret;
+ }
+
+ return ret;
+
+free_pages:
+ kfree(helper_buffer->pages);
+free_cma:
+ cma_release(cma_heap->cma, cma_pages, nr_pages);
+free_buf:
+ kfree(helper_buffer);
+ return ret;
+}
+
+static const struct dma_heap_ops cma_heap_ops = {
+ .allocate = cma_heap_allocate,
+};
+
+static int __add_cma_heap(struct cma *cma, void *data)
+{
+ struct cma_heap *cma_heap;
+ struct dma_heap_export_info exp_info;
+
+ cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
+ if (!cma_heap)
+ return -ENOMEM;
+ cma_heap->cma = cma;
+
+ exp_info.name = cma_get_name(cma);
+ exp_info.ops = &cma_heap_ops;
+ exp_info.priv = cma_heap;
+
+ cma_heap->heap = dma_heap_add(&exp_info);
+ if (IS_ERR(cma_heap->heap)) {
+ int ret = PTR_ERR(cma_heap->heap);
+
+ kfree(cma_heap);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int add_default_cma_heap(void)
+{
+ struct cma *default_cma = dev_get_cma_area(NULL);
+ int ret = 0;
+
+ if (default_cma)
+ ret = __add_cma_heap(default_cma, NULL);
+
+ return ret;
+}
+module_init(add_default_cma_heap);
+MODULE_DESCRIPTION("DMA-BUF CMA Heap");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
new file mode 100644
index 000000000000..9f964ca3f59c
--- /dev/null
+++ b/drivers/dma-buf/heaps/heap-helpers.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <uapi/linux/dma-heap.h>
+
+#include "heap-helpers.h"
+
+void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
+ void (*free)(struct heap_helper_buffer *))
+{
+ buffer->priv_virt = NULL;
+ mutex_init(&buffer->lock);
+ buffer->vmap_cnt = 0;
+ buffer->vaddr = NULL;
+ buffer->pagecount = 0;
+ buffer->pages = NULL;
+ INIT_LIST_HEAD(&buffer->attachments);
+ buffer->free = free;
+}
+
+struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
+ int fd_flags)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &heap_helper_ops;
+ exp_info.size = buffer->size;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+
+ return dma_buf_export(&exp_info);
+}
+
+static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
+{
+ void *vaddr;
+
+ vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
+{
+ if (buffer->vmap_cnt > 0) {
+ WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
+ vunmap(buffer->vaddr);
+ }
+
+ buffer->free(buffer);
+}
+
+static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->vmap_cnt) {
+ buffer->vmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = dma_heap_map_kernel(buffer);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->vmap_cnt++;
+ return vaddr;
+}
+
+static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
+{
+ if (!--buffer->vmap_cnt) {
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+}
+
+struct dma_heaps_attachment {
+ struct device *dev;
+ struct sg_table table;
+ struct list_head list;
+};
+
+static int dma_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct dma_heaps_attachment *a;
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ int ret;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
+ buffer->pagecount, 0,
+ buffer->pagecount << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(a);
+ return ret;
+ }
+
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void dma_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct dma_heaps_attachment *a = attachment->priv;
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(&a->table);
+ kfree(a);
+}
+
+static
+struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_heaps_attachment *a = attachment->priv;
+ struct sg_table *table;
+
+ table = &a->table;
+
+ if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
+ direction))
+ table = ERR_PTR(-ENOMEM);
+ return table;
+}
+
+static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
+}
+
+static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct heap_helper_buffer *buffer = vma->vm_private_data;
+
+ if (vmf->pgoff >= buffer->pagecount)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = buffer->pages[vmf->pgoff];
+ get_page(vmf->page);
+
+ return 0;
+}
+
+static const struct vm_operations_struct dma_heap_vm_ops = {
+ .fault = dma_heap_vm_fault,
+};
+
+static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+ return -EINVAL;
+
+ vma->vm_ops = &dma_heap_vm_ops;
+ vma->vm_private_data = buffer;
+
+ return 0;
+}
+
+static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ dma_heap_buffer_destroy(buffer);
+}
+
+static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ struct dma_heaps_attachment *a;
+ int ret = 0;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
+ direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return ret;
+}
+
+static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ struct dma_heaps_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ flush_kernel_vmap_range(buffer->vaddr, buffer->size);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
+ direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ mutex_lock(&buffer->lock);
+ vaddr = dma_heap_buffer_vmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+
+ return vaddr;
+}
+
+static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ dma_heap_buffer_vmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+const struct dma_buf_ops heap_helper_ops = {
+ .map_dma_buf = dma_heap_map_dma_buf,
+ .unmap_dma_buf = dma_heap_unmap_dma_buf,
+ .mmap = dma_heap_mmap,
+ .release = dma_heap_dma_buf_release,
+ .attach = dma_heap_attach,
+ .detach = dma_heap_detach,
+ .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = dma_heap_dma_buf_end_cpu_access,
+ .vmap = dma_heap_dma_buf_vmap,
+ .vunmap = dma_heap_dma_buf_vunmap,
+};
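
(For context: userspace reaches the mmap and begin/end_cpu_access hooks above through a plain mmap() of the dma-buf fd plus the existing DMA_BUF_IOCTL_SYNC ioctl. A hedged sketch; fill_buffer() is hypothetical, and buf_fd is assumed to be an fd returned by a heap's allocation ioctl.)

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/dma-buf.h>

    static int fill_buffer(int buf_fd, size_t len)
    {
        struct dma_buf_sync sync = { 0 };
        void *p;

        /* dma_heap_mmap() rejects private mappings, so MAP_SHARED is required */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
        if (p == MAP_FAILED)
            return -1;

        /* bracket CPU access; ends up in dma_heap_dma_buf_begin_cpu_access() */
        sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
        ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);

        memset(p, 0xab, len);

        sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
        ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);

        munmap(p, len);
        return 0;
    }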
diff --git a/drivers/dma-buf/heaps/heap-helpers.h b/drivers/dma-buf/heaps/heap-helpers.h
new file mode 100644
index 000000000000..805d2df88024
--- /dev/null
+++ b/drivers/dma-buf/heaps/heap-helpers.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMABUF Heaps helper code
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#ifndef _HEAP_HELPERS_H
+#define _HEAP_HELPERS_H
+
+#include <linux/dma-heap.h>
+#include <linux/list.h>
+
+/**
+ * struct heap_helper_buffer - helper buffer metadata
+ * @heap: back pointer to the heap the buffer came from
+ * @dmabuf: backing dma-buf for this buffer
+ * @size: size of the buffer
+ * @priv_virt: pointer to heap-specific private value
+ * @lock: mutex to protect the data in this structure
+ * @vmap_cnt: count of vmap references on the buffer
+ * @vaddr: vmap'ed virtual address
+ * @pagecount: number of pages in the buffer
+ * @pages: list of page pointers
+ * @attachments: list of device attachments
+ * @free: heap callback to free the buffer
+ */
+struct heap_helper_buffer {
+ struct dma_heap *heap;
+ struct dma_buf *dmabuf;
+ size_t size;
+
+ void *priv_virt;
+ struct mutex lock;
+ int vmap_cnt;
+ void *vaddr;
+ pgoff_t pagecount;
+ struct page **pages;
+ struct list_head attachments;
+
+ void (*free)(struct heap_helper_buffer *buffer);
+};
+
+void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
+ void (*free)(struct heap_helper_buffer *));
+
+struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
+ int fd_flags);
+
+extern const struct dma_buf_ops heap_helper_ops;
+#endif /* _HEAP_HELPERS_H */
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
new file mode 100644
index 000000000000..1aa01e98c595
--- /dev/null
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF System heap exporter
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-heap.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <asm/page.h>
+
+#include "heap-helpers.h"
+
+static struct dma_heap *sys_heap;
+
+static void system_heap_free(struct heap_helper_buffer *buffer)
+{
+ pgoff_t pg;
+
+ for (pg = 0; pg < buffer->pagecount; pg++)
+ __free_page(buffer->pages[pg]);
+ kfree(buffer->pages);
+ kfree(buffer);
+}
+
+static int system_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ struct heap_helper_buffer *helper_buffer;
+ struct dma_buf *dmabuf;
+ int ret = -ENOMEM;
+ pgoff_t pg;
+
+ helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+ if (!helper_buffer)
+ return -ENOMEM;
+
+ init_heap_helper_buffer(helper_buffer, system_heap_free);
+ helper_buffer->heap = heap;
+ helper_buffer->size = len;
+
+ helper_buffer->pagecount = len / PAGE_SIZE;
+ helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
+ sizeof(*helper_buffer->pages),
+ GFP_KERNEL);
+ if (!helper_buffer->pages) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ for (pg = 0; pg < helper_buffer->pagecount; pg++) {
+ /*
+ * Avoid trying to allocate memory if the process
+ * has been killed by SIGKILL
+ */
+ if (fatal_signal_pending(current))
+ goto err1;
+
+ helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!helper_buffer->pages[pg])
+ goto err1;
+ }
+
+ /* create the dmabuf */
+ dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto err1;
+ }
+
+ helper_buffer->dmabuf = dmabuf;
+
+ ret = dma_buf_fd(dmabuf, fd_flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ return ret;
+ }
+
+ return ret;
+
+err1:
+ while (pg > 0)
+ __free_page(helper_buffer->pages[--pg]);
+ kfree(helper_buffer->pages);
+err0:
+ kfree(helper_buffer);
+
+ return ret;
+}
+
+static const struct dma_heap_ops system_heap_ops = {
+ .allocate = system_heap_allocate,
+};
+
+static int system_heap_create(void)
+{
+ struct dma_heap_export_info exp_info;
+ int ret = 0;
+
+ exp_info.name = "system_heap";
+ exp_info.ops = &system_heap_ops;
+ exp_info.priv = NULL;
+
+ sys_heap = dma_heap_add(&exp_info);
+ if (IS_ERR(sys_heap))
+ ret = PTR_ERR(sys_heap);
+
+ return ret;
+}
+module_init(system_heap_create);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 9635897458a0..61b0a2cff874 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -18,6 +18,8 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
struct udmabuf {
pgoff_t pagecount;
struct page **pages;
+ struct sg_table *sg;
+ struct miscdevice *device;
};
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -46,10 +48,10 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
return 0;
}
-static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
- enum dma_data_direction direction)
+static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
+ enum dma_data_direction direction)
{
- struct udmabuf *ubuf = at->dmabuf->priv;
+ struct udmabuf *ubuf = buf->priv;
struct sg_table *sg;
int ret;
@@ -61,7 +63,7 @@ static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
GFP_KERNEL);
if (ret < 0)
goto err;
- if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) {
+ if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
ret = -EINVAL;
goto err;
}
@@ -73,54 +75,90 @@ err:
return ERR_PTR(ret);
}
+static void put_sg_table(struct device *dev, struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
+ enum dma_data_direction direction)
+{
+ return get_sg_table(at->dev, at->dmabuf, direction);
+}
+
static void unmap_udmabuf(struct dma_buf_attachment *at,
struct sg_table *sg,
enum dma_data_direction direction)
{
- dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
- sg_free_table(sg);
- kfree(sg);
+ return put_sg_table(at->dev, sg, direction);
}
static void release_udmabuf(struct dma_buf *buf)
{
struct udmabuf *ubuf = buf->priv;
+ struct device *dev = ubuf->device->this_device;
pgoff_t pg;
+ if (ubuf->sg)
+ put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
for (pg = 0; pg < ubuf->pagecount; pg++)
put_page(ubuf->pages[pg]);
kfree(ubuf->pages);
kfree(ubuf);
}
-static void *kmap_udmabuf(struct dma_buf *buf, unsigned long page_num)
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+ enum dma_data_direction direction)
{
struct udmabuf *ubuf = buf->priv;
- struct page *page = ubuf->pages[page_num];
+ struct device *dev = ubuf->device->this_device;
+
+ if (!ubuf->sg) {
+ ubuf->sg = get_sg_table(dev, buf, direction);
+ if (IS_ERR(ubuf->sg))
+ return PTR_ERR(ubuf->sg);
+ } else {
+ dma_sync_sg_for_device(dev, ubuf->sg->sgl,
+ ubuf->sg->nents,
+ direction);
+ }
- return kmap(page);
+ return 0;
}
-static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
- void *vaddr)
+static int end_cpu_udmabuf(struct dma_buf *buf,
+ enum dma_data_direction direction)
{
- kunmap(vaddr);
+ struct udmabuf *ubuf = buf->priv;
+ struct device *dev = ubuf->device->this_device;
+
+ if (!ubuf->sg)
+ return -EINVAL;
+
+ dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+ return 0;
}
static const struct dma_buf_ops udmabuf_ops = {
- .map_dma_buf = map_udmabuf,
- .unmap_dma_buf = unmap_udmabuf,
- .release = release_udmabuf,
- .map = kmap_udmabuf,
- .unmap = kunmap_udmabuf,
- .mmap = mmap_udmabuf,
+ .cache_sgt_mapping = true,
+ .map_dma_buf = map_udmabuf,
+ .unmap_dma_buf = unmap_udmabuf,
+ .release = release_udmabuf,
+ .mmap = mmap_udmabuf,
+ .begin_cpu_access = begin_cpu_udmabuf,
+ .end_cpu_access = end_cpu_udmabuf,
};
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)
-static long udmabuf_create(const struct udmabuf_create_list *head,
- const struct udmabuf_create_item *list)
+static long udmabuf_create(struct miscdevice *device,
+ struct udmabuf_create_list *head,
+ struct udmabuf_create_item *list)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct file *memfd = NULL;
@@ -187,6 +225,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
exp_info.priv = ubuf;
exp_info.flags = O_RDWR;
+ ubuf->device = device;
buf = dma_buf_export(&exp_info);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
@@ -224,7 +263,7 @@ static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
list.offset = create.offset;
list.size = create.size;
- return udmabuf_create(&head, &list);
+ return udmabuf_create(filp->private_data, &head, &list);
}
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
@@ -243,7 +282,7 @@ static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
if (IS_ERR(list))
return PTR_ERR(list);
- ret = udmabuf_create(&head, list);
+ ret = udmabuf_create(filp->private_data, &head, list);
kfree(list);
return ret;
}
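
(For context: a minimal userspace sketch of driving /dev/udmabuf, assuming the existing udmabuf uapi; create_udmabuf() is hypothetical. The backing memfd must carry F_SEAL_SHRINK and must not be write-sealed, matching SEALS_WANTED/SEALS_DENIED above, and offset/size must be page-aligned.)

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/udmabuf.h>

    static int create_udmabuf(size_t size)   /* size: multiple of PAGE_SIZE */
    {
        struct udmabuf_create create;
        int memfd, devfd, buf_fd;

        memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
        if (memfd < 0)
            return -1;
        ftruncate(memfd, size);
        fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);   /* required by the driver */

        devfd = open("/dev/udmabuf", O_RDWR);
        if (devfd < 0) {                            /* simplified error handling */
            close(memfd);
            return -1;
        }

        memset(&create, 0, sizeof(create));
        create.memfd = memfd;
        create.offset = 0;
        create.size = size;

        buf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
        close(devfd);
        close(memfd);                               /* dma-buf holds its own refs */
        return buf_fd;                              /* a dma-buf fd on success */
    }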