author    Linus Torvalds <torvalds@linux-foundation.org>  2016-10-11 18:12:22 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-10-11 18:12:22 -0700
commit    6b25e21fa6f26d0f0d45f161d169029411c84286 (patch)
tree      fdff805ecd81ec46951f49577efe450ddb7d060a /drivers/dma-buf
parent    a379f71a30dddbd2e7393624e455ce53c87965d1 (diff)
parent    69405d3da98b48633b78a49403e4f9cdb7c6a0f5 (diff)
Merge tag 'drm-for-v4.9' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "Core:
   - Fence destaging work
   - DRIVER_LEGACY to split off legacy drm drivers
   - drm_mm refactoring
   - Splitting drm_crtc.c into chunks and documenting better
   - Display info fixes
   - rbtree support for prime buffer lookup
   - Simple VGA DAC driver

  Panel:
   - Add Nexus 7 panel
   - More simple panels

  i915:
   - Refactoring GEM naming
   - Refactored vma/active tracking
   - Lockless request lookups
   - Better stolen memory support
   - FBC fixes
   - SKL watermark fixes
   - VGPU improvements
   - dma-buf fencing support
   - Better DP dongle support

  amdgpu:
   - Powerplay for Iceland asics
   - Improved GPU reset support
   - UVD/VEC powergating support for CZ/ST
   - Preinitialised VRAM buffer support
   - Virtual display support
   - Initial SI support
   - GTT rework
   - PCI shutdown callback support
   - HPD IRQ storm fixes

  amdkfd:
   - bugfixes

  tilcdc:
   - Atomic modesetting support

  mediatek:
   - AAL + GAMMA engine support
   - Hook up gamma LUT
   - Temporal dithering support

  imx:
   - Pixel clock from devicetree
   - drm bridge support for LVDS bridges
   - active plane reconfiguration
   - VDIC deinterlacer support
   - Frame synchronisation unit support
   - Color space conversion support

  analogix:
   - PSR support
   - Better panel on/off support

  rockchip:
   - rk3399 vop/crtc support
   - PSR support

  vc4:
   - Interlaced vblank timing
   - 3D rendering CPU overhead reduction
   - HDMI output fixes

  tda998x:
   - HDMI audio ASoC support

  sunxi:
   - Allwinner A33 support
   - better TCON support

  msm:
   - DT binding cleanups
   - Explicit fence-fd support

  sti:
   - remove sti415/416 support

  etnaviv:
   - MMUv2 refactoring
   - GC3000 support

  exynos:
   - Refactoring HDMI DCC/PHY
   - G2D pm regression fix
   - Page fault issues with wait for vblank

  There is no nouveau work in this tree, as Ben didn't get a pull
  request in, and he was fighting moving to atomic and adding mst
  support, so maybe best it waits for a cycle"

* tag 'drm-for-v4.9' of git://people.freedesktop.org/~airlied/linux: (1412 commits)
  drm/crtc: constify drm_crtc_index parameter
  drm/i915: Fix conflict resolution from backmerge of v4.8-rc8 to drm-next
  drm/i915/guc: Unwind GuC workqueue reservation if request construction fails
  drm/i915: Reset the breadcrumbs IRQ more carefully
  drm/i915: Force relocations via cpu if we run out of idle aperture
  drm/i915: Distinguish last emitted request from last submitted request
  drm/i915: Allow DP to work w/o EDID
  drm/i915: Move long hpd handling into the hotplug work
  drm/i915/execlists: Reinitialise context image after GPU hang
  drm/i915: Use correct index for backtracking HUNG semaphores
  drm/i915: Unalias obj->phys_handle and obj->userptr
  drm/i915: Just clear the mmiodebug before a register access
  drm/i915/gen9: only add the planes actually affected by ddb changes
  drm/i915: Allow PCH DPLL sharing regardless of DPLL_SDVO_HIGH_SPEED
  drm/i915/bxt: Fix HDMI DPLL configuration
  drm/i915/gen9: fix the watermark res_blocks value
  drm/i915/gen9: fix plane_blocks_per_line on watermarks calculations
  drm/i915/gen9: minimum scanlines for Y tile is not always 4
  drm/i915/gen9: fix the WaWmMemoryReadLatency implementation
  drm/i915/kbl: KBL also needs to run the SAGV code
  ...
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/dma-buf.c       23
-rw-r--r--  drivers/dma-buf/fence-array.c    7
-rw-r--r--  drivers/dma-buf/reservation.c    2
-rw-r--r--  drivers/dma-buf/sync_debug.c    12
-rw-r--r--  drivers/dma-buf/sync_file.c    204
5 files changed, 176 insertions, 72 deletions
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ddaee60ae52a..cf04d249a6a4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -586,6 +586,22 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ bool write = (direction == DMA_BIDIRECTIONAL ||
+ direction == DMA_TO_DEVICE);
+ struct reservation_object *resv = dmabuf->resv;
+ long ret;
+
+ /* Wait on any implicit rendering fences */
+ ret = reservation_object_wait_timeout_rcu(resv, write, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
/**
* dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
@@ -608,6 +624,13 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (dmabuf->ops->begin_cpu_access)
ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
+ /* Ensure that all fences are waited upon - but we first allow
+ * the native handler the chance to do so more efficiently if it
+ * chooses. A double invocation here will be a reasonably cheap no-op.
+ */
+ if (ret == 0)
+ ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
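The two hunks above mean an importer no longer has to wait on the buffer's
reservation fences itself before touching the pages from the CPU; the wait now
happens inside dma_buf_begin_cpu_access() even when the exporter provides no
begin_cpu_access hook. A minimal sketch of the resulting calling pattern
(example_cpu_read() is a hypothetical helper, not part of this patch; it only
assumes <linux/dma-buf.h>):

static int example_cpu_read(struct dma_buf *dmabuf)
{
	int ret;

	/* Waits for the implicit (reservation) fences before CPU access. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... read the pages, e.g. via dma_buf_kmap() or dma_buf_vmap() ... */

	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}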
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
index a8731c853da6..f1989fcaf354 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/fence-array.c
@@ -99,6 +99,7 @@ const struct fence_ops fence_array_ops = {
.wait = fence_default_wait,
.release = fence_array_release,
};
+EXPORT_SYMBOL(fence_array_ops);
/**
* fence_array_create - Create a custom fence array
@@ -106,14 +107,14 @@ const struct fence_ops fence_array_ops = {
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
- * @signal_on_any [in] signal on any fence in the array
+ * @signal_on_any: [in] signal on any fence in the array
*
* Allocate a fence_array object and initialize the base fence with fence_init().
* In case of error it returns NULL.
*
- * The caller should allocte the fences array with num_fences size
+ * The caller should allocate the fences array with num_fences size
* and fill it with the fences it wants to add to the object. Ownership of this
- * array is take and fence_put() is used on each fence on release.
+ * array is taken and fence_put() is used on each fence on release.
*
* If @signal_on_any is true the fence array signals if any fence in the array
* signals, otherwise it signals when all fences in the array signal.
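The corrected kerneldoc above spells out the ownership rule for
fence_array_create(): the caller allocates the fences array, fills it with
referenced fences, and on success the fence_array takes over both the storage
and the references. A sketch of that rule (example_merge_two() is a
hypothetical helper, not from this patch):

static struct fence *example_merge_two(struct fence *a, struct fence *b)
{
	struct fence **fences;
	struct fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	fences[0] = fence_get(a);
	fences[1] = fence_get(b);

	/* signal_on_any = false: the array signals once both fences signal */
	array = fence_array_create(2, fences, fence_context_alloc(1), 1, false);
	if (!array) {
		/* creation failed, so we still own the references and storage */
		fence_put(fences[0]);
		fence_put(fences[1]);
		kfree(fences);
		return NULL;
	}

	/* on success the fence_array owns 'fences' and its references */
	return &array->base;
}

This is the same pattern sync_file_set_fence() relies on in the sync_file.c
changes below.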
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 9566a62ad8e3..723d8af988e5 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -205,7 +205,7 @@ done:
* @fence: the shared fence to add
*
* Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared_fence has been called.
+ * reservation_object_reserve_shared() has been called.
*/
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct fence *fence)
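The fixed kerneldoc documents the calling protocol: hold obj->lock (a
ww_mutex), reserve a shared slot with reservation_object_reserve_shared(),
then publish the fence. A sketch under those assumptions (example_add_shared()
is a hypothetical helper, not part of this patch):

static int example_add_shared(struct reservation_object *resv,
			      struct fence *fence)
{
	int ret;

	ww_mutex_lock(&resv->lock, NULL);
	ret = reservation_object_reserve_shared(resv);
	if (!ret)
		reservation_object_add_shared_fence(resv, fence);
	ww_mutex_unlock(&resv->lock);

	return ret;
}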
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index fab95204cf74..2dd4c3db6caa 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -135,10 +135,16 @@ static void sync_print_sync_file(struct seq_file *s,
int i;
seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
- sync_status_str(atomic_read(&sync_file->status)));
+ sync_status_str(!fence_is_signaled(sync_file->fence)));
- for (i = 0; i < sync_file->num_fences; ++i)
- sync_print_fence(s, sync_file->cbs[i].fence, true);
+ if (fence_is_array(sync_file->fence)) {
+ struct fence_array *array = to_fence_array(sync_file->fence);
+
+ for (i = 0; i < array->num_fences; ++i)
+ sync_print_fence(s, array->fences[i], true);
+ } else {
+ sync_print_fence(s, sync_file->fence, true);
+ }
}
static int sync_debugfs_show(struct seq_file *s, void *unused)
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 9aaa608dfe01..b29a9e817320 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -28,11 +28,11 @@
static const struct file_operations sync_file_fops;
-static struct sync_file *sync_file_alloc(int size)
+static struct sync_file *sync_file_alloc(void)
{
struct sync_file *sync_file;
- sync_file = kzalloc(size, GFP_KERNEL);
+ sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
if (!sync_file)
return NULL;
@@ -45,6 +45,8 @@ static struct sync_file *sync_file_alloc(int size)
init_waitqueue_head(&sync_file->wq);
+ INIT_LIST_HEAD(&sync_file->cb.node);
+
return sync_file;
err:
@@ -54,14 +56,11 @@ err:
static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
{
- struct sync_file_cb *check;
struct sync_file *sync_file;
- check = container_of(cb, struct sync_file_cb, cb);
- sync_file = check->sync_file;
+ sync_file = container_of(cb, struct sync_file, cb);
- if (atomic_dec_and_test(&sync_file->status))
- wake_up_all(&sync_file->wq);
+ wake_up_all(&sync_file->wq);
}
/**
@@ -76,23 +75,17 @@ struct sync_file *sync_file_create(struct fence *fence)
{
struct sync_file *sync_file;
- sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]));
+ sync_file = sync_file_alloc();
if (!sync_file)
return NULL;
- sync_file->num_fences = 1;
- atomic_set(&sync_file->status, 1);
+ sync_file->fence = fence;
+
snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence), fence->context,
fence->seqno);
- sync_file->cbs[0].fence = fence;
- sync_file->cbs[0].sync_file = sync_file;
- if (fence_add_callback(fence, &sync_file->cbs[0].cb,
- fence_check_cb_func))
- atomic_dec(&sync_file->status);
-
return sync_file;
}
EXPORT_SYMBOL(sync_file_create);
@@ -121,14 +114,73 @@ err:
return NULL;
}
-static void sync_file_add_pt(struct sync_file *sync_file, int *i,
- struct fence *fence)
+/**
+ * sync_file_get_fence - get the fence related to the sync_file fd
+ * @fd: sync_file fd to get the fence from
+ *
+ * Ensures @fd references a valid sync_file and returns a fence that
+ * represents all fences in the sync_file. On error NULL is returned.
+ */
+struct fence *sync_file_get_fence(int fd)
+{
+ struct sync_file *sync_file;
+ struct fence *fence;
+
+ sync_file = sync_file_fdget(fd);
+ if (!sync_file)
+ return NULL;
+
+ fence = fence_get(sync_file->fence);
+ fput(sync_file->file);
+
+ return fence;
+}
+EXPORT_SYMBOL(sync_file_get_fence);
+
+static int sync_file_set_fence(struct sync_file *sync_file,
+ struct fence **fences, int num_fences)
+{
+ struct fence_array *array;
+
+ /*
+ * The references for the fences in the new sync_file are held
+ * in add_fence() during the merge procedure, so for num_fences == 1
+ * we already own a new reference to the fence. For num_fences > 1
+ * we own the reference of the fence_array creation.
+ */
+ if (num_fences == 1) {
+ sync_file->fence = fences[0];
+ kfree(fences);
+ } else {
+ array = fence_array_create(num_fences, fences,
+ fence_context_alloc(1), 1, false);
+ if (!array)
+ return -ENOMEM;
+
+ sync_file->fence = &array->base;
+ }
+
+ return 0;
+}
+
+static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+{
+ if (fence_is_array(sync_file->fence)) {
+ struct fence_array *array = to_fence_array(sync_file->fence);
+
+ *num_fences = array->num_fences;
+ return array->fences;
+ }
+
+ *num_fences = 1;
+ return &sync_file->fence;
+}
+
+static void add_fence(struct fence **fences, int *i, struct fence *fence)
{
- sync_file->cbs[*i].fence = fence;
- sync_file->cbs[*i].sync_file = sync_file;
+ fences[*i] = fence;
- if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
- fence_check_cb_func)) {
+ if (!fence_is_signaled(fence)) {
fence_get(fence);
(*i)++;
}
@@ -147,16 +199,24 @@ static void sync_file_add_pt(struct sync_file *sync_file, int *i,
static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
- int num_fences = a->num_fences + b->num_fences;
struct sync_file *sync_file;
- int i, i_a, i_b;
- unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
+ struct fence **fences, **nfences, **a_fences, **b_fences;
+ int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
- sync_file = sync_file_alloc(size);
+ sync_file = sync_file_alloc();
if (!sync_file)
return NULL;
- atomic_set(&sync_file->status, num_fences);
+ a_fences = get_fences(a, &a_num_fences);
+ b_fences = get_fences(b, &b_num_fences);
+ if (a_num_fences > INT_MAX - b_num_fences)
+ return NULL;
+
+ num_fences = a_num_fences + b_num_fences;
+
+ fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ goto err;
/*
* Assume sync_file a and b are both ordered and have no
@@ -165,55 +225,69 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
* If a sync_file can only be created with sync_file_merge
* and sync_file_create, this is a reasonable assumption.
*/
- for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
- struct fence *pt_a = a->cbs[i_a].fence;
- struct fence *pt_b = b->cbs[i_b].fence;
+ for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+ struct fence *pt_a = a_fences[i_a];
+ struct fence *pt_b = b_fences[i_b];
if (pt_a->context < pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_a);
+ add_fence(fences, &i, pt_a);
i_a++;
} else if (pt_a->context > pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_b);
+ add_fence(fences, &i, pt_b);
i_b++;
} else {
if (pt_a->seqno - pt_b->seqno <= INT_MAX)
- sync_file_add_pt(sync_file, &i, pt_a);
+ add_fence(fences, &i, pt_a);
else
- sync_file_add_pt(sync_file, &i, pt_b);
+ add_fence(fences, &i, pt_b);
i_a++;
i_b++;
}
}
- for (; i_a < a->num_fences; i_a++)
- sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
+ for (; i_a < a_num_fences; i_a++)
+ add_fence(fences, &i, a_fences[i_a]);
+
+ for (; i_b < b_num_fences; i_b++)
+ add_fence(fences, &i, b_fences[i_b]);
- for (; i_b < b->num_fences; i_b++)
- sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
+ if (i == 0)
+ fences[i++] = fence_get(a_fences[0]);
- if (num_fences > i)
- atomic_sub(num_fences - i, &sync_file->status);
- sync_file->num_fences = i;
+ if (num_fences > i) {
+ nfences = krealloc(fences, i * sizeof(*fences),
+ GFP_KERNEL);
+ if (!nfences)
+ goto err;
+
+ fences = nfences;
+ }
+
+ if (sync_file_set_fence(sync_file, fences, i) < 0) {
+ kfree(fences);
+ goto err;
+ }
strlcpy(sync_file->name, name, sizeof(sync_file->name));
return sync_file;
+
+err:
+ fput(sync_file->file);
+ return NULL;
+
}
static void sync_file_free(struct kref *kref)
{
struct sync_file *sync_file = container_of(kref, struct sync_file,
kref);
- int i;
-
- for (i = 0; i < sync_file->num_fences; ++i) {
- fence_remove_callback(sync_file->cbs[i].fence,
- &sync_file->cbs[i].cb);
- fence_put(sync_file->cbs[i].fence);
- }
+ if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+ fence_remove_callback(sync_file->fence, &sync_file->cb);
+ fence_put(sync_file->fence);
kfree(sync_file);
}
@@ -228,17 +302,17 @@ static int sync_file_release(struct inode *inode, struct file *file)
static unsigned int sync_file_poll(struct file *file, poll_table *wait)
{
struct sync_file *sync_file = file->private_data;
- int status;
poll_wait(file, &sync_file->wq, wait);
- status = atomic_read(&sync_file->status);
+ if (!poll_does_not_wait(wait) &&
+ !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+ if (fence_add_callback(sync_file->fence, &sync_file->cb,
+ fence_check_cb_func) < 0)
+ wake_up_all(&sync_file->wq);
+ }
- if (!status)
- return POLLIN;
- if (status < 0)
- return POLLERR;
- return 0;
+ return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
}
static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -315,8 +389,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
{
struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
+ struct fence **fences;
__u32 size;
- int ret, i;
+ int num_fences, ret, i;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
@@ -324,6 +399,8 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (info.flags || info.pad)
return -EINVAL;
+ fences = get_fences(sync_file, &num_fences);
+
/*
* Passing num_fences = 0 means that userspace doesn't want to
* retrieve any sync_fence_info. If num_fences = 0 we skip filling
@@ -333,16 +410,16 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (!info.num_fences)
goto no_fences;
- if (info.num_fences < sync_file->num_fences)
+ if (info.num_fences < num_fences)
return -EINVAL;
- size = sync_file->num_fences * sizeof(*fence_info);
+ size = num_fences * sizeof(*fence_info);
fence_info = kzalloc(size, GFP_KERNEL);
if (!fence_info)
return -ENOMEM;
- for (i = 0; i < sync_file->num_fences; ++i)
- sync_fill_fence_info(sync_file->cbs[i].fence, &fence_info[i]);
+ for (i = 0; i < num_fences; i++)
+ sync_fill_fence_info(fences[i], &fence_info[i]);
if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
size)) {
@@ -352,11 +429,8 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
strlcpy(info.name, sync_file->name, sizeof(info.name));
- info.status = atomic_read(&sync_file->status);
- if (info.status >= 0)
- info.status = !info.status;
-
- info.num_fences = sync_file->num_fences;
+ info.status = fence_is_signaled(sync_file->fence);
+ info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
ret = -EFAULT;
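To round off the sync_file.c changes: sync_file_get_fence(), added and
exported above, is how a driver turns a sync_file fd from userspace into a
fence it can wait on; the returned fence carries its own reference and may be
a fence_array covering every fence in the file. A sketch of a consumer
(example_wait_on_fd() is a hypothetical helper, not part of the patch):

static int example_wait_on_fd(int fd)
{
	struct fence *fence;
	long ret;

	fence = sync_file_get_fence(fd);
	if (!fence)
		return -EINVAL;	/* not a sync_file fd */

	/* interruptible wait with a 100 ms timeout */
	ret = fence_wait_timeout(fence, true, msecs_to_jiffies(100));
	fence_put(fence);

	if (ret < 0)
		return ret;
	return ret ? 0 : -ETIME;
}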