author		Daniel Vetter <daniel.vetter@ffwll.ch>	2013-08-15 00:02:46 +0200
committer	Dave Airlie <airlied@redhat.com>	2013-08-21 12:58:17 +1000
commit		319c933c71f3dbdb2b3274d1634d3494c70efa06 (patch)
tree		21573ae28fb9b9c38dd974eebc80a69b9a7d70e1 /drivers/gpu/drm/drm_gem.c
parent		20228c447846da9399ead53fdbbc8ab69b47788a (diff)
download	linux-319c933c71f3dbdb2b3274d1634d3494c70efa06.tar.bz2
drm/prime: proper locking+refcounting for obj->dma_buf link
The export dma-buf cache is semantically similar to a flink name, so it makes sense to treat it the same way and remove the name (i.e. the dma_buf pointer) and its reference when the last gem handle disappears.

Again we need to be careful, but doubly so: not only could someone race an export against a gem close ioctl (so we need to recheck obj->handle_count when assigning the new name), but multiple exports can also race against each other. This is prevented by holding dev->object_name_lock across the entire section which touches obj->dma_buf.

With the new scheme we also need to reinstate the obj->dma_buf link at import time (in case the only reference userspace held in between was through the dma-buf fd and not through any native gem handle). For simplicity we don't check whether it's a native object but unconditionally set up that link; with the new scheme of removing the obj->dma_buf reference when the last handle disappears we can do that. To make it clear that this is no longer just for exported buffers, also rename it from export_dma_buf to dma_buf.

To make sure that no one can race a fd_to_handle or handle_to_fd against gem_close we use the same trick as in flink of extending the dev->object_name_lock critical section.

With this change we finally have a guaranteed 1:1 relationship (at least for native objects) between gem objects and dma-bufs, even accounting for races (which can happen since the dma-buf itself holds a reference while in-flight). This prevents igt/prime_self_import/export-vs-gem_close-race from Oopsing the kernel.

There is still a leak though, since the per-file-priv dma-buf/handle cache handling is racy. That will be fixed in a later patch.

v2: Remove the bogus dma_buf_put from the export_and_register_object failure path if we've raced with the handle count dropping to 0.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
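To make the locking rule above concrete, here is a minimal user-space sketch (plain C with pthreads, not the kernel code itself) of the scheme the patch describes: every path that touches obj->dma_buf or obj->handle_count runs under a single lock standing in for dev->object_name_lock, the export path re-checks handle_count under that lock, and the cached reference is dropped together with the flink name when the last handle goes away. The types and functions (fake_gem_object, fake_export, fake_handle_unreference) are hypothetical stand-ins for illustration only, not DRM APIs.

/* Build: cc -pthread sketch.c -o sketch */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for struct dma_buf and struct drm_gem_object. */
struct fake_dma_buf { int refcount; };

struct fake_gem_object {
	int handle_count;               /* GEM handles held by userspace    */
	struct fake_dma_buf *dma_buf;   /* cached export, like obj->dma_buf */
};

static pthread_mutex_t object_name_lock = PTHREAD_MUTEX_INITIALIZER;

static void fake_dma_buf_put(struct fake_dma_buf *buf)
{
	if (--buf->refcount == 0)
		free(buf);
}

/*
 * Export path: the whole section that inspects handle_count and assigns
 * obj->dma_buf runs under object_name_lock, so it cannot race with a
 * concurrent gem close dropping the last handle, nor with another export.
 */
static struct fake_dma_buf *fake_export(struct fake_gem_object *obj)
{
	struct fake_dma_buf *buf = NULL;

	pthread_mutex_lock(&object_name_lock);
	if (obj->handle_count == 0)        /* raced with gem close: refuse  */
		goto out;
	if (obj->dma_buf) {                /* already exported: reuse cache */
		buf = obj->dma_buf;
		buf->refcount++;
	} else {
		buf = calloc(1, sizeof(*buf));
		buf->refcount = 2;         /* one for the caller, one for the cache */
		obj->dma_buf = buf;
	}
out:
	pthread_mutex_unlock(&object_name_lock);
	return buf;
}

/*
 * Handle-drop path: mirrors the patched
 * drm_gem_object_handle_unreference_unlocked(); the cached dma_buf
 * reference is released, like the flink name, once the last handle
 * disappears.
 */
static void fake_handle_unreference(struct fake_gem_object *obj)
{
	pthread_mutex_lock(&object_name_lock);
	if (--obj->handle_count == 0 && obj->dma_buf) {
		fake_dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
	pthread_mutex_unlock(&object_name_lock);
}

int main(void)
{
	struct fake_gem_object obj = { .handle_count = 1 };
	struct fake_dma_buf *fd_ref = fake_export(&obj);

	fake_handle_unreference(&obj);      /* last handle goes away         */
	printf("cache cleared: %s\n", obj.dma_buf == NULL ? "yes" : "no");
	if (fd_ref)
		fake_dma_buf_put(fd_ref);   /* the "fd" reference outlives it */
	return 0;
}

The point of giving the cache its own reference (refcount = 2 in fake_export) is that the dma-buf handed to userspace stays valid even after the last gem handle is closed, matching the commit message's note that the dma-buf itself holds a reference while in-flight.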
Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--	drivers/gpu/drm/drm_gem.c	24
1 file changed, 21 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d47aa774d64b..4b3c533be859 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -195,9 +195,14 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 		drm_prime_remove_buf_handle(&filp->prime,
 				obj->import_attach->dmabuf);
 	}
-	if (obj->export_dma_buf) {
+
+	/*
+	 * Note: obj->dma_buf can't disappear as long as we still hold a
+	 * handle reference in obj->handle_count.
+	 */
+	if (obj->dma_buf) {
 		drm_prime_remove_buf_handle(&filp->prime,
-				obj->export_dma_buf);
+				obj->dma_buf);
 	}
 }
 
@@ -231,6 +236,15 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj)
 	}
 }
 
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+	/* Unbreak the reference cycle if we have an exported dma_buf. */
+	if (obj->dma_buf) {
+		dma_buf_put(obj->dma_buf);
+		obj->dma_buf = NULL;
+	}
+}
+
 static void
 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 {
@@ -244,8 +258,10 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 	*/
 
 	mutex_lock(&obj->dev->object_name_lock);
-	if (--obj->handle_count == 0)
+	if (--obj->handle_count == 0) {
 		drm_gem_object_handle_free(obj);
+		drm_gem_object_exported_dma_buf_free(obj);
+	}
 	mutex_unlock(&obj->dev->object_name_lock);
 
 	drm_gem_object_unreference_unlocked(obj);
@@ -712,6 +728,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
+	WARN_ON(obj->dma_buf);
+
 	if (obj->filp)
 		fput(obj->filp);
 }