author     Thomas Hellstrom <thellstrom@vmware.com>  2012-10-22 12:51:26 +0000
committer  Dave Airlie <airlied@redhat.com>  2012-10-23 10:15:21 +1000
commit     b8e902f24fdd16c4373ddc37a4e150c4afe9c6db (patch)
tree       e17556304d126329c63af9367432fe796294d08a /drivers/gpu/drm
parent     7bc17a7837bf4ec8fd2d63438c0b6b0160c454c1 (diff)
download   linux-b8e902f24fdd16c4373ddc37a4e150c4afe9c6db.tar.bz2
drm/ttm: Fix a theoretical race in ttm_bo_cleanup_refs()
In theory, that function could release the lru lock between checking for a bo on the ddestroy list and a successful reserve, if the bo was already reserved and the function was called with waiting reserves allowed. However, all current reservers of a bo on the ddestroy list atomically take the bo off the list after a successful reserve, so this race should never have been hit; there is no need to backport for stable.

This patch also fixes a case found by Maarten Lankhorst where ttm_mem_evict_first, called with no_wait_gpu, would incorrectly spin waiting for bo idle when trying to evict a busy buffer that also sits on the ddestroy list.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
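To make the locking pattern introduced by the first two hunks easier to follow, here is a minimal userspace sketch of the retry-reserve idea. This is not TTM code: fake_bo, try_reserve, wait_unreserved and cleanup_refs_sketch are invented stand-ins, and pthread mutexes stand in for the lru spinlock and the bo reservation. The point is the shape of the flow: attempt a non-blocking reserve while holding the list lock, and on -EBUSY drop the lock, wait for the current holder if allowed, then jump back so the ddestroy-list check is redone under the lock.

/*
 * Minimal sketch of the retry-reserve pattern (illustration only).
 * Build with: gcc -pthread sketch.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_bo {
        pthread_mutex_t reserve;   /* stands in for the bo reservation */
        bool on_ddestroy;          /* stands in for !list_empty(&bo->ddestroy) */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Non-blocking reserve, like ttm_bo_reserve_locked(bo, false, true, false, 0). */
static int try_reserve(struct fake_bo *bo)
{
        return pthread_mutex_trylock(&bo->reserve) ? -EBUSY : 0;
}

/* Block until the current holder releases, like ttm_bo_wait_unreserved(). */
static void wait_unreserved(struct fake_bo *bo)
{
        pthread_mutex_lock(&bo->reserve);
        pthread_mutex_unlock(&bo->reserve);
}

static int cleanup_refs_sketch(struct fake_bo *bo, bool no_wait_reserve)
{
        int ret;

retry_reserve:
        pthread_mutex_lock(&lru_lock);

        /* Someone else may have finished the cleanup while we slept. */
        if (!bo->on_ddestroy) {
                pthread_mutex_unlock(&lru_lock);
                return 0;
        }

        ret = try_reserve(bo);
        if (ret == -EBUSY) {
                pthread_mutex_unlock(&lru_lock);
                if (no_wait_reserve)
                        return -EBUSY;
                wait_unreserved(bo);
                goto retry_reserve;     /* re-check the list under the lock */
        }

        /* ... actual cleanup would happen here: unlist, free, etc. ... */
        bo->on_ddestroy = false;
        pthread_mutex_unlock(&bo->reserve);
        pthread_mutex_unlock(&lru_lock);
        return 0;
}

int main(void)
{
        static struct fake_bo bo = {
                .reserve = PTHREAD_MUTEX_INITIALIZER,
                .on_ddestroy = true,
        };

        printf("cleanup returned %d\n", cleanup_refs_sketch(&bo, false));
        return 0;
}

The key property mirrored from the patch is that the reserve attempt itself never sleeps while the list lock is held; any waiting happens only after the lock has been dropped, and the list membership is then re-validated from the top.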
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d42631cade5a..bf6e4b5a73b5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -580,6 +580,7 @@ retry:
 	if (unlikely(ret != 0))
 		return ret;
 
+retry_reserve:
 	spin_lock(&glob->lru_lock);
 
 	if (unlikely(list_empty(&bo->ddestroy))) {
@@ -587,14 +588,20 @@ retry:
 		return 0;
 	}
 
-	ret = ttm_bo_reserve_locked(bo, interruptible,
-				    no_wait_reserve, false, 0);
+	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-	if (unlikely(ret != 0)) {
+	if (unlikely(ret == -EBUSY)) {
 		spin_unlock(&glob->lru_lock);
-		return ret;
+		if (likely(!no_wait_reserve))
+			ret = ttm_bo_wait_unreserved(bo, interruptible);
+		if (unlikely(ret != 0))
+			return ret;
+
+		goto retry_reserve;
 	}
 
+	BUG_ON(ret != 0);
+
 	/**
 	 * We can re-check for sync object without taking
 	 * the bo::lock since setting the sync object requires
@@ -811,10 +818,7 @@ retry:
 					  no_wait_reserve, no_wait_gpu);
 		kref_put(&bo->list_kref, ttm_bo_release_list);
 
-		if (likely(ret == 0 || ret == -ERESTARTSYS))
-			return ret;
-
-		goto retry;
+		return ret;
 	}
 
 	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
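The last hunk is the fix for the case found by Maarten Lankhorst: previously, when a buffer found on the ddestroy list could not be cleaned up, any error other than 0 or -ERESTARTSYS looped straight back to retry, so with no_wait_gpu the same busy buffer was retried over and over; now the error is simply returned to the caller. A tiny userspace sketch of that behavior change follows; cleanup_refs_stub, evict_first_old and evict_first_new are invented names, not TTM functions.

/* Sketch of the old vs. new evict flow for a busy ddestroy buffer. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for ttm_bo_cleanup_refs(): with no_wait_gpu set, a buffer that
 * is still busy on the GPU reports -EBUSY instead of waiting for idle. */
static int cleanup_refs_stub(bool gpu_busy, bool no_wait_gpu)
{
        return (gpu_busy && no_wait_gpu) ? -EBUSY : 0;
}

/* Old flow: only 0 and -ERESTARTSYS were passed up; every other error
 * jumped back to retry, so -EBUSY made the loop spin on the same buffer.
 * The retry cap exists only so this demo terminates. */
static int evict_first_old(bool gpu_busy, bool no_wait_gpu)
{
        int ret, tries = 0;
retry:
        ret = cleanup_refs_stub(gpu_busy, no_wait_gpu);
        if (ret == 0 || ret == -ERESTARTSYS)
                return ret;
        if (++tries < 1000)
                goto retry;
        return ret;
}

/* New flow: hand whatever cleanup reported straight back to the caller. */
static int evict_first_new(bool gpu_busy, bool no_wait_gpu)
{
        return cleanup_refs_stub(gpu_busy, no_wait_gpu);
}

int main(void)
{
        printf("old: %d after many retries, new: %d immediately\n",
               evict_first_old(true, true), evict_first_new(true, true));
        return 0;
}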