author	Christian König <deathsimple@vodafone.de>	2012-06-29 11:33:12 +0200
committer	Christian König <deathsimple@vodafone.de>	2012-07-17 10:31:39 +0200
commit	7ecc45e3ef8468abb062be2a8bb427029342f42d (patch)
tree	b161f84f121bc15ed6d1a511da1be5984721069f
parent	49099c4991da3c94773f888aea2e9d27b8a7c6d1 (diff)
download	linux-7ecc45e3ef8468abb062be2a8bb427029342f42d.tar.bz2
drm/radeon: add error handling to fence_wait_empty_locked
Instead of returning the error, handle it directly, and while at it
fix the comments about the ring lock.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
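In short, the patch turns a fallible wait into a self-healing one: if radeon_fence_wait_seq() fails with -EDEADLK (the lockup-detection result), the ring lock is dropped, the GPU is reset, the lock is retaken, and the wait is retried; any other error can only be logged, because the function now returns void. Below is a minimal userspace sketch of that retry pattern, using pthread mutexes and stub functions as stand-ins for the driver internals (wait_seq, gpu_reset, and ring_lock are illustrative names, not the real kernel symbols):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver internals; only the control
 * flow of the patch is reproduced here. */
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

static int wait_seq(unsigned long long seq)
{
	(void)seq;
	return 0;	/* pretend the fence signaled; could be -EDEADLK */
}

static int gpu_reset(void)
{
	return 0;	/* pretend the reset succeeded */
}

/* Same shape as the new radeon_fence_wait_empty_locked():
 * the caller must hold ring_lock. */
static void wait_empty_locked(unsigned long long seq)
{
	while (1) {
		int r = wait_seq(seq);
		if (r == -EDEADLK) {
			/* Lockup detected: drop the lock, reset the
			 * GPU, retake the lock, then retry the wait. */
			pthread_mutex_unlock(&ring_lock);
			r = gpu_reset();
			pthread_mutex_lock(&ring_lock);
			if (!r)
				continue;
		}
		if (r)
			fprintf(stderr,
				"error waiting for ring to become idle (%d)\n",
				r);
		return;
	}
}

int main(void)
{
	pthread_mutex_lock(&ring_lock);
	wait_empty_locked(42ULL);	/* arbitrary example sequence number */
	pthread_mutex_unlock(&ring_lock);
	return 0;
}

Note how the lock is dropped around the reset and retaken before retrying, so the "caller must hold ring lock" invariant holds again on every path out of the loop.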
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_fence.c	33
2 files changed, 22 insertions, 13 deletions
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 77b4519b19b8..5861ec86725f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -239,7 +239,7 @@ void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_any(struct radeon_device *rdev,
			   struct radeon_fence **fences,
			   bool intr);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7b55625a5e18..be4e4f390e89 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -440,14 +440,11 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 	return 0;
 }
 
+/* caller must hold ring lock */
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq;
 
-	/* We are not protected by ring lock when reading current seq but
-	 * it's ok as worst case is we return to early while we could have
-	 * wait.
-	 */
 	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
 		/* nothing to wait for, last_seq is
@@ -457,15 +454,27 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
 }
 
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+/* caller must hold ring lock */
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
-	/* We are not protected by ring lock when reading current seq
-	 * but it's ok as wait empty is call from place where no more
-	 * activity can be scheduled so there won't be concurrent access
-	 * to seq value.
-	 */
-	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].sync_seq[ring],
-				     ring, false, false);
+	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+
+	while(1) {
+		int r;
+		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+		if (r == -EDEADLK) {
+			mutex_unlock(&rdev->ring_lock);
+			r = radeon_gpu_reset(rdev);
+			mutex_lock(&rdev->ring_lock);
+			if (!r)
+				continue;
+		}
+		if (r) {
+			dev_err(rdev->dev, "error waiting for ring to become"
+				" idle (%d)\n", r);
+		}
+		return;
+	}
 }
 
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
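For context, both *_locked helpers now carry a "caller must hold ring lock" comment in place of the removed ones that argued the unlocked read was harmless. A hedged sketch of the expected call pattern, draining every ring under the lock (drain_all_rings is a hypothetical name and the body assumes the radeon driver headers of this period; it is not a verbatim excerpt from the tree):

/* Hypothetical caller, for illustration only: wait for every ring to
 * go idle while holding the ring lock, as the new comments require. */
static void drain_all_rings(struct radeon_device *rdev)
{
	int i;

	mutex_lock(&rdev->ring_lock);
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		radeon_fence_wait_empty_locked(rdev, i);
	mutex_unlock(&rdev->ring_lock);
}

Since radeon_fence_wait_empty_locked() now returns void, such callers need no error path of their own; a detected lockup is resolved inside the wait by resetting the GPU.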