author		Dave Airlie <airlied@redhat.com>	2014-09-12 13:58:27 +1000
committer	Dave Airlie <airlied@redhat.com>	2014-09-12 13:58:27 +1000
commit		10d123b2f2b5bf54f59a884f12018d24a97d5a63 (patch)
tree		308e3e9a7e687e9324fb7b18b5d9f90309f49ee7 /drivers/gpu
parent		e351943b081f4d9e6f692ce1a6117e8d2e71f478 (diff)
parent		298593b609ecbf9e8a99e8a41c8c46acb3528468 (diff)
download	linux-10d123b2f2b5bf54f59a884f12018d24a97d5a63.tar.bz2
Merge branch 'drm-next-3.18' of git://people.freedesktop.org/~agd5f/linux into drm-next
concurrent buffer reads.

* 'drm-next-3.18' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: allow concurrent buffer reads
  drm/radeon: add the infrastructure for concurrent buffer access
  drm/ttm: allow fence to be added as shared
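The core of the merge is a signature change across every ASIC copy callback. As an orientation aid, a condensed before/after sketch (illustrative, assembled from the hunks below, not a verbatim excerpt):

	/* before: errno returned, fence through an out-parameter */
	int cik_copy_dma(struct radeon_device *rdev,
			 uint64_t src_offset, uint64_t dst_offset,
			 unsigned num_gpu_pages,
			 struct radeon_fence **fence);

	/* after: the fence (or an ERR_PTR-encoded errno) is returned
	 * directly, and the copy syncs against the buffer's whole
	 * reservation object instead of a single fence */
	struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
					  uint64_t src_offset, uint64_t dst_offset,
					  unsigned num_gpu_pages,
					  struct reservation_object *resv);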
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/qxl/qxl_release.c	1
-rw-r--r--	drivers/gpu/drm/radeon/cik.c	25
-rw-r--r--	drivers/gpu/drm/radeon/cik_sdma.c	25
-rw-r--r--	drivers/gpu/drm/radeon/evergreen_dma.c	24
-rw-r--r--	drivers/gpu/drm/radeon/r100.c	21
-rw-r--r--	drivers/gpu/drm/radeon/r200.c	21
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	23
-rw-r--r--	drivers/gpu/drm/radeon/r600_dma.c	25
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h	43
-rw-r--r--	drivers/gpu/drm/radeon/radeon_asic.h	74
-rw-r--r--	drivers/gpu/drm/radeon/radeon_benchmark.c	30
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cs.c	10
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ib.c	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_semaphore.c	38
-rw-r--r--	drivers/gpu/drm/radeon/radeon_test.c	24
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ttm.c	12
-rw-r--r--	drivers/gpu/drm/radeon/radeon_vm.c	19
-rw-r--r--	drivers/gpu/drm/radeon/rv770_dma.c	25
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	18
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c	3
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_resource.c	5
22 files changed, 282 insertions, 211 deletions
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index a6e19c83143e..446e71ca36cb 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -226,6 +226,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
qxl_bo_ref(bo);
entry->tv.bo = &bo->tbo;
+ entry->tv.shared = false;
list_add_tail(&entry->tv.head, &release->bos);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 1f598ab3b9a7..0b5a230d8b96 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3959,18 +3959,19 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
*
* Copy GPU paging using the CP DMA engine (CIK+).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
-int cik_copy_cpdma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
+struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv)
{
struct radeon_semaphore *sem = NULL;
+ struct radeon_fence *fence;
int ring_index = rdev->asic->copy.blit_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes, control;
@@ -3980,7 +3981,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
+ return ERR_PTR(r);
}
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -3989,10 +3990,10 @@ int cik_copy_cpdma(struct radeon_device *rdev,
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
- radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_resv(sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
@@ -4014,17 +4015,17 @@ int cik_copy_cpdma(struct radeon_device *rdev,
dst_offset += cur_size_in_bytes;
}
- r = radeon_fence_emit(rdev, fence, ring->idx);
+ r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, *fence);
+ radeon_semaphore_free(rdev, &sem, fence);
- return r;
+ return fence;
}
/*
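Since the converted functions now encode errno values in the returned pointer, callers switch from checking an int to the <linux/err.h> helpers. A minimal, hypothetical caller sketch (the helpers and radeon functions are real; the surrounding context is assumed):

	#include <linux/err.h>

	struct radeon_fence *fence;
	int r;

	fence = cik_copy_cpdma(rdev, src, dst, num_pages, bo->tbo.resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);	/* decode the ERR_PTR() back to an errno */

	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;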
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 192278bc993c..c01a6100c318 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -537,18 +537,19 @@ void cik_sdma_fini(struct radeon_device *rdev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
*
* Copy GPU paging using the DMA engine (CIK).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
-int cik_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
+struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv)
{
struct radeon_semaphore *sem = NULL;
+ struct radeon_fence *fence;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes;
@@ -558,7 +559,7 @@ int cik_copy_dma(struct radeon_device *rdev,
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
+ return ERR_PTR(r);
}
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -567,10 +568,10 @@ int cik_copy_dma(struct radeon_device *rdev,
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
- radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_resv(sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
@@ -589,17 +590,17 @@ int cik_copy_dma(struct radeon_device *rdev,
dst_offset += cur_size_in_bytes;
}
- r = radeon_fence_emit(rdev, fence, ring->idx);
+ r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, *fence);
+ radeon_semaphore_free(rdev, &sem, fence);
- return r;
+ return fence;
}
/**
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index afaba388c36d..946f37d0b469 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -104,12 +104,14 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
-int evergreen_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
+struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv)
{
struct radeon_semaphore *sem = NULL;
+ struct radeon_fence *fence;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
@@ -119,7 +121,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
+ return ERR_PTR(r);
}
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
@@ -128,10 +130,10 @@ int evergreen_copy_dma(struct radeon_device *rdev,
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
- radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_resv(sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
@@ -148,17 +150,17 @@ int evergreen_copy_dma(struct radeon_device *rdev,
dst_offset += cur_size_in_dw * 4;
}
- r = radeon_fence_emit(rdev, fence, ring->idx);
+ r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, *fence);
+ radeon_semaphore_free(rdev, &sem, fence);
- return r;
+ return fence;
}
/**
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 4c5ec44ff328..c6b486f888d5 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -855,13 +855,14 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev,
return false;
}
-int r100_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
+struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_fence *fence;
uint32_t cur_pages;
uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t pitch;
@@ -882,7 +883,7 @@ int r100_copy_blit(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ring, ndw);
if (r) {
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
while (num_gpu_pages > 0) {
cur_pages = num_gpu_pages;
@@ -922,11 +923,13 @@ int r100_copy_blit(struct radeon_device *rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
- if (fence) {
- r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+ r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- return r;
+ return fence;
}
static int r100_cp_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 67780374a652..732d4938aab7 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -80,13 +80,14 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
return vtx_size;
}
-int r200_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
+struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_fence *fence;
uint32_t size;
uint32_t cur_size;
int i, num_loops;
@@ -98,7 +99,7 @@ int r200_copy_dma(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
+ return ERR_PTR(r);
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
@@ -118,11 +119,13 @@ int r200_copy_dma(struct radeon_device *rdev,
}
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
- if (fence) {
- r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+ r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- return r;
+ return fence;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a95ced569d84..5e9146b968b9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2894,12 +2894,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
-int r600_copy_cpdma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
+struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv)
{
struct radeon_semaphore *sem = NULL;
+ struct radeon_fence *fence;
int ring_index = rdev->asic->copy.blit_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes, tmp;
@@ -2909,7 +2910,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
+ return ERR_PTR(r);
}
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -2918,10 +2919,10 @@ int r600_copy_cpdma(struct radeon_device *rdev,
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
- radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_resv(sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
@@ -2948,17 +2949,17 @@ int r600_copy_cpdma(struct radeon_device *rdev,
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
- r = radeon_fence_emit(rdev, fence, ring->idx);
+ r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, *fence);
+ radeon_semaphore_free(rdev, &sem, fence);
- return r;
+ return fence;
}
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 51fd98553eaf..fc54224ce87b 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -436,18 +436,19 @@ void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
*
* Copy GPU paging using the DMA engine (r6xx).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
-int r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
+struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv)
{
struct radeon_semaphore *sem = NULL;
+ struct radeon_fence *fence;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
@@ -457,7 +458,7 @@ int r600_copy_dma(struct radeon_device *rdev,
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
+ return ERR_PTR(r);
}
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
@@ -466,10 +467,10 @@ int r600_copy_dma(struct radeon_device *rdev,
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
- radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_resv(sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
@@ -486,15 +487,15 @@ int r600_copy_dma(struct radeon_device *rdev,
dst_offset += cur_size_in_dw * 4;
}
- r = radeon_fence_emit(rdev, fence, ring->idx);
+ r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, *fence);
+ radeon_semaphore_free(rdev, &sem, fence);
- return r;
+ return fence;
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 79c988db79ad..2e41dc12cd00 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -585,8 +585,11 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
-void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
- struct radeon_fence *fence);
+void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence);
+void radeon_semaphore_sync_resv(struct radeon_semaphore *semaphore,
+ struct reservation_object *resv,
+ bool shared);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int waiting_ring);
@@ -1855,24 +1858,24 @@ struct radeon_asic {
} display;
/* copy functions for bo handling */
struct {
- int (*blit)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence);
+ struct radeon_fence *(*blit)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
u32 blit_ring_index;
- int (*dma)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence);
+ struct radeon_fence *(*dma)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
u32 dma_ring_index;
/* method used for bo copy */
- int (*copy)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence);
+ struct radeon_fence *(*copy)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
/* ring used for bo copies */
u32 copy_ring_index;
} copy;
@@ -2833,9 +2836,9 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
-#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
-#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
-#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
+#define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
+#define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
+#define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 987a3b713e06..ca01bb8ea217 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -81,11 +81,11 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev,
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
-int r100_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence);
+struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -153,11 +153,11 @@ void r100_ring_hdp_flush(struct radeon_device *rdev,
/*
* r200,rv250,rs300,rv280
*/
-extern int r200_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence);
+struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
void r200_set_safe_registers(struct radeon_device *rdev);
/*
@@ -341,12 +341,14 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-int r600_copy_cpdma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages, struct radeon_fence **fence);
-int r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages, struct radeon_fence **fence);
+struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
+struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -462,10 +464,10 @@ bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc);
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
void r700_cp_fini(struct radeon_device *rdev);
-int rv770_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence);
+struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
u32 rv770_get_xclk(struct radeon_device *rdev);
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int rv770_get_temp(struct radeon_device *rdev);
@@ -536,10 +538,10 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
-int evergreen_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence);
+struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
int evergreen_get_temp(struct radeon_device *rdev);
@@ -701,10 +703,10 @@ int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-int si_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence);
+struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
void si_dma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
@@ -760,14 +762,14 @@ bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
bool emit_wait);
void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int cik_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence);
-int cik_copy_cpdma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence);
+struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
+struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv);
int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 69f5695bdab9..1e8855060fc7 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -45,33 +45,29 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
for (i = 0; i < n; i++) {
switch (flag) {
case RADEON_BENCHMARK_COPY_DMA:
- r = radeon_copy_dma(rdev, saddr, daddr,
- size / RADEON_GPU_PAGE_SIZE,
- &fence);
+ fence = radeon_copy_dma(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE,
+ NULL);
break;
case RADEON_BENCHMARK_COPY_BLIT:
- r = radeon_copy_blit(rdev, saddr, daddr,
- size / RADEON_GPU_PAGE_SIZE,
- &fence);
+ fence = radeon_copy_blit(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE,
+ NULL);
break;
default:
DRM_ERROR("Unknown copy method\n");
- r = -EINVAL;
+ return -EINVAL;
}
- if (r)
- goto exit_do_move;
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
r = radeon_fence_wait(fence, false);
- if (r)
- goto exit_do_move;
radeon_fence_unref(&fence);
+ if (r)
+ return r;
}
end_jiffies = jiffies;
- r = jiffies_to_msecs(end_jiffies - start_jiffies);
-
-exit_do_move:
- if (fence)
- radeon_fence_unref(&fence);
- return r;
+ return jiffies_to_msecs(end_jiffies - start_jiffies);
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6e3d1c8f3483..f662de41ba49 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -183,6 +183,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
+ p->relocs[i].tv.shared = !r->write_domain;
p->relocs[i].handle = r->handle;
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
@@ -254,16 +255,13 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
for (i = 0; i < p->nrelocs; i++) {
struct reservation_object *resv;
- struct fence *fence;
if (!p->relocs[i].robj)
continue;
resv = p->relocs[i].robj->tbo.resv;
- fence = reservation_object_get_excl(resv);
-
- radeon_semaphore_sync_to(p->ib.semaphore,
- (struct radeon_fence *)fence);
+ radeon_semaphore_sync_resv(p->ib.semaphore, resv,
+ p->relocs[i].tv.shared);
}
}
@@ -568,7 +566,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
- radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
+ radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
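The per-buffer shared flag is derived straight from the userspace relocation: a buffer with no write domain is only read by this command stream, so its fence may be added as shared. Restating the rule from the hunk above in sketch form:

	/* read-only for this CS -> shared fence, concurrent readers allowed;
	 * written by this CS    -> exclusive fence, serialized as before */
	p->relocs[i].tv.shared = !r->write_domain;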
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 6fc7461d70c4..3f39fcca4d07 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -145,7 +145,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
if (ib->vm) {
struct radeon_fence *vm_id_fence;
vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
- radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
+ radeon_semaphore_sync_fence(ib->semaphore, vm_id_fence);
}
/* sync with other rings */
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 56d9fd66d8ae..5bfbd8372a3c 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -96,15 +96,15 @@ bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
}
/**
- * radeon_semaphore_sync_to - use the semaphore to sync to a fence
+ * radeon_semaphore_sync_fence - use the semaphore to sync to a fence
*
* @semaphore: semaphore object to add fence to
* @fence: fence to sync to
*
* Sync to the fence using this semaphore object
*/
-void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
- struct radeon_fence *fence)
+void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence)
{
struct radeon_fence *other;
@@ -116,6 +116,38 @@ void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
}
/**
+ * radeon_semaphore_sync_resv - use the semaphore to sync to a reservation object
+ *
+ * @sema: semaphore object to add fences from the reservation object to
+ * @resv: reservation object with embedded fences
+ * @shared: true if we should only sync to the exclusive fence
+ *
+ * Sync to all fences of the reservation object using this semaphore object
+ */
+void radeon_semaphore_sync_resv(struct radeon_semaphore *sema,
+ struct reservation_object *resv,
+ bool shared)
+{
+ struct reservation_object_list *flist;
+ struct fence *f;
+ unsigned i;
+
+ /* always sync to the exclusive fence */
+ f = reservation_object_get_excl(resv);
+ radeon_semaphore_sync_fence(sema, (struct radeon_fence*)f);
+
+ flist = reservation_object_get_list(resv);
+ if (shared || !flist)
+ return;
+
+ for (i = 0; i < flist->shared_count; ++i) {
+ f = rcu_dereference_protected(flist->shared[i],
+ reservation_object_held(resv));
+ radeon_semaphore_sync_fence(sema, (struct radeon_fence*)f);
+ }
+}
+
+/**
* radeon_semaphore_sync_rings - sync ring to all registered fences
*
* @rdev: radeon_device pointer
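radeon_semaphore_sync_resv() provides the two sync policies the rest of the series relies on. A sketch of how a caller picks the shared argument (the helper call is real, the surrounding context is assumed):

	/* writer: must wait for the last write AND all current readers */
	radeon_semaphore_sync_resv(sem, bo->tbo.resv, false);

	/* reader: waiting for the exclusive (write) fence is sufficient;
	 * other readers may run concurrently */
	radeon_semaphore_sync_resv(sem, bo->tbo.resv, true);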
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 17bc3dced9f1..ce943e1a5e51 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -116,11 +116,16 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
radeon_bo_kunmap(gtt_obj[i]);
if (ring == R600_RING_TYPE_DMA_INDEX)
- r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
+ size / RADEON_GPU_PAGE_SIZE,
+ NULL);
else
- r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
- if (r) {
+ fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
+ size / RADEON_GPU_PAGE_SIZE,
+ NULL);
+ if (IS_ERR(fence)) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
+ r = PTR_ERR(fence);
goto out_lclean_unpin;
}
@@ -162,11 +167,16 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
radeon_bo_kunmap(vram_obj);
if (ring == R600_RING_TYPE_DMA_INDEX)
- r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
+ size / RADEON_GPU_PAGE_SIZE,
+ NULL);
else
- r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
- if (r) {
+ fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
+ size / RADEON_GPU_PAGE_SIZE,
+ NULL);
+ if (IS_ERR(fence)) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
+ r = PTR_ERR(fence);
goto out_lclean_unpin;
}
@@ -222,7 +232,7 @@ out_lclean:
radeon_bo_unreserve(gtt_obj[i]);
radeon_bo_unref(&gtt_obj[i]);
}
- if (fence)
+ if (fence && !IS_ERR(fence))
radeon_fence_unref(&fence);
break;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 62d1f4d730a2..eca2ce60d440 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -233,6 +233,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence;
+ unsigned num_pages;
int r, ridx;
rdev = radeon_get_rdev(bo->bdev);
@@ -269,12 +270,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
- /* sync other rings */
- fence = (struct radeon_fence *)reservation_object_get_excl(bo->resv);
- r = radeon_copy(rdev, old_start, new_start,
- new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
- &fence);
- /* FIXME: handle copy error */
+ num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+ fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
r = ttm_bo_move_accel_cleanup(bo, &fence->base,
evict, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 671ee566aa51..ce870959dff8 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -143,6 +143,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
+ list[0].tv.shared = false;
list[0].tiling_flags = 0;
list[0].handle = 0;
list_add(&list[0].tv.head, head);
@@ -156,6 +157,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
+ list[idx].tv.shared = false;
list[idx].tiling_flags = 0;
list[idx].handle = 0;
list_add(&list[idx++].tv.head, head);
@@ -395,6 +397,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
memset(&tv, 0, sizeof(tv));
tv.bo = &bo->tbo;
+ tv.shared = false;
INIT_LIST_HEAD(&head);
list_add(&tv.head, &head);
@@ -693,15 +696,10 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
incr, R600_PTE_VALID);
if (ib.length_dw != 0) {
- struct fence *fence;
-
radeon_asic_vm_pad_ib(rdev, &ib);
- fence = reservation_object_get_excl(pd->tbo.resv);
- radeon_semaphore_sync_to(ib.semaphore,
- (struct radeon_fence *)fence);
-
- radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
+ radeon_semaphore_sync_resv(ib.semaphore, pd->tbo.resv, false);
+ radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
@@ -826,11 +824,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
unsigned nptes;
uint64_t pte;
- struct fence *fence;
- fence = reservation_object_get_excl(pt->tbo.resv);
- radeon_semaphore_sync_to(ib->semaphore,
- (struct radeon_fence *)fence);
+ radeon_semaphore_sync_resv(ib->semaphore, pt->tbo.resv, false);
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -972,7 +967,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
radeon_asic_vm_pad_ib(rdev, &ib);
WARN_ON(ib.length_dw > ndw);
- radeon_semaphore_sync_to(ib.semaphore, vm->fence);
+ radeon_semaphore_sync_fence(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index 74426ac2bb5c..c112764adfdf 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -33,18 +33,19 @@
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
*
* Copy GPU paging using the DMA engine (r7xx).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
-int rv770_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
+struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv)
{
struct radeon_semaphore *sem = NULL;
+ struct radeon_fence *fence;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
@@ -54,7 +55,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
+ return ERR_PTR(r);
}
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
@@ -63,10 +64,10 @@ int rv770_copy_dma(struct radeon_device *rdev,
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
- radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_resv(sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
@@ -83,15 +84,15 @@ int rv770_copy_dma(struct radeon_device *rdev,
dst_offset += cur_size_in_dw * 4;
}
- r = radeon_fence_emit(rdev, fence, ring->idx);
+ r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, *fence);
+ radeon_semaphore_free(rdev, &sem, fence);
- return r;
+ return fence;
}
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 7c22baaf94db..9b0dfbc913f3 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -218,18 +218,19 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
*
* Copy GPU paging using the DMA engine (SI).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
-int si_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
+struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv)
{
struct radeon_semaphore *sem = NULL;
+ struct radeon_fence *fence;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes;
@@ -239,7 +240,7 @@ int si_copy_dma(struct radeon_device *rdev,
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
+ return ERR_PTR(r);
}
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -248,10 +249,10 @@ int si_copy_dma(struct radeon_device *rdev,
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
- radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_resv(sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
@@ -268,16 +269,16 @@ int si_copy_dma(struct radeon_device *rdev,
dst_offset += cur_size_in_bytes;
}
- r = radeon_fence_emit(rdev, fence, ring->idx);
+ r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, *fence);
+ radeon_semaphore_free(rdev, &sem, fence);
- return r;
+ return fence;
}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index adafc0f8ec06..8ce508e76208 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -119,8 +119,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
ret = -EBUSY;
}
- if (!ret)
- continue;
+ if (!ret) {
+ if (!entry->shared)
+ continue;
+
+ ret = reservation_object_reserve_shared(bo->resv);
+ if (!ret)
+ continue;
+ }
/* uh oh, we lost out, drop every reservation and try
* to only reserve this buffer, then start over if
@@ -136,6 +142,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
ret = 0;
}
+ if (!ret && entry->shared)
+ ret = reservation_object_reserve_shared(bo->resv);
+
if (unlikely(ret != 0)) {
if (ret == -EINTR)
ret = -ERESTARTSYS;
@@ -183,7 +192,10 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
bo = entry->bo;
- reservation_object_add_excl_fence(bo->resv, fence);
+ if (entry->shared)
+ reservation_object_add_shared_fence(bo->resv, fence);
+ else
+ reservation_object_add_excl_fence(bo->resv, fence);
ttm_bo_add_to_lru(bo);
__ttm_bo_unreserve(bo);
}
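On the TTM side, a slot for the shared fence has to be reserved up front, while the reservation is still held, so that adding the fence after command submission cannot fail. A compressed sketch of the protocol these two hunks implement (error and retry paths omitted):

	/* during reservation, per buffer: */
	if (entry->shared)
		ret = reservation_object_reserve_shared(bo->resv); /* may return -ENOMEM */

	/* after command submission, reservation still held: */
	if (entry->shared)
		reservation_object_add_shared_fence(bo->resv, fence);
	else
		reservation_object_add_excl_fence(bo->resv, fence);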
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0ceaddc8e4f7..b4de3b2a7cc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -346,6 +346,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(bo);
+ val_buf->shared = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
vval_buf->validate_as_mob = validate_as_mob;
}
@@ -2670,9 +2671,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&validate_list);
pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+ pinned_val.shared = false;
list_add_tail(&pinned_val.head, &validate_list);
query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+ query_val.shared = false;
list_add_tail(&query_val.head, &validate_list);
ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index ff0e03b97753..26584316cb78 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -133,6 +133,7 @@ static void vmw_resource_release(struct kref *kref)
struct ttm_validate_buffer val_buf;
val_buf.bo = bo;
+ val_buf.shared = false;
res->func->unbind(res, false, &val_buf);
}
res->backup_dirty = false;
@@ -1219,6 +1220,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
+ val_buf->shared = false;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
if (unlikely(ret != 0))
@@ -1312,6 +1314,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
+ val_buf.shared = false;
ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -1357,6 +1360,7 @@ int vmw_resource_validate(struct vmw_resource *res)
return 0;
val_buf.bo = NULL;
+ val_buf.shared = false;
if (res->backup)
val_buf.bo = &res->backup->base;
do {
@@ -1474,6 +1478,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_validate_buffer val_buf;
val_buf.bo = bo;
+ val_buf.shared = false;
list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {