From 0e1d8083bddb38b7169f6240905422f95d3c31b9 Mon Sep 17 00:00:00 2001
From: Christian König <christian.koenig@amd.com>
Date: Mon, 5 Aug 2019 11:16:12 +0200
Subject: dma-buf: further relax reservation_object_add_shared_fence
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Other cores don't busy-wait anymore and we removed the last user that checked
the seqcount for changes. Drop updating the sequence count for shared fences altogether.
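
For illustration, a sketch of the write side before and after (mirroring the
hunk in amdgpu_amdkfd_remove_eviction_fence() below, illustrative only):

    /* Before: readers sampled resv->seq to detect concurrent updates,
     * so the pointer install had to sit inside the seqcount write
     * section, with preemption disabled so a preempted writer could
     * not stall spinning readers.
     */
    preempt_disable();
    write_seqcount_begin(&resv->seq);
    RCU_INIT_POINTER(resv->fence, new);
    write_seqcount_end(&resv->seq);
    preempt_enable();

    /* After: readers rely on RCU alone, so publishing the new fence
     * list only needs the release ordering of rcu_assign_pointer().
     */
    rcu_assign_pointer(resv->fence, new);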

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/322379/?series=64837&rev=1
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fe062b76ec91..a4640ddc24d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -251,12 +251,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	new->shared_max = old->shared_max;
 	new->shared_count = k;
 
-	/* Install the new fence list, seqcount provides the barriers */
-	preempt_disable();
-	write_seqcount_begin(&resv->seq);
-	RCU_INIT_POINTER(resv->fence, new);
-	write_seqcount_end(&resv->seq);
-	preempt_enable();
+	rcu_assign_pointer(resv->fence, new);
 
 	/* Drop the references to the removed fences or move them to ef_list */
 	for (i = j, k = 0; i < old->shared_count; ++i) {
-- 
cgit v1.2.3


From 52791eeec1d9f4a7e7fe08aaba0b1553149d93bc Mon Sep 17 00:00:00 2001
From: Christian König <christian.koenig@amd.com>
Date: Sun, 11 Aug 2019 10:06:32 +0200
Subject: dma-buf: rename reservation_object to dma_resv
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Be more consistent with the naming of the other DMA-buf objects.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/323401/
---
 drivers/dma-buf/Makefile                          |   2 +-
 drivers/dma-buf/dma-buf.c                         |  32 +-
 drivers/dma-buf/dma-fence.c                       |   2 +-
 drivers/dma-buf/dma-resv.c                        | 603 +++++++++++++++++++++
 drivers/dma-buf/reservation.c                     | 610 ----------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |  14 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c            |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c       |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c       |  20 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c           |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h           |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c           |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h           |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c            |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c        |  18 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h        |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c          |  10 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h          |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c           |  14 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h           |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c           |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c            |  20 +-
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |   2 +-
 drivers/gpu/drm/drm_atomic_uapi.c                 |   2 +-
 drivers/gpu/drm/drm_gem.c                         |  26 +-
 drivers/gpu/drm/drm_gem_framebuffer_helper.c      |   4 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem.c             |   8 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem.h             |   2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c      |  14 +-
 drivers/gpu/drm/i915/display/intel_display.c      |   4 +-
 drivers/gpu/drm/i915/gem/i915_gem_busy.c          |   6 +-
 drivers/gpu/drm/i915/gem/i915_gem_clflush.c       |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c    |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c        |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c    |   6 +-
 drivers/gpu/drm/i915/gem/i915_gem_fence.c         |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.h        |  10 +-
 drivers/gpu/drm/i915/gem/i915_gem_wait.c          |  18 +-
 drivers/gpu/drm/i915/i915_drv.h                   |   2 +-
 drivers/gpu/drm/i915/i915_gem.c                   |   2 +-
 drivers/gpu/drm/i915/i915_gem_batch_pool.c        |  12 +-
 drivers/gpu/drm/i915/i915_request.c               |   4 +-
 drivers/gpu/drm/i915/i915_sw_fence.c              |   8 +-
 drivers/gpu/drm/i915/i915_sw_fence.h              |   4 +-
 drivers/gpu/drm/i915/i915_vma.c                   |  16 +-
 drivers/gpu/drm/i915/i915_vma.h                   |   8 +-
 drivers/gpu/drm/lima/lima_gem.c                   |   8 +-
 drivers/gpu/drm/mediatek/mtk_drm_fb.c             |   2 +-
 drivers/gpu/drm/msm/msm_gem.c                     |  18 +-
 drivers/gpu/drm/msm/msm_gem.h                     |   2 +-
 drivers/gpu/drm/msm/msm_gem_submit.c              |   2 +-
 drivers/gpu/drm/mxsfb/mxsfb_drv.c                 |   2 +-
 drivers/gpu/drm/nouveau/dispnv50/wndw.c           |   2 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c              |  10 +-
 drivers/gpu/drm/nouveau/nouveau_bo.h              |   2 +-
 drivers/gpu/drm/nouveau/nouveau_fence.c           |  12 +-
 drivers/gpu/drm/nouveau/nouveau_gem.c             |   2 +-
 drivers/gpu/drm/nouveau/nouveau_prime.c           |   6 +-
 drivers/gpu/drm/panfrost/panfrost_drv.c           |   2 +-
 drivers/gpu/drm/panfrost/panfrost_job.c           |   6 +-
 drivers/gpu/drm/qxl/qxl_debugfs.c                 |   2 +-
 drivers/gpu/drm/qxl/qxl_release.c                 |   6 +-
 drivers/gpu/drm/radeon/cik.c                      |   2 +-
 drivers/gpu/drm/radeon/cik_sdma.c                 |   2 +-
 drivers/gpu/drm/radeon/evergreen_dma.c            |   2 +-
 drivers/gpu/drm/radeon/r100.c                     |   2 +-
 drivers/gpu/drm/radeon/r200.c                     |   2 +-
 drivers/gpu/drm/radeon/r600.c                     |   2 +-
 drivers/gpu/drm/radeon/r600_dma.c                 |   2 +-
 drivers/gpu/drm/radeon/radeon.h                   |   8 +-
 drivers/gpu/drm/radeon/radeon_asic.h              |  18 +-
 drivers/gpu/drm/radeon/radeon_benchmark.c         |   2 +-
 drivers/gpu/drm/radeon/radeon_cs.c                |   2 +-
 drivers/gpu/drm/radeon/radeon_display.c           |   2 +-
 drivers/gpu/drm/radeon/radeon_gem.c               |   6 +-
 drivers/gpu/drm/radeon/radeon_mn.c                |   2 +-
 drivers/gpu/drm/radeon/radeon_object.c            |  14 +-
 drivers/gpu/drm/radeon/radeon_object.h            |   2 +-
 drivers/gpu/drm/radeon/radeon_prime.c             |   6 +-
 drivers/gpu/drm/radeon/radeon_sync.c              |  10 +-
 drivers/gpu/drm/radeon/radeon_uvd.c               |   2 +-
 drivers/gpu/drm/radeon/radeon_vm.c                |   2 +-
 drivers/gpu/drm/radeon/rv770_dma.c                |   2 +-
 drivers/gpu/drm/radeon/si_dma.c                   |   2 +-
 drivers/gpu/drm/ttm/ttm_bo.c                      | 118 ++---
 drivers/gpu/drm/ttm/ttm_bo_util.c                 |  16 +-
 drivers/gpu/drm/ttm/ttm_bo_vm.c                   |   6 +-
 drivers/gpu/drm/ttm/ttm_execbuf_util.c            |  20 +-
 drivers/gpu/drm/ttm/ttm_tt.c                      |   2 +-
 drivers/gpu/drm/v3d/v3d_gem.c                     |   4 +-
 drivers/gpu/drm/vc4/vc4_gem.c                     |   6 +-
 drivers/gpu/drm/vgem/vgem_fence.c                 |  16 +-
 drivers/gpu/drm/virtio/virtgpu_ioctl.c            |   4 +-
 drivers/gpu/drm/virtio/virtgpu_plane.c            |   2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c              |   4 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c                |   8 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c           |   4 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c          |   6 +-
 include/drm/drmP.h                                |   2 +-
 include/drm/drm_gem.h                             |   8 +-
 include/drm/ttm/ttm_bo_api.h                      |  12 +-
 include/drm/ttm/ttm_bo_driver.h                   |  14 +-
 include/linux/dma-buf.h                           |   4 +-
 include/linux/dma-fence.h                         |   4 +-
 include/linux/dma-resv.h                          | 312 +++++++++++
 include/linux/reservation.h                       | 332 ------------
 106 files changed, 1302 insertions(+), 1329 deletions(-)
 create mode 100644 drivers/dma-buf/dma-resv.c
 delete mode 100644 drivers/dma-buf/reservation.c
 create mode 100644 include/linux/dma-resv.h
 delete mode 100644 include/linux/reservation.h

(limited to 'drivers/gpu/drm/amd/amdgpu')

diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index e8c7310cb800..dcfb01e7c6f4 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
-	 reservation.o seqno-fence.o
+	 dma-resv.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
 obj-$(CONFIG_UDMABUF)		+= udmabuf.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 67510f2be8bc..b3400d6524ab 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -21,7 +21,7 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/poll.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/mm.h>
 #include <linux/mount.h>
 #include <linux/pseudo_fs.h>
@@ -104,8 +104,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);
 
-	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
-		reservation_object_fini(dmabuf->resv);
+	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+		dma_resv_fini(dmabuf->resv);
 
 	module_put(dmabuf->owner);
 	kfree(dmabuf);
@@ -165,7 +165,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
  * To support cross-device and cross-driver synchronization of buffer access
  * implicit fences (represented internally in the kernel with &struct fence) can
  * be attached to a &dma_buf. The glue for that and a few related things are
- * provided in the &reservation_object structure.
+ * provided in the &dma_resv structure.
  *
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
@@ -195,8 +195,8 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
 	unsigned shared_count;
@@ -214,7 +214,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		return 0;
 
 	rcu_read_lock();
-	reservation_object_fences(resv, &fence_excl, &fobj, &shared_count);
+	dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
 	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
 		__poll_t pevents = EPOLLIN;
@@ -493,13 +493,13 @@ err_alloc_file:
 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv = exp_info->resv;
+	struct dma_resv *resv = exp_info->resv;
 	struct file *file;
 	size_t alloc_size = sizeof(struct dma_buf);
 	int ret;
 
 	if (!exp_info->resv)
-		alloc_size += sizeof(struct reservation_object);
+		alloc_size += sizeof(struct dma_resv);
 	else
 		/* prevent &dma_buf[1] == dma_buf->resv */
 		alloc_size += 1;
@@ -531,8 +531,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 
 	if (!resv) {
-		resv = (struct reservation_object *)&dmabuf[1];
-		reservation_object_init(resv);
+		resv = (struct dma_resv *)&dmabuf[1];
+		dma_resv_init(resv);
 	}
 	dmabuf->resv = resv;
 
@@ -896,11 +896,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 {
 	bool write = (direction == DMA_BIDIRECTIONAL ||
 		      direction == DMA_TO_DEVICE);
-	struct reservation_object *resv = dmabuf->resv;
+	struct dma_resv *resv = dmabuf->resv;
 	long ret;
 
 	/* Wait on any implicit rendering fences */
-	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+	ret = dma_resv_wait_timeout_rcu(resv, write, true,
 						  MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
@@ -1141,8 +1141,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 	int ret;
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
-	struct reservation_object *robj;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	int count = 0, attach_count, shared_count, i;
 	size_t size = 0;
@@ -1175,7 +1175,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 
 		robj = buf_obj->resv;
 		rcu_read_lock();
-		reservation_object_fences(robj, &fence, &fobj, &shared_count);
+		dma_resv_fences(robj, &fence, &fobj, &shared_count);
 		rcu_read_unlock();
 
 		if (fence)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 59ac96ec7ba8..8025a891d3e9 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -60,7 +60,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
  *
  * - Then there's also implicit fencing, where the synchronization points are
  *   implicitly passed around as part of shared &dma_buf instances. Such
- *   implicit fences are stored in &struct reservation_object through the
+ *   implicit fences are stored in &struct dma_resv through the
  *   &dma_buf.resv pointer.
  */
 
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
new file mode 100644
index 000000000000..f5142683c851
--- /dev/null
+++ b/drivers/dma-buf/dma-resv.c
@@ -0,0 +1,603 @@
+/*
+ * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/dma-resv.h>
+#include <linux/export.h>
+
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer.  A reservation object
+ * can have attached one exclusive fence (normally associated with
+ * write operations) or N shared fences (read operations).  The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
+DEFINE_WD_CLASS(reservation_ww_class);
+EXPORT_SYMBOL(reservation_ww_class);
+
+/**
+ * dma_resv_list_alloc - allocate fence list
+ * @shared_max: number of fences we need space for
+ *
+ * Allocate a new dma_resv_list and make sure to correctly initialize
+ * shared_max.
+ */
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
+{
+	struct dma_resv_list *list;
+
+	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
+	if (!list)
+		return NULL;
+
+	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
+		sizeof(*list->shared);
+
+	return list;
+}
+
+/**
+ * dma_resv_list_free - free fence list
+ * @list: list to free
+ *
+ * Free a dma_resv_list and make sure to drop all references.
+ */
+static void dma_resv_list_free(struct dma_resv_list *list)
+{
+	unsigned int i;
+
+	if (!list)
+		return;
+
+	for (i = 0; i < list->shared_count; ++i)
+		dma_fence_put(rcu_dereference_protected(list->shared[i], true));
+
+	kfree_rcu(list, rcu);
+}
+
+/**
+ * dma_resv_init - initialize a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_init(struct dma_resv *obj)
+{
+	ww_mutex_init(&obj->lock, &reservation_ww_class);
+	RCU_INIT_POINTER(obj->fence, NULL);
+	RCU_INIT_POINTER(obj->fence_excl, NULL);
+}
+EXPORT_SYMBOL(dma_resv_init);
+
+/**
+ * dma_resv_fini - destroys a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_fini(struct dma_resv *obj)
+{
+	struct dma_resv_list *fobj;
+	struct dma_fence *excl;
+
+	/*
+	 * This object should be dead and all references must have
+	 * been released to it, so no need to be protected with rcu.
+	 */
+	excl = rcu_dereference_protected(obj->fence_excl, 1);
+	if (excl)
+		dma_fence_put(excl);
+
+	fobj = rcu_dereference_protected(obj->fence, 1);
+	dma_resv_list_free(fobj);
+	ww_mutex_destroy(&obj->lock);
+}
+EXPORT_SYMBOL(dma_resv_fini);
+
+/**
+ * dma_resv_reserve_shared - Reserve space to add shared fences to
+ * a dma_resv.
+ * @obj: reservation object
+ * @num_fences: number of fences we want to add
+ *
+ * Should be called before dma_resv_add_shared_fence().  Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
+ */
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
+{
+	struct dma_resv_list *old, *new;
+	unsigned int i, j, k, max;
+
+	dma_resv_assert_held(obj);
+
+	old = dma_resv_get_list(obj);
+
+	if (old && old->shared_max) {
+		if ((old->shared_count + num_fences) <= old->shared_max)
+			return 0;
+		else
+			max = max(old->shared_count + num_fences,
+				  old->shared_max * 2);
+	} else {
+		max = 4;
+	}
+
+	new = dma_resv_list_alloc(max);
+	if (!new)
+		return -ENOMEM;
+
+	/*
+	 * no need to bump fence refcounts, rcu_read access
+	 * requires the use of kref_get_unless_zero, and the
+	 * references from the old struct are carried over to
+	 * the new.
+	 */
+	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
+		struct dma_fence *fence;
+
+		fence = rcu_dereference_protected(old->shared[i],
+						  dma_resv_held(obj));
+		if (dma_fence_is_signaled(fence))
+			RCU_INIT_POINTER(new->shared[--k], fence);
+		else
+			RCU_INIT_POINTER(new->shared[j++], fence);
+	}
+	new->shared_count = j;
+
+	/*
+	 * We are not changing the effective set of fences here so can
+	 * merely update the pointer to the new array; both existing
+	 * readers and new readers will see exactly the same set of
+	 * active (unsignaled) shared fences. Individual fences and the
+	 * old array are protected by RCU and so will not vanish under
+	 * the gaze of the rcu_read_lock() readers.
+	 */
+	rcu_assign_pointer(obj->fence, new);
+
+	if (!old)
+		return 0;
+
+	/* Drop the references to the signaled fences */
+	for (i = k; i < max; ++i) {
+		struct dma_fence *fence;
+
+		fence = rcu_dereference_protected(new->shared[i],
+						  dma_resv_held(obj));
+		dma_fence_put(fence);
+	}
+	kfree_rcu(old, rcu);
+
+	return 0;
+}
+EXPORT_SYMBOL(dma_resv_reserve_shared);
+
+/**
+ * dma_resv_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
+ * Add a fence to a shared slot, obj->lock must be held, and
+ * dma_resv_reserve_shared() has been called.
+ */
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
+{
+	struct dma_resv_list *fobj;
+	struct dma_fence *old;
+	unsigned int i, count;
+
+	dma_fence_get(fence);
+
+	dma_resv_assert_held(obj);
+
+	fobj = dma_resv_get_list(obj);
+	count = fobj->shared_count;
+
+	for (i = 0; i < count; ++i) {
+
+		old = rcu_dereference_protected(fobj->shared[i],
+						dma_resv_held(obj));
+		if (old->context == fence->context ||
+		    dma_fence_is_signaled(old))
+			goto replace;
+	}
+
+	BUG_ON(fobj->shared_count >= fobj->shared_max);
+	old = NULL;
+	count++;
+
+replace:
+	RCU_INIT_POINTER(fobj->shared[i], fence);
+	/* pointer update must be visible before we extend the shared_count */
+	smp_store_mb(fobj->shared_count, count);
+	dma_fence_put(old);
+}
+EXPORT_SYMBOL(dma_resv_add_shared_fence);
+
+/**
+ * dma_resv_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
+ * Add a fence to the exclusive slot.  The obj->lock must be held.
+ */
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
+{
+	struct dma_fence *old_fence = dma_resv_get_excl(obj);
+	struct dma_resv_list *old;
+	u32 i = 0;
+
+	dma_resv_assert_held(obj);
+
+	old = dma_resv_get_list(obj);
+	if (old)
+		i = old->shared_count;
+
+	if (fence)
+		dma_fence_get(fence);
+
+	preempt_disable();
+	rcu_assign_pointer(obj->fence_excl, fence);
+	/* pointer update must be visible before we modify the shared_count */
+	if (old)
+		smp_store_mb(old->shared_count, 0);
+	preempt_enable();
+
+	/* inplace update, no shared fences */
+	while (i--)
+		dma_fence_put(rcu_dereference_protected(old->shared[i],
+						dma_resv_held(obj)));
+
+	dma_fence_put(old_fence);
+}
+EXPORT_SYMBOL(dma_resv_add_excl_fence);
+
+/**
+* dma_resv_copy_fences - Copy all fences from src to dst.
+* @dst: the destination reservation object
+* @src: the source reservation object
+*
+* Copy all fences from src to dst. dst-lock must be held.
+*/
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
+{
+	struct dma_resv_list *src_list, *dst_list;
+	struct dma_fence *old, *new;
+	unsigned int i, shared_count;
+
+	dma_resv_assert_held(dst);
+
+	rcu_read_lock();
+
+retry:
+	dma_resv_fences(src, &new, &src_list, &shared_count);
+	if (shared_count) {
+		rcu_read_unlock();
+
+		dst_list = dma_resv_list_alloc(shared_count);
+		if (!dst_list)
+			return -ENOMEM;
+
+		rcu_read_lock();
+		dma_resv_fences(src, &new, &src_list, &shared_count);
+		if (!src_list || shared_count > dst_list->shared_max) {
+			kfree(dst_list);
+			goto retry;
+		}
+
+		dst_list->shared_count = 0;
+		for (i = 0; i < shared_count; ++i) {
+			struct dma_fence *fence;
+
+			fence = rcu_dereference(src_list->shared[i]);
+			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &fence->flags))
+				continue;
+
+			if (!dma_fence_get_rcu(fence)) {
+				dma_resv_list_free(dst_list);
+				goto retry;
+			}
+
+			if (dma_fence_is_signaled(fence)) {
+				dma_fence_put(fence);
+				continue;
+			}
+
+			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+		}
+	} else {
+		dst_list = NULL;
+	}
+
+	if (new && !dma_fence_get_rcu(new)) {
+		dma_resv_list_free(dst_list);
+		goto retry;
+	}
+	rcu_read_unlock();
+
+	src_list = dma_resv_get_list(dst);
+	old = dma_resv_get_excl(dst);
+
+	preempt_disable();
+	rcu_assign_pointer(dst->fence_excl, new);
+	rcu_assign_pointer(dst->fence, dst_list);
+	preempt_enable();
+
+	dma_resv_list_free(src_list);
+	dma_fence_put(old);
+
+	return 0;
+}
+EXPORT_SYMBOL(dma_resv_copy_fences);
+
+/**
+ * dma_resv_get_fences_rcu - Get an object's shared and exclusive
+ * fences without update side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * Retrieve all fences from the reservation object. If the pointer for the
+ * exclusive fence is not specified the fence is put into the array of the
+ * shared fences as well. Returns either zero or -ENOMEM.
+ */
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+			    struct dma_fence **pfence_excl,
+			    unsigned *pshared_count,
+			    struct dma_fence ***pshared)
+{
+	struct dma_fence **shared = NULL;
+	struct dma_fence *fence_excl;
+	unsigned int shared_count;
+	int ret = 1;
+
+	do {
+		struct dma_resv_list *fobj;
+		unsigned int i;
+		size_t sz = 0;
+
+		i = 0;
+
+		rcu_read_lock();
+		dma_resv_fences(obj, &fence_excl, &fobj,
+					  &shared_count);
+
+		if (fence_excl && !dma_fence_get_rcu(fence_excl))
+			goto unlock;
+
+		if (fobj)
+			sz += sizeof(*shared) * fobj->shared_max;
+
+		if (!pfence_excl && fence_excl)
+			sz += sizeof(*shared);
+
+		if (sz) {
+			struct dma_fence **nshared;
+
+			nshared = krealloc(shared, sz,
+					   GFP_NOWAIT | __GFP_NOWARN);
+			if (!nshared) {
+				rcu_read_unlock();
+
+				dma_fence_put(fence_excl);
+				fence_excl = NULL;
+
+				nshared = krealloc(shared, sz, GFP_KERNEL);
+				if (nshared) {
+					shared = nshared;
+					continue;
+				}
+
+				ret = -ENOMEM;
+				break;
+			}
+			shared = nshared;
+			for (i = 0; i < shared_count; ++i) {
+				shared[i] = rcu_dereference(fobj->shared[i]);
+				if (!dma_fence_get_rcu(shared[i]))
+					break;
+			}
+		}
+
+		if (i != shared_count) {
+			while (i--)
+				dma_fence_put(shared[i]);
+			dma_fence_put(fence_excl);
+			goto unlock;
+		}
+
+		ret = 0;
+unlock:
+		rcu_read_unlock();
+	} while (ret);
+
+	if (pfence_excl)
+		*pfence_excl = fence_excl;
+	else if (fence_excl)
+		shared[++shared_count] = fence_excl;
+
+	if (!shared_count) {
+		kfree(shared);
+		shared = NULL;
+	}
+
+	*pshared_count = shared_count;
+	*pshared = shared;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
+
+/**
+ * dma_resv_wait_timeout_rcu - Wait on reservation's objects
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
+			       bool wait_all, bool intr,
+			       unsigned long timeout)
+{
+	struct dma_resv_list *fobj;
+	struct dma_fence *fence;
+	unsigned shared_count;
+	long ret = timeout ? timeout : 1;
+	int i;
+
+retry:
+	rcu_read_lock();
+	i = -1;
+
+	dma_resv_fences(obj, &fence, &fobj, &shared_count);
+	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		if (!dma_fence_get_rcu(fence))
+			goto unlock_retry;
+
+		if (dma_fence_is_signaled(fence)) {
+			dma_fence_put(fence);
+			fence = NULL;
+		}
+
+	} else {
+		fence = NULL;
+	}
+
+	if (wait_all) {
+		for (i = 0; !fence && i < shared_count; ++i) {
+			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
+
+			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &lfence->flags))
+				continue;
+
+			if (!dma_fence_get_rcu(lfence))
+				goto unlock_retry;
+
+			if (dma_fence_is_signaled(lfence)) {
+				dma_fence_put(lfence);
+				continue;
+			}
+
+			fence = lfence;
+			break;
+		}
+	}
+
+	rcu_read_unlock();
+	if (fence) {
+		ret = dma_fence_wait_timeout(fence, intr, ret);
+		dma_fence_put(fence);
+		if (ret > 0 && wait_all && (i + 1 < shared_count))
+			goto retry;
+	}
+	return ret;
+
+unlock_retry:
+	rcu_read_unlock();
+	goto retry;
+}
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
+
+
+static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
+{
+	struct dma_fence *fence, *lfence = passed_fence;
+	int ret = 1;
+
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+		fence = dma_fence_get_rcu(lfence);
+		if (!fence)
+			return -1;
+
+		ret = !!dma_fence_is_signaled(fence);
+		dma_fence_put(fence);
+	}
+	return ret;
+}
+
+/**
+ * dma_resv_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences signaled, else false
+ */
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
+{
+	struct dma_resv_list *fobj;
+	struct dma_fence *fence_excl;
+	unsigned shared_count;
+	int ret;
+
+	rcu_read_lock();
+retry:
+	ret = true;
+
+	dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
+	if (test_all) {
+		unsigned i;
+
+		for (i = 0; i < shared_count; ++i) {
+			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+
+			ret = dma_resv_test_signaled_single(fence);
+			if (ret < 0)
+				goto retry;
+			else if (!ret)
+				break;
+		}
+	}
+
+	if (!shared_count && fence_excl) {
+		ret = dma_resv_test_signaled_single(fence_excl);
+		if (ret < 0)
+			goto retry;
+	}
+
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
deleted file mode 100644
index f7f4a0858c2a..000000000000
--- a/drivers/dma-buf/reservation.c
+++ /dev/null
@@ -1,610 +0,0 @@
-/*
- * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
- *
- * Based on bo.c which bears the following copyright notice,
- * but is dual licensed:
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <linux/reservation.h>
-#include <linux/export.h>
-
-/**
- * DOC: Reservation Object Overview
- *
- * The reservation object provides a mechanism to manage shared and
- * exclusive fences associated with a buffer.  A reservation object
- * can have attached one exclusive fence (normally associated with
- * write operations) or N shared fences (read operations).  The RCU
- * mechanism is used to protect read access to fences from locked
- * write-side updates.
- */
-
-DEFINE_WD_CLASS(reservation_ww_class);
-EXPORT_SYMBOL(reservation_ww_class);
-
-/**
- * reservation_object_list_alloc - allocate fence list
- * @shared_max: number of fences we need space for
- *
- * Allocate a new reservation_object_list and make sure to correctly initialize
- * shared_max.
- */
-static struct reservation_object_list *
-reservation_object_list_alloc(unsigned int shared_max)
-{
-	struct reservation_object_list *list;
-
-	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
-	if (!list)
-		return NULL;
-
-	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
-		sizeof(*list->shared);
-
-	return list;
-}
-
-/**
- * reservation_object_list_free - free fence list
- * @list: list to free
- *
- * Free a reservation_object_list and make sure to drop all references.
- */
-static void reservation_object_list_free(struct reservation_object_list *list)
-{
-	unsigned int i;
-
-	if (!list)
-		return;
-
-	for (i = 0; i < list->shared_count; ++i)
-		dma_fence_put(rcu_dereference_protected(list->shared[i], true));
-
-	kfree_rcu(list, rcu);
-}
-
-/**
- * reservation_object_init - initialize a reservation object
- * @obj: the reservation object
- */
-void reservation_object_init(struct reservation_object *obj)
-{
-	ww_mutex_init(&obj->lock, &reservation_ww_class);
-	RCU_INIT_POINTER(obj->fence, NULL);
-	RCU_INIT_POINTER(obj->fence_excl, NULL);
-}
-EXPORT_SYMBOL(reservation_object_init);
-
-/**
- * reservation_object_fini - destroys a reservation object
- * @obj: the reservation object
- */
-void reservation_object_fini(struct reservation_object *obj)
-{
-	struct reservation_object_list *fobj;
-	struct dma_fence *excl;
-
-	/*
-	 * This object should be dead and all references must have
-	 * been released to it, so no need to be protected with rcu.
-	 */
-	excl = rcu_dereference_protected(obj->fence_excl, 1);
-	if (excl)
-		dma_fence_put(excl);
-
-	fobj = rcu_dereference_protected(obj->fence, 1);
-	reservation_object_list_free(fobj);
-	ww_mutex_destroy(&obj->lock);
-}
-EXPORT_SYMBOL(reservation_object_fini);
-
-/**
- * reservation_object_reserve_shared - Reserve space to add shared fences to
- * a reservation_object.
- * @obj: reservation object
- * @num_fences: number of fences we want to add
- *
- * Should be called before reservation_object_add_shared_fence().  Must
- * be called with obj->lock held.
- *
- * RETURNS
- * Zero for success, or -errno
- */
-int reservation_object_reserve_shared(struct reservation_object *obj,
-				      unsigned int num_fences)
-{
-	struct reservation_object_list *old, *new;
-	unsigned int i, j, k, max;
-
-	reservation_object_assert_held(obj);
-
-	old = reservation_object_get_list(obj);
-
-	if (old && old->shared_max) {
-		if ((old->shared_count + num_fences) <= old->shared_max)
-			return 0;
-		else
-			max = max(old->shared_count + num_fences,
-				  old->shared_max * 2);
-	} else {
-		max = 4;
-	}
-
-	new = reservation_object_list_alloc(max);
-	if (!new)
-		return -ENOMEM;
-
-	/*
-	 * no need to bump fence refcounts, rcu_read access
-	 * requires the use of kref_get_unless_zero, and the
-	 * references from the old struct are carried over to
-	 * the new.
-	 */
-	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
-		struct dma_fence *fence;
-
-		fence = rcu_dereference_protected(old->shared[i],
-						  reservation_object_held(obj));
-		if (dma_fence_is_signaled(fence))
-			RCU_INIT_POINTER(new->shared[--k], fence);
-		else
-			RCU_INIT_POINTER(new->shared[j++], fence);
-	}
-	new->shared_count = j;
-
-	/*
-	 * We are not changing the effective set of fences here so can
-	 * merely update the pointer to the new array; both existing
-	 * readers and new readers will see exactly the same set of
-	 * active (unsignaled) shared fences. Individual fences and the
-	 * old array are protected by RCU and so will not vanish under
-	 * the gaze of the rcu_read_lock() readers.
-	 */
-	rcu_assign_pointer(obj->fence, new);
-
-	if (!old)
-		return 0;
-
-	/* Drop the references to the signaled fences */
-	for (i = k; i < max; ++i) {
-		struct dma_fence *fence;
-
-		fence = rcu_dereference_protected(new->shared[i],
-						  reservation_object_held(obj));
-		dma_fence_put(fence);
-	}
-	kfree_rcu(old, rcu);
-
-	return 0;
-}
-EXPORT_SYMBOL(reservation_object_reserve_shared);
-
-/**
- * reservation_object_add_shared_fence - Add a fence to a shared slot
- * @obj: the reservation object
- * @fence: the shared fence to add
- *
- * Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared() has been called.
- */
-void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct dma_fence *fence)
-{
-	struct reservation_object_list *fobj;
-	struct dma_fence *old;
-	unsigned int i, count;
-
-	dma_fence_get(fence);
-
-	reservation_object_assert_held(obj);
-
-	fobj = reservation_object_get_list(obj);
-	count = fobj->shared_count;
-
-	for (i = 0; i < count; ++i) {
-
-		old = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(obj));
-		if (old->context == fence->context ||
-		    dma_fence_is_signaled(old))
-			goto replace;
-	}
-
-	BUG_ON(fobj->shared_count >= fobj->shared_max);
-	old = NULL;
-	count++;
-
-replace:
-	RCU_INIT_POINTER(fobj->shared[i], fence);
-	/* pointer update must be visible before we extend the shared_count */
-	smp_store_mb(fobj->shared_count, count);
-	dma_fence_put(old);
-}
-EXPORT_SYMBOL(reservation_object_add_shared_fence);
-
-/**
- * reservation_object_add_excl_fence - Add an exclusive fence.
- * @obj: the reservation object
- * @fence: the shared fence to add
- *
- * Add a fence to the exclusive slot.  The obj->lock must be held.
- */
-void reservation_object_add_excl_fence(struct reservation_object *obj,
-				       struct dma_fence *fence)
-{
-	struct dma_fence *old_fence = reservation_object_get_excl(obj);
-	struct reservation_object_list *old;
-	u32 i = 0;
-
-	reservation_object_assert_held(obj);
-
-	old = reservation_object_get_list(obj);
-	if (old)
-		i = old->shared_count;
-
-	if (fence)
-		dma_fence_get(fence);
-
-	preempt_disable();
-	rcu_assign_pointer(obj->fence_excl, fence);
-	/* pointer update must be visible before we modify the shared_count */
-	if (old)
-		smp_store_mb(old->shared_count, 0);
-	preempt_enable();
-
-	/* inplace update, no shared fences */
-	while (i--)
-		dma_fence_put(rcu_dereference_protected(old->shared[i],
-						reservation_object_held(obj)));
-
-	dma_fence_put(old_fence);
-}
-EXPORT_SYMBOL(reservation_object_add_excl_fence);
-
-/**
-* reservation_object_copy_fences - Copy all fences from src to dst.
-* @dst: the destination reservation object
-* @src: the source reservation object
-*
-* Copy all fences from src to dst. dst-lock must be held.
-*/
-int reservation_object_copy_fences(struct reservation_object *dst,
-				   struct reservation_object *src)
-{
-	struct reservation_object_list *src_list, *dst_list;
-	struct dma_fence *old, *new;
-	unsigned int i, shared_count;
-
-	reservation_object_assert_held(dst);
-
-	rcu_read_lock();
-
-retry:
-	reservation_object_fences(src, &new, &src_list, &shared_count);
-	if (shared_count) {
-		rcu_read_unlock();
-
-		dst_list = reservation_object_list_alloc(shared_count);
-		if (!dst_list)
-			return -ENOMEM;
-
-		rcu_read_lock();
-		reservation_object_fences(src, &new, &src_list, &shared_count);
-		if (!src_list || shared_count > dst_list->shared_max) {
-			kfree(dst_list);
-			goto retry;
-		}
-
-		dst_list->shared_count = 0;
-		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence;
-
-			fence = rcu_dereference(src_list->shared[i]);
-			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-				     &fence->flags))
-				continue;
-
-			if (!dma_fence_get_rcu(fence)) {
-				reservation_object_list_free(dst_list);
-				goto retry;
-			}
-
-			if (dma_fence_is_signaled(fence)) {
-				dma_fence_put(fence);
-				continue;
-			}
-
-			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
-		}
-	} else {
-		dst_list = NULL;
-	}
-
-	if (new && !dma_fence_get_rcu(new)) {
-		reservation_object_list_free(dst_list);
-		goto retry;
-	}
-	rcu_read_unlock();
-
-	src_list = reservation_object_get_list(dst);
-	old = reservation_object_get_excl(dst);
-
-	preempt_disable();
-	rcu_assign_pointer(dst->fence_excl, new);
-	rcu_assign_pointer(dst->fence, dst_list);
-	preempt_enable();
-
-	reservation_object_list_free(src_list);
-	dma_fence_put(old);
-
-	return 0;
-}
-EXPORT_SYMBOL(reservation_object_copy_fences);
-
-/**
- * reservation_object_get_fences_rcu - Get an object's shared and exclusive
- * fences without update side lock held
- * @obj: the reservation object
- * @pfence_excl: the returned exclusive fence (or NULL)
- * @pshared_count: the number of shared fences returned
- * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
- * the required size, and must be freed by caller)
- *
- * Retrieve all fences from the reservation object. If the pointer for the
- * exclusive fence is not specified the fence is put into the array of the
- * shared fences as well. Returns either zero or -ENOMEM.
- */
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
-				      struct dma_fence **pfence_excl,
-				      unsigned *pshared_count,
-				      struct dma_fence ***pshared)
-{
-	struct dma_fence **shared = NULL;
-	struct dma_fence *fence_excl;
-	unsigned int shared_count;
-	int ret = 1;
-
-	do {
-		struct reservation_object_list *fobj;
-		unsigned int i;
-		size_t sz = 0;
-
-		i = 0;
-
-		rcu_read_lock();
-		reservation_object_fences(obj, &fence_excl, &fobj,
-					  &shared_count);
-
-		if (fence_excl && !dma_fence_get_rcu(fence_excl))
-			goto unlock;
-
-		if (fobj)
-			sz += sizeof(*shared) * fobj->shared_max;
-
-		if (!pfence_excl && fence_excl)
-			sz += sizeof(*shared);
-
-		if (sz) {
-			struct dma_fence **nshared;
-
-			nshared = krealloc(shared, sz,
-					   GFP_NOWAIT | __GFP_NOWARN);
-			if (!nshared) {
-				rcu_read_unlock();
-
-				dma_fence_put(fence_excl);
-				fence_excl = NULL;
-
-				nshared = krealloc(shared, sz, GFP_KERNEL);
-				if (nshared) {
-					shared = nshared;
-					continue;
-				}
-
-				ret = -ENOMEM;
-				break;
-			}
-			shared = nshared;
-			for (i = 0; i < shared_count; ++i) {
-				shared[i] = rcu_dereference(fobj->shared[i]);
-				if (!dma_fence_get_rcu(shared[i]))
-					break;
-			}
-		}
-
-		if (i != shared_count) {
-			while (i--)
-				dma_fence_put(shared[i]);
-			dma_fence_put(fence_excl);
-			goto unlock;
-		}
-
-		ret = 0;
-unlock:
-		rcu_read_unlock();
-	} while (ret);
-
-	if (pfence_excl)
-		*pfence_excl = fence_excl;
-	else if (fence_excl)
-		shared[++shared_count] = fence_excl;
-
-	if (!shared_count) {
-		kfree(shared);
-		shared = NULL;
-	}
-
-	*pshared_count = shared_count;
-	*pshared = shared;
-	return ret;
-}
-EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
-
-/**
- * reservation_object_wait_timeout_rcu - Wait on reservation's objects
- * shared and/or exclusive fences.
- * @obj: the reservation object
- * @wait_all: if true, wait on all fences, else wait on just exclusive fence
- * @intr: if true, do interruptible wait
- * @timeout: timeout value in jiffies or zero to return immediately
- *
- * RETURNS
- * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
- * greater than zer on success.
- */
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
-					 bool wait_all, bool intr,
-					 unsigned long timeout)
-{
-	struct reservation_object_list *fobj;
-	struct dma_fence *fence;
-	unsigned shared_count;
-	long ret = timeout ? timeout : 1;
-	int i;
-
-retry:
-	rcu_read_lock();
-	i = -1;
-
-	reservation_object_fences(obj, &fence, &fobj, &shared_count);
-	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-		if (!dma_fence_get_rcu(fence))
-			goto unlock_retry;
-
-		if (dma_fence_is_signaled(fence)) {
-			dma_fence_put(fence);
-			fence = NULL;
-		}
-
-	} else {
-		fence = NULL;
-	}
-
-	if (wait_all) {
-		for (i = 0; !fence && i < shared_count; ++i) {
-			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
-
-			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-				     &lfence->flags))
-				continue;
-
-			if (!dma_fence_get_rcu(lfence))
-				goto unlock_retry;
-
-			if (dma_fence_is_signaled(lfence)) {
-				dma_fence_put(lfence);
-				continue;
-			}
-
-			fence = lfence;
-			break;
-		}
-	}
-
-	rcu_read_unlock();
-	if (fence) {
-		ret = dma_fence_wait_timeout(fence, intr, ret);
-		dma_fence_put(fence);
-		if (ret > 0 && wait_all && (i + 1 < shared_count))
-			goto retry;
-	}
-	return ret;
-
-unlock_retry:
-	rcu_read_unlock();
-	goto retry;
-}
-EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
-
-
-static inline int
-reservation_object_test_signaled_single(struct dma_fence *passed_fence)
-{
-	struct dma_fence *fence, *lfence = passed_fence;
-	int ret = 1;
-
-	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
-		fence = dma_fence_get_rcu(lfence);
-		if (!fence)
-			return -1;
-
-		ret = !!dma_fence_is_signaled(fence);
-		dma_fence_put(fence);
-	}
-	return ret;
-}
-
-/**
- * reservation_object_test_signaled_rcu - Test if a reservation object's
- * fences have been signaled.
- * @obj: the reservation object
- * @test_all: if true, test all fences, otherwise only test the exclusive
- * fence
- *
- * RETURNS
- * true if all fences signaled, else false
- */
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
-					  bool test_all)
-{
-	struct reservation_object_list *fobj;
-	struct dma_fence *fence_excl;
-	unsigned shared_count;
-	int ret;
-
-	rcu_read_lock();
-retry:
-	ret = true;
-
-	reservation_object_fences(obj, &fence_excl, &fobj, &shared_count);
-	if (test_all) {
-		unsigned i;
-
-		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
-			ret = reservation_object_test_signaled_single(fence);
-			if (ret < 0)
-				goto retry;
-			else if (!ret)
-				break;
-		}
-	}
-
-	if (!shared_count && fence_excl) {
-		ret = reservation_object_test_signaled_single(fence_excl);
-		if (ret < 0)
-			goto retry;
-	}
-
-	rcu_read_unlock();
-	return ret;
-}
-EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a4640ddc24d1..bc4ec6b20a87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -218,14 +218,14 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 					struct amdgpu_amdkfd_fence *ef)
 {
-	struct reservation_object *resv = bo->tbo.base.resv;
-	struct reservation_object_list *old, *new;
+	struct dma_resv *resv = bo->tbo.base.resv;
+	struct dma_resv_list *old, *new;
 	unsigned int i, j, k;
 
 	if (!ef)
 		return -EINVAL;
 
-	old = reservation_object_get_list(resv);
+	old = dma_resv_get_list(resv);
 	if (!old)
 		return 0;
 
@@ -241,7 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 		struct dma_fence *f;
 
 		f = rcu_dereference_protected(old->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
 
 		if (f->context == ef->base.context)
 			RCU_INIT_POINTER(new->shared[--j], f);
@@ -258,7 +258,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 		struct dma_fence *f;
 
 		f = rcu_dereference_protected(new->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
 		dma_fence_put(f);
 	}
 	kfree_rcu(old, rcu);
@@ -882,7 +882,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 				  AMDGPU_FENCE_OWNER_KFD, false);
 	if (ret)
 		goto wait_pd_fail;
-	ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(vm->root.base.bo,
@@ -2127,7 +2127,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
 	 * Add process eviction fence to bo so they can
 	 * evict each other.
 	 */
-	ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e748cd284780..22236d367e26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -730,7 +730,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 
 	list_for_each_entry(e, &p->validated, tv.head) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-		struct reservation_object *resv = bo->tbo.base.resv;
+		struct dma_resv *resv = bo->tbo.base.resv;
 
 		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
 				     amdgpu_bo_explicit_sync(bo));
@@ -1729,7 +1729,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 	*map = mapping;
 
 	/* Double check that the BO is reserved by this CS */
-	if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
+	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
 		return -EINVAL;
 
 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b5d020e15c35..8a48cb5b5875 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -204,7 +204,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
+	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index b88e27da7c28..bf0f00508987 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -137,23 +137,23 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
 }
 
 static int
-__reservation_object_make_exclusive(struct reservation_object *obj)
+__dma_resv_make_exclusive(struct dma_resv *obj)
 {
 	struct dma_fence **fences;
 	unsigned int count;
 	int r;
 
-	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
 		return 0;
 
-	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
 	if (r)
 		return r;
 
 	if (count == 0) {
 		/* Now that was unexpected. */
 	} else if (count == 1) {
-		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_resv_add_excl_fence(obj, fences[0]);
 		dma_fence_put(fences[0]);
 		kfree(fences);
 	} else {
@@ -165,7 +165,7 @@ __reservation_object_make_exclusive(struct reservation_object *obj)
 		if (!array)
 			goto err_fences_put;
 
-		reservation_object_add_excl_fence(obj, &array->base);
+		dma_resv_add_excl_fence(obj, &array->base);
 		dma_fence_put(&array->base);
 	}
 
@@ -216,7 +216,7 @@ static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
 		 * fences on the reservation object into a single exclusive
 		 * fence.
 		 */
-		r = __reservation_object_make_exclusive(bo->tbo.base.resv);
+		r = __dma_resv_make_exclusive(bo->tbo.base.resv);
 		if (r)
 			goto error_unreserve;
 	}
@@ -367,7 +367,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 				 struct dma_buf_attachment *attach,
 				 struct sg_table *sg)
 {
-	struct reservation_object *resv = attach->dmabuf->resv;
+	struct dma_resv *resv = attach->dmabuf->resv;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_bo *bo;
 	struct amdgpu_bo_param bp;
@@ -380,7 +380,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	bp.flags = 0;
 	bp.type = ttm_bo_type_sg;
 	bp.resv = resv;
-	reservation_object_lock(resv, NULL);
+	dma_resv_lock(resv, NULL);
 	ret = amdgpu_bo_create(adev, &bp, &bo);
 	if (ret)
 		goto error;
@@ -392,11 +392,11 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
 		bo->prime_shared_count = 1;
 
-	reservation_object_unlock(resv);
+	dma_resv_unlock(resv);
 	return &bo->tbo.base;
 
 error:
-	reservation_object_unlock(resv);
+	dma_resv_unlock(resv);
 	return ERR_PTR(ret);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index bff9173a1a94..40f673cfbbfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -50,7 +50,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, enum ttm_bo_type type,
-			     struct reservation_object *resv,
+			     struct dma_resv *resv,
 			     struct drm_gem_object **obj)
 {
 	struct amdgpu_bo *bo;
@@ -215,7 +215,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	union drm_amdgpu_gem_create *args = data;
 	uint64_t flags = args->in.domain_flags;
 	uint64_t size = args->in.bo_size;
-	struct reservation_object *resv = NULL;
+	struct dma_resv *resv = NULL;
 	struct drm_gem_object *gobj;
 	uint32_t handle;
 	int r;
@@ -433,7 +433,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true,
+	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
 						  timeout);
 
 	/* ret == 0 means not signaled,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 2f17150e26e1..0b66d2e6b5d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -47,7 +47,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev);
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, enum ttm_bo_type type,
-			     struct reservation_object *resv,
+			     struct dma_resv *resv,
 			     struct drm_gem_object **obj);
 
 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 57b3d8a9bef3..b3823f657bdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -104,7 +104,7 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
  *
  * Free the pasid only after all the fences in resv are signaled.
  */
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 			       unsigned int pasid)
 {
 	struct dma_fence *fence, **fences;
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct reservation_object *resv,
 	unsigned count;
 	int r;
 
-	r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
+	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
 	if (r)
 		goto fallback;
 
@@ -156,7 +156,7 @@ fallback:
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	reservation_object_wait_timeout_rcu(resv, true, false,
+	dma_resv_wait_timeout_rcu(resv, true, false,
 					    MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 7625419f0fc2..8e58325bbca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -72,7 +72,7 @@ struct amdgpu_vmid_mgr {
 
 int amdgpu_pasid_alloc(unsigned int bits);
 void amdgpu_pasid_free(unsigned int pasid);
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 			       unsigned int pasid);
 
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 50022acc8a81..f1f8cdd695d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -179,7 +179,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
 			continue;
 
-		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
 			true, false, MAX_SCHEDULE_TIMEOUT);
 		if (r <= 0)
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8ae44d383a13..2f11ebd95528 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -544,7 +544,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 
 fail_unreserve:
 	if (!bp->resv)
-		reservation_object_unlock(bo->tbo.base.resv);
+		dma_resv_unlock(bo->tbo.base.resv);
 	amdgpu_bo_unref(&bo);
 	return r;
 }
@@ -606,13 +606,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 
 	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
 		if (!bp->resv)
-			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv,
+			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
 							NULL));
 
 		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
 
 		if (!bp->resv)
-			reservation_object_unlock((*bo_ptr)->tbo.base.resv);
+			dma_resv_unlock((*bo_ptr)->tbo.base.resv);
 
 		if (r)
 			amdgpu_bo_unref(bo_ptr);
@@ -709,7 +709,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}
 
-	r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false,
+	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
 						MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
@@ -1087,7 +1087,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
  */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
-	reservation_object_assert_held(bo->tbo.base.resv);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
@@ -1283,12 +1283,12 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared)
 {
-	struct reservation_object *resv = bo->tbo.base.resv;
+	struct dma_resv *resv = bo->tbo.base.resv;
 
 	if (shared)
-		reservation_object_add_shared_fence(resv, fence);
+		dma_resv_add_shared_fence(resv, fence);
 	else
-		reservation_object_add_excl_fence(resv, fence);
+		dma_resv_add_excl_fence(resv, fence);
 }
 
 /**
@@ -1328,7 +1328,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-	WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) &&
+	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
 		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 113fb2feb437..1a555b0fd3b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -41,7 +41,7 @@ struct amdgpu_bo_param {
 	u32				preferred_domain;
 	u64				flags;
 	enum ttm_bo_type		type;
-	struct reservation_object	*resv;
+	struct dma_resv	*resv;
 };
 
 /* bo virtual addresses in a vm */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 9828f3c7c655..95e5e93edd18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -190,10 +190,10 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
  */
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     void *owner, bool explicit_sync)
 {
-	struct reservation_object_list *flist;
+	struct dma_resv_list *flist;
 	struct dma_fence *f;
 	void *fence_owner;
 	unsigned i;
@@ -203,16 +203,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		return -EINVAL;
 
 	/* always sync to the exclusive fence */
-	f = reservation_object_get_excl(resv);
+	f = dma_resv_get_excl(resv);
 	r = amdgpu_sync_fence(adev, sync, f, false);
 
-	flist = reservation_object_get_list(resv);
+	flist = dma_resv_get_list(resv);
 	if (!flist || r)
 		return r;
 
 	for (i = 0; i < flist->shared_count; ++i) {
 		f = rcu_dereference_protected(flist->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
 		/* We only want to trigger KFD eviction fences on
 		 * evict or move jobs. Skip KFD fences otherwise.
 		 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 10cf23a57f17..b5f1778a2319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -27,7 +27,7 @@
 #include <linux/hashtable.h>
 
 struct dma_fence;
-struct reservation_object;
+struct dma_resv;
 struct amdgpu_device;
 struct amdgpu_ring;
 
@@ -44,7 +44,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		      struct dma_fence *f, bool explicit);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     void *owner,
 		     bool explicit_sync);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 63e7d1e01b76..fb09314bcfd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -303,7 +303,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 			       struct amdgpu_copy_mem *src,
 			       struct amdgpu_copy_mem *dst,
 			       uint64_t size,
-			       struct reservation_object *resv,
+			       struct dma_resv *resv,
 			       struct dma_fence **f)
 {
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -1470,7 +1470,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 {
 	unsigned long num_pages = bo->mem.num_pages;
 	struct drm_mm_node *node = bo->mem.mm_node;
-	struct reservation_object_list *flist;
+	struct dma_resv_list *flist;
 	struct dma_fence *f;
 	int i;
 
@@ -1478,18 +1478,18 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * cleanly handle page faults.
 	 */
 	if (bo->type == ttm_bo_type_kernel &&
-	    !reservation_object_test_signaled_rcu(bo->base.resv, true))
+	    !dma_resv_test_signaled_rcu(bo->base.resv, true))
 		return false;
 
 	/* If bo is a KFD BO, check if the bo belongs to the current process.
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	flist = reservation_object_get_list(bo->base.resv);
+	flist = dma_resv_get_list(bo->base.resv);
 	if (flist) {
 		for (i = 0; i < flist->shared_count; ++i) {
 			f = rcu_dereference_protected(flist->shared[i],
-				reservation_object_held(bo->base.resv));
+				dma_resv_held(bo->base.resv));
 			if (amdkfd_fence_check_mm(f, current->mm))
 				return false;
 		}
@@ -1992,7 +1992,7 @@ error_free:
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence, bool direct_submit,
 		       bool vm_needs_flush)
 {
@@ -2066,7 +2066,7 @@ error_free:
 
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       uint32_t src_data,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index caa76c693700..80896bd6b972 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -83,18 +83,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence, bool direct_submit,
 		       bool vm_needs_flush);
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 			       struct amdgpu_copy_mem *src,
 			       struct amdgpu_copy_mem *dst,
 			       uint64_t size,
-			       struct reservation_object *resv,
+			       struct dma_resv *resv,
 			       struct dma_fence **f);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 			uint32_t src_data,
-			struct reservation_object *resv,
+			struct dma_resv *resv,
 			struct dma_fence **fence);
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index f858607b17a5..b2c364b8695f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1073,7 +1073,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
 							true, false,
 							msecs_to_jiffies(10));
 		if (r == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 95eef0ac2829..07dcad7ecb26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1702,7 +1702,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		exclusive = reservation_object_get_excl(bo->tbo.base.resv);
+		exclusive = dma_resv_get_excl(bo->tbo.base.resv);
 	}
 
 	if (bo) {
@@ -1879,18 +1879,18 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-	struct reservation_object *resv = vm->root.base.bo->tbo.base.resv;
+	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
 	struct dma_fence *excl, **shared;
 	unsigned i, shared_count;
 	int r;
 
-	r = reservation_object_get_fences_rcu(resv, &excl,
+	r = dma_resv_get_fences_rcu(resv, &excl,
 					      &shared_count, &shared);
 	if (r) {
 		/* Not enough memory to grab the fence list, as last resort
 		 * block for all the fences to complete.
 		 */
-		reservation_object_wait_timeout_rcu(resv, true, false,
+		dma_resv_wait_timeout_rcu(resv, true, false,
 						    MAX_SCHEDULE_TIMEOUT);
 		return;
 	}
@@ -1978,7 +1978,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 			   struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va *bo_va, *tmp;
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 	bool clear;
 	int r;
 
@@ -1997,7 +1997,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 		spin_unlock(&vm->invalidated_lock);
 
 		/* Try to reserve the BO to avoid clearing its ptes */
-		if (!amdgpu_vm_debug && reservation_object_trylock(resv))
+		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
 			clear = false;
 		/* Somebody else is using the BO right now */
 		else
@@ -2008,7 +2008,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 			return r;
 
 		if (!clear)
-			reservation_object_unlock(resv);
+			dma_resv_unlock(resv);
 		spin_lock(&vm->invalidated_lock);
 	}
 	spin_unlock(&vm->invalidated_lock);
@@ -2416,7 +2416,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
 			struct amdgpu_bo *bo;
 
 			bo = mapping->bo_va->base.bo;
-			if (reservation_object_locking_ctx(bo->tbo.base.resv) !=
+			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
 			    ticket)
 				continue;
 		}
@@ -2649,7 +2649,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+	return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
 						   true, true, timeout);
 }
 
@@ -2724,7 +2724,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		goto error_free_root;
 
-	r = reservation_object_reserve_shared(root->tbo.base.resv, 1);
+	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
 	if (r)
 		goto error_unreserve;
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 381a5345f195..cb7cfa9b34f2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5693,7 +5693,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true,
+		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
 							false,
 							msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 19ae119f1a5d..5a5b42db6f2a 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1037,7 +1037,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
  * As a contrast, with implicit fencing the kernel keeps track of any
  * ongoing rendering, and automatically ensures that the atomic update waits
  * for any pending rendering to complete. For shared buffers represented with
- * a &struct dma_buf this is tracked in &struct reservation_object.
+ * a &struct dma_buf this is tracked in &struct dma_resv.
  * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
  * whereas explicit fencing is what Android wants.
  *
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a2dd198177f2..6854f5867d51 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -159,7 +159,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
 	kref_init(&obj->refcount);
 	obj->handle_count = 0;
 	obj->size = size;
-	reservation_object_init(&obj->_resv);
+	dma_resv_init(&obj->_resv);
 	if (!obj->resv)
 		obj->resv = &obj->_resv;
 
@@ -755,7 +755,7 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle)
 EXPORT_SYMBOL(drm_gem_object_lookup);
 
 /**
- * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects
+ * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
  * shared and/or exclusive fences.
  * @filep: DRM file private data
  * @handle: userspace handle
@@ -767,7 +767,7 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
  * greater than 0 on success.
  */
-long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 				    bool wait_all, unsigned long timeout)
 {
 	long ret;
@@ -779,7 +779,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
 		return -EINVAL;
 	}
 
-	ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
+	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
 						  true, timeout);
 	if (ret == 0)
 		ret = -ETIME;
@@ -790,7 +790,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
 
 	return ret;
 }
-EXPORT_SYMBOL(drm_gem_reservation_object_wait);
+EXPORT_SYMBOL(drm_gem_dma_resv_wait);
 
 /**
  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
@@ -956,7 +956,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
 	if (obj->filp)
 		fput(obj->filp);
 
-	reservation_object_fini(&obj->_resv);
+	dma_resv_fini(&obj->_resv);
 	drm_gem_free_mmap_offset(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
@@ -1291,7 +1291,7 @@ retry:
 	if (contended != -1) {
 		struct drm_gem_object *obj = objs[contended];
 
-		ret = reservation_object_lock_slow_interruptible(obj->resv,
+		ret = dma_resv_lock_slow_interruptible(obj->resv,
 								 acquire_ctx);
 		if (ret) {
 			ww_acquire_done(acquire_ctx);
@@ -1303,16 +1303,16 @@ retry:
 		if (i == contended)
 			continue;
 
-		ret = reservation_object_lock_interruptible(objs[i]->resv,
+		ret = dma_resv_lock_interruptible(objs[i]->resv,
 							    acquire_ctx);
 		if (ret) {
 			int j;
 
 			for (j = 0; j < i; j++)
-				reservation_object_unlock(objs[j]->resv);
+				dma_resv_unlock(objs[j]->resv);
 
 			if (contended != -1 && contended >= i)
-				reservation_object_unlock(objs[contended]->resv);
+				dma_resv_unlock(objs[contended]->resv);
 
 			if (ret == -EDEADLK) {
 				contended = i;
@@ -1337,7 +1337,7 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
 	int i;
 
 	for (i = 0; i < count; i++)
-		reservation_object_unlock(objs[i]->resv);
+		dma_resv_unlock(objs[i]->resv);
 
 	ww_acquire_fini(acquire_ctx);
 }
@@ -1413,12 +1413,12 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 
 	if (!write) {
 		struct dma_fence *fence =
-			reservation_object_get_excl_rcu(obj->resv);
+			dma_resv_get_excl_rcu(obj->resv);
 
 		return drm_gem_fence_array_add(fence_array, fence);
 	}
 
-	ret = reservation_object_get_fences_rcu(obj->resv, NULL,
+	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
 						&fence_count, &fences);
 	if (ret || !fence_count)
 		return ret;
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index f61304054786..b9bcd310ca2d 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -7,7 +7,7 @@
 
 #include <linux/dma-buf.h>
 #include <linux/dma-fence.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/slab.h>
 
 #include <drm/drm_atomic.h>
@@ -294,7 +294,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
 		return 0;
 
 	obj = drm_gem_fb_get_obj(state->fb, 0);
-	fence = reservation_object_get_excl_rcu(obj->resv);
+	fence = dma_resv_get_excl_rcu(obj->resv);
 	drm_atomic_set_fence_for_plane(state, fence);
 
 	return 0;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 17ca602db60a..7d83e04ec36e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -397,13 +397,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	}
 
 	if (op & ETNA_PREP_NOSYNC) {
-		if (!reservation_object_test_signaled_rcu(obj->resv,
+		if (!dma_resv_test_signaled_rcu(obj->resv,
 							  write))
 			return -EBUSY;
 	} else {
 		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
 
-		ret = reservation_object_wait_timeout_rcu(obj->resv,
+		ret = dma_resv_wait_timeout_rcu(obj->resv,
 							  write, true, remain);
 		if (ret <= 0)
 			return ret == 0 ? -ETIMEDOUT : ret;
@@ -459,8 +459,8 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence,
 static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-	struct reservation_object *robj = obj->resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj = obj->resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	unsigned long off = drm_vma_node_start(&obj->vma_node);
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index fcd5d71b502f..28379e4df253 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -6,7 +6,7 @@
 #ifndef __ETNAVIV_GEM_H__
 #define __ETNAVIV_GEM_H__
 
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1a636469eeda..998c96b40d8a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -4,7 +4,7 @@
  */
 
 #include <linux/dma-fence-array.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/sync_file.h>
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
@@ -165,10 +165,10 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
-		struct reservation_object *robj = bo->obj->base.resv;
+		struct dma_resv *robj = bo->obj->base.resv;
 
 		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
-			ret = reservation_object_reserve_shared(robj, 1);
+			ret = dma_resv_reserve_shared(robj, 1);
 			if (ret)
 				return ret;
 		}
@@ -177,13 +177,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			continue;
 
 		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-			ret = reservation_object_get_fences_rcu(robj, &bo->excl,
+			ret = dma_resv_get_fences_rcu(robj, &bo->excl,
 								&bo->nr_shared,
 								&bo->shared);
 			if (ret)
 				return ret;
 		} else {
-			bo->excl = reservation_object_get_excl_rcu(robj);
+			bo->excl = dma_resv_get_excl_rcu(robj);
 		}
 
 	}
@@ -199,10 +199,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 
 		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
-			reservation_object_add_excl_fence(obj->resv,
+			dma_resv_add_excl_fence(obj->resv,
 							  submit->out_fence);
 		else
-			reservation_object_add_shared_fence(obj->resv,
+			dma_resv_add_shared_fence(obj->resv,
 							    submit->out_fence);
 
 		submit_unlock_object(submit, i);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 8592a7d422de..21c71fdceec0 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -29,7 +29,7 @@
 #include <linux/intel-iommu.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/slab.h>
 #include <linux/vgaarb.h>
 
@@ -14317,7 +14317,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		if (ret < 0)
 			return ret;
 
-		fence = reservation_object_get_excl_rcu(obj->base.resv);
+		fence = dma_resv_get_excl_rcu(obj->base.resv);
 		if (fence) {
 			add_rps_boost_after_vblank(new_state->crtc, fence);
 			dma_fence_put(fence);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 8473292096cb..a2aff1d8290e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -82,7 +82,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_busy *args = data;
 	struct drm_i915_gem_object *obj;
-	struct reservation_object_list *list;
+	struct dma_resv_list *list;
 	unsigned int i, shared_count;
 	struct dma_fence *excl;
 	int err;
@@ -106,11 +106,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * Alternatively, we can trade that extra information on read/write
 	 * activity with
 	 *	args->busy =
-	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
+	 *		!dma_resv_test_signaled_rcu(obj->resv, true);
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
-	reservation_object_fences(obj->base.resv, &excl, &list, &shared_count);
+	dma_resv_fences(obj->base.resv, &excl, &list, &shared_count);
 
 	/* Translate the exclusive fence to the READ *and* WRITE engine */
 	args->busy = busy_check_writer(excl);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 5295285d5843..88ee8ca7967f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -147,7 +147,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 						true, I915_FENCE_TIMEOUT,
 						I915_FENCE_GFP);
 
-		reservation_object_add_excl_fence(obj->base.resv,
+		dma_resv_add_excl_fence(obj->base.resv,
 						  &clflush->dma);
 
 		i915_sw_fence_commit(&clflush->wait);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 1fdab0767a47..693fcfce5d69 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -288,7 +288,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 	if (err < 0) {
 		dma_fence_set_error(&work->dma, err);
 	} else {
-		reservation_object_add_excl_fence(obj->base.resv, &work->dma);
+		dma_resv_add_excl_fence(obj->base.resv, &work->dma);
 		err = 0;
 	}
 	i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 570b20ad9e58..96ce95c8ac5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -6,7 +6,7 @@
 
 #include <linux/dma-buf.h>
 #include <linux/highmem.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include "i915_drv.h"
 #include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5fae0e50aad0..2d71653ede00 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -5,7 +5,7 @@
  */
 
 #include <linux/intel-iommu.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
 
@@ -1246,7 +1246,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto skip_request;
 
 	i915_vma_lock(batch);
-	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
+	GEM_BUG_ON(!dma_resv_test_signaled_rcu(batch->resv, true));
 	err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
@@ -1317,7 +1317,7 @@ relocate_entry(struct i915_vma *vma,
 
 	if (!eb->reloc_cache.vaddr &&
 	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
-	     !reservation_object_test_signaled_rcu(vma->resv, true))) {
+	     !dma_resv_test_signaled_rcu(vma->resv, true))) {
 		const unsigned int gen = eb->reloc_cache.gen;
 		unsigned int len;
 		u32 *batch;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
index cf0439e6be83..5496f33a9064 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
@@ -78,7 +78,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
 					    I915_FENCE_GFP) < 0)
 		goto err;
 
-	reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
+	dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
 
 	return &stub->dma;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index dfebd5706f16..d5f8fdc95fd9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -99,22 +99,22 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
 	__drm_gem_object_put(&obj->base);
 }
 
-#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
+#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
 
 static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
 {
-	reservation_object_lock(obj->base.resv, NULL);
+	dma_resv_lock(obj->base.resv, NULL);
 }
 
 static inline int
 i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
 {
-	return reservation_object_lock_interruptible(obj->base.resv, NULL);
+	return dma_resv_lock_interruptible(obj->base.resv, NULL);
 }
 
 static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 {
-	reservation_object_unlock(obj->base.resv);
+	dma_resv_unlock(obj->base.resv);
 }
 
 struct dma_fence *
@@ -373,7 +373,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 	struct dma_fence *fence;
 
 	rcu_read_lock();
-	fence = reservation_object_get_excl_rcu(obj->base.resv);
+	fence = dma_resv_get_excl_rcu(obj->base.resv);
 	rcu_read_unlock();
 
 	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index fa46a54bcbe7..8af55cd3e690 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -31,7 +31,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 }
 
 static long
-i915_gem_object_wait_reservation(struct reservation_object *resv,
+i915_gem_object_wait_reservation(struct dma_resv *resv,
 				 unsigned int flags,
 				 long timeout)
 {
@@ -43,7 +43,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 		unsigned int count, i;
 		int ret;
 
-		ret = reservation_object_get_fences_rcu(resv,
+		ret = dma_resv_get_fences_rcu(resv,
 							&excl, &count, &shared);
 		if (ret)
 			return ret;
@@ -72,7 +72,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 		 */
 		prune_fences = count && timeout >= 0;
 	} else {
-		excl = reservation_object_get_excl_rcu(resv);
+		excl = dma_resv_get_excl_rcu(resv);
 	}
 
 	if (excl && timeout >= 0)
@@ -84,10 +84,10 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled.
 	 */
-	if (prune_fences && reservation_object_trylock(resv)) {
-		if (reservation_object_test_signaled_rcu(resv, true))
-			reservation_object_add_excl_fence(resv, NULL);
-		reservation_object_unlock(resv);
+	if (prune_fences && dma_resv_trylock(resv)) {
+		if (dma_resv_test_signaled_rcu(resv, true))
+			dma_resv_add_excl_fence(resv, NULL);
+		dma_resv_unlock(resv);
 	}
 
 	return timeout;
@@ -140,7 +140,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 		unsigned int count, i;
 		int ret;
 
-		ret = reservation_object_get_fences_rcu(obj->base.resv,
+		ret = dma_resv_get_fences_rcu(obj->base.resv,
 							&excl, &count, &shared);
 		if (ret)
 			return ret;
@@ -152,7 +152,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 
 		kfree(shared);
 	} else {
-		excl = reservation_object_get_excl_rcu(obj->base.resv);
+		excl = dma_resv_get_excl_rcu(obj->base.resv);
 	}
 
 	if (excl) {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bdd596604e93..b62db22b37a1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -43,7 +43,7 @@
 #include <linux/mm_types.h>
 #include <linux/perf_event.h>
 #include <linux/pm_qos.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/shmem_fs.h>
 #include <linux/stackdepot.h>
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 190ad54fb072..a5a439cb8c06 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,7 +29,7 @@
 #include <drm/i915_drm.h>
 #include <linux/dma-fence-array.h>
 #include <linux/kthread.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 25a3e4d09a2f..5f82a763e64c 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -96,9 +96,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	list_for_each_entry(obj, list, batch_pool_link) {
 		/* The batches are strictly LRU ordered */
 		if (i915_gem_object_is_active(obj)) {
-			struct reservation_object *resv = obj->base.resv;
+			struct dma_resv *resv = obj->base.resv;
 
-			if (!reservation_object_test_signaled_rcu(resv, true))
+			if (!dma_resv_test_signaled_rcu(resv, true))
 				break;
 
 			i915_retire_requests(pool->engine->i915);
@@ -113,13 +113,13 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 			 * than replace the existing fence.
 			 */
 			if (rcu_access_pointer(resv->fence)) {
-				reservation_object_lock(resv, NULL);
-				reservation_object_add_excl_fence(resv, NULL);
-				reservation_object_unlock(resv);
+				dma_resv_lock(resv, NULL);
+				dma_resv_add_excl_fence(resv, NULL);
+				dma_resv_unlock(resv);
 			}
 		}
 
-		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
+		GEM_BUG_ON(!dma_resv_test_signaled_rcu(obj->base.resv,
 								 true));
 
 		if (obj->base.size >= size)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a195a92d0105..0d1bd3f56c21 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1027,7 +1027,7 @@ i915_request_await_object(struct i915_request *to,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = reservation_object_get_fences_rcu(obj->base.resv,
+		ret = dma_resv_get_fences_rcu(obj->base.resv,
 							&excl, &count, &shared);
 		if (ret)
 			return ret;
@@ -1044,7 +1044,7 @@ i915_request_await_object(struct i915_request *to,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
-		excl = reservation_object_get_excl_rcu(obj->base.resv);
+		excl = dma_resv_get_excl_rcu(obj->base.resv);
 	}
 
 	if (excl) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 5387aafd3424..362e4e00b4c6 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -7,7 +7,7 @@
 #include <linux/slab.h>
 #include <linux/dma-fence.h>
 #include <linux/irq_work.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include "i915_sw_fence.h"
 #include "i915_selftest.h"
@@ -510,7 +510,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 }
 
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
-				    struct reservation_object *resv,
+				    struct dma_resv *resv,
 				    const struct dma_fence_ops *exclude,
 				    bool write,
 				    unsigned long timeout,
@@ -526,7 +526,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = reservation_object_get_fences_rcu(resv,
+		ret = dma_resv_get_fences_rcu(resv,
 							&excl, &count, &shared);
 		if (ret)
 			return ret;
@@ -551,7 +551,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
-		excl = reservation_object_get_excl_rcu(resv);
+		excl = dma_resv_get_excl_rcu(resv);
 	}
 
 	if (ret >= 0 && excl && excl->ops != exclude) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 9cb5c3b307a6..8cf353e8c3e0 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,7 +16,7 @@
 #include <linux/wait.h>
 
 struct completion;
-struct reservation_object;
+struct dma_resv;
 
 struct i915_sw_fence {
 	wait_queue_head_t wait;
@@ -82,7 +82,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 				  gfp_t gfp);
 
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
-				    struct reservation_object *resv,
+				    struct dma_resv *resv,
 				    const struct dma_fence_ops *exclude,
 				    bool write,
 				    unsigned long timeout,
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a57729be8312..ebfd03d117cd 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -99,10 +99,10 @@ static void __i915_vma_retire(struct i915_active *ref)
 		return;
 
 	/* Prune the shared fence arrays iff completely idle (inc. external) */
-	if (reservation_object_trylock(obj->base.resv)) {
-		if (reservation_object_test_signaled_rcu(obj->base.resv, true))
-			reservation_object_add_excl_fence(obj->base.resv, NULL);
-		reservation_object_unlock(obj->base.resv);
+	if (dma_resv_trylock(obj->base.resv)) {
+		if (dma_resv_test_signaled_rcu(obj->base.resv, true))
+			dma_resv_add_excl_fence(obj->base.resv, NULL);
+		dma_resv_unlock(obj->base.resv);
 	}
 
 	/*
@@ -903,7 +903,7 @@ static void export_fence(struct i915_vma *vma,
 			 struct i915_request *rq,
 			 unsigned int flags)
 {
-	struct reservation_object *resv = vma->resv;
+	struct dma_resv *resv = vma->resv;
 
 	/*
 	 * Ignore errors from failing to allocate the new fence, we can't
@@ -911,9 +911,9 @@ static void export_fence(struct i915_vma *vma,
 	 * synchronisation leading to rendering corruption.
 	 */
 	if (flags & EXEC_OBJECT_WRITE)
-		reservation_object_add_excl_fence(resv, &rq->fence);
-	else if (reservation_object_reserve_shared(resv, 1) == 0)
-		reservation_object_add_shared_fence(resv, &rq->fence);
+		dma_resv_add_excl_fence(resv, &rq->fence);
+	else if (dma_resv_reserve_shared(resv, 1) == 0)
+		dma_resv_add_shared_fence(resv, &rq->fence);
 }
 
 int i915_vma_move_to_active(struct i915_vma *vma,
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4b769db649bf..59a497561fc4 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -55,7 +55,7 @@ struct i915_vma {
 	struct i915_address_space *vm;
 	const struct i915_vma_ops *ops;
 	struct i915_fence_reg *fence;
-	struct reservation_object *resv; /** Alias of obj->resv */
+	struct dma_resv *resv; /** Alias of obj->resv */
 	struct sg_table *pages;
 	void __iomem *iomap;
 	void *private; /* owned by creator */
@@ -299,16 +299,16 @@ void i915_vma_close(struct i915_vma *vma);
 void i915_vma_reopen(struct i915_vma *vma);
 void i915_vma_destroy(struct i915_vma *vma);
 
-#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)
+#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
 
 static inline void i915_vma_lock(struct i915_vma *vma)
 {
-	reservation_object_lock(vma->resv, NULL);
+	dma_resv_lock(vma->resv, NULL);
 }
 
 static inline void i915_vma_unlock(struct i915_vma *vma)
 {
-	reservation_object_unlock(vma->resv);
+	dma_resv_unlock(vma->resv);
 }
 
 int __i915_vma_do_pin(struct i915_vma *vma,
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index fd1a024703d2..ff3d9acc24fc 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -136,7 +136,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
 	int err = 0;
 
 	if (!write) {
-		err = reservation_object_reserve_shared(bo->gem.resv, 1);
+		err = dma_resv_reserve_shared(bo->gem.resv, 1);
 		if (err)
 			return err;
 	}
@@ -296,9 +296,9 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
-			reservation_object_add_excl_fence(bos[i]->gem.resv, fence);
+			dma_resv_add_excl_fence(bos[i]->gem.resv, fence);
 		else
-			reservation_object_add_shared_fence(bos[i]->gem.resv, fence);
+			dma_resv_add_shared_fence(bos[i]->gem.resv, fence);
 	}
 
 	lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
@@ -341,7 +341,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
 
 	timeout = drm_timeout_abs_to_jiffies(timeout_ns);
 
-	ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
+	ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
 	if (ret == 0)
 		ret = timeout ? -ETIMEDOUT : -EBUSY;
 
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index ae40b080ae47..3f230a28a2dc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -4,7 +4,7 @@
  */
 
 #include <linux/dma-buf.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include <drm/drm_modeset_helper.h>
 #include <drm/drm_fb_helper.h>
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 18da8d6ffc51..348a7ad2c044 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -663,13 +663,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	int i, ret;
 
-	fobj = reservation_object_get_list(obj->resv);
+	fobj = dma_resv_get_list(obj->resv);
 	if (!fobj || (fobj->shared_count == 0)) {
-		fence = reservation_object_get_excl(obj->resv);
+		fence = dma_resv_get_excl(obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
 		if (fence && (fence->context != fctx->context)) {
 			ret = dma_fence_wait(fence, true);
@@ -683,7 +683,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 
 	for (i = 0; i < fobj->shared_count; i++) {
 		fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(obj->resv));
+						dma_resv_held(obj->resv));
 		if (fence->context != fctx->context) {
 			ret = dma_fence_wait(fence, true);
 			if (ret)
@@ -701,9 +701,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 	msm_obj->gpu = gpu;
 	if (exclusive)
-		reservation_object_add_excl_fence(obj->resv, fence);
+		dma_resv_add_excl_fence(obj->resv, fence);
 	else
-		reservation_object_add_shared_fence(obj->resv, fence);
+		dma_resv_add_shared_fence(obj->resv, fence);
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
@@ -728,7 +728,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
 
-	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
+	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
 						  true,  remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -760,8 +760,8 @@ static void describe_fence(struct dma_fence *fence, const char *type,
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct reservation_object *robj = obj->resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj = obj->resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	struct msm_gem_vma *vma;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8cfcf8f09e3e..9e0953c2b7ce 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -8,7 +8,7 @@
 #define __MSM_GEM_H__
 
 #include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include "msm_drv.h"
 
 /* Additional internal-use only BO flags: */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 348f8c2be806..2e1556b7af26 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -225,7 +225,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 			 * strange place to call it.  OTOH this is a
 			 * convenient can-fail point to hook it in.
 			 */
-			ret = reservation_object_reserve_shared(msm_obj->base.resv,
+			ret = dma_resv_reserve_shared(msm_obj->base.resv,
 								1);
 			if (ret)
 				return ret;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 878ef6822812..e8506335cd15 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -17,7 +17,7 @@
 #include <linux/of_graph.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/pm_runtime.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/spinlock.h>
 
 #include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 89f8e76a2d7d..027a01b97d1c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -457,7 +457,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 		asyw->image.handle[0] = ctxdma->object.handle;
 	}
 
-	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.base.resv);
+	asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
 	asyw->image.offset[0] = fb->nvbo->bo.offset;
 
 	if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 99e391be9370..e0b1bbee936f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -188,7 +188,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 int
 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
-	       struct sg_table *sg, struct reservation_object *robj,
+	       struct sg_table *sg, struct dma_resv *robj,
 	       struct nouveau_bo **pnvbo)
 {
 	struct nouveau_drm *drm = cli->drm;
@@ -1324,7 +1324,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
-	struct dma_fence *fence = reservation_object_get_excl(bo->base.resv);
+	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
@@ -1655,12 +1655,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
 {
-	struct reservation_object *resv = nvbo->bo.base.resv;
+	struct dma_resv *resv = nvbo->bo.base.resv;
 
 	if (exclusive)
-		reservation_object_add_excl_fence(resv, &fence->base);
+		dma_resv_add_excl_fence(resv, &fence->base);
 	else if (fence)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		dma_resv_add_shared_fence(resv, &fence->base);
 }
 
 struct ttm_bo_driver nouveau_bo_driver = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index d675efe8e7f9..3ae84834bd5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -73,7 +73,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
 void nouveau_bo_move_init(struct nouveau_drm *);
 int  nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
 		    u32 tile_mode, u32 tile_flags, struct sg_table *sg,
-		    struct reservation_object *robj,
+		    struct dma_resv *robj,
 		    struct nouveau_bo **);
 int  nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
 int  nouveau_bo_unpin(struct nouveau_bo *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index e5f249ab216a..8df390078c85 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -335,20 +335,20 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
 	struct dma_fence *fence;
-	struct reservation_object *resv = nvbo->bo.base.resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *resv = nvbo->bo.base.resv;
+	struct dma_resv_list *fobj;
 	struct nouveau_fence *f;
 	int ret = 0, i;
 
 	if (!exclusive) {
-		ret = reservation_object_reserve_shared(resv, 1);
+		ret = dma_resv_reserve_shared(resv, 1);
 
 		if (ret)
 			return ret;
 	}
 
-	fobj = reservation_object_get_list(resv);
-	fence = reservation_object_get_excl(resv);
+	fobj = dma_resv_get_list(resv);
+	fence = dma_resv_get_excl(resv);
 
 	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
 		struct nouveau_channel *prev = NULL;
@@ -377,7 +377,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		bool must_wait = true;
 
 		fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(resv));
+						dma_resv_held(resv));
 
 		f = nouveau_local_fence(fence, chan->drm);
 		if (f) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c7368aa0bdec..c77302f969e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -887,7 +887,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	lret = reservation_object_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
+	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
 						   no_wait ? 0 : 30 * HZ);
 	if (!lret)
 		ret = -EBUSY;
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index e86ad7ae622b..7262ced9688a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -62,16 +62,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *nvbo;
-	struct reservation_object *robj = attach->dmabuf->resv;
+	struct dma_resv *robj = attach->dmabuf->resv;
 	u32 flags = 0;
 	int ret;
 
 	flags = TTM_PL_FLAG_TT;
 
-	reservation_object_lock(robj, NULL);
+	dma_resv_lock(robj, NULL);
 	ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
 			     sg, robj, &nvbo);
-	reservation_object_unlock(robj);
+	dma_resv_unlock(robj);
 	if (ret)
 		return ERR_PTR(ret);
 
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index a1352750984c..b41754658681 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -274,7 +274,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
 	if (!gem_obj)
 		return -ENOENT;
 
-	ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true,
+	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
 						  true, timeout);
 	if (!ret)
 		ret = timeout ? -ETIMEDOUT : -EBUSY;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index d567ce98494c..0fc4539fd08d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -6,7 +6,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <drm/gpu_scheduler.h>
 #include <drm/panfrost_drm.h>
 
@@ -199,7 +199,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
 	int i;
 
 	for (i = 0; i < bo_count; i++)
-		implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv);
+		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
 }
 
 static void panfrost_attach_object_fences(struct drm_gem_object **bos,
@@ -209,7 +209,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
 	int i;
 
 	for (i = 0; i < bo_count; i++)
-		reservation_object_add_excl_fence(bos[i]->resv, fence);
+		dma_resv_add_excl_fence(bos[i]->resv, fence);
 }
 
 int panfrost_job_push(struct panfrost_job *job)
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 94439212a5c5..a4f4175bbdbe 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -57,7 +57,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 	struct qxl_bo *bo;
 
 	list_for_each_entry(bo, &qdev->gem.objects, list) {
-		struct reservation_object_list *fobj;
+		struct dma_resv_list *fobj;
 		int rel;
 
 		rcu_read_lock();
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index df55b83e0a55..312216caeea2 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -238,7 +238,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
 			return ret;
 	}
 
-	ret = reservation_object_reserve_shared(bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
 	if (ret)
 		return ret;
 
@@ -458,9 +458,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 	list_for_each_entry(entry, &release->bos, head) {
 		bo = entry->bo;
 
-		reservation_object_add_shared_fence(bo->base.resv, &release->base);
+		dma_resv_add_shared_fence(bo->base.resv, &release->base);
 		ttm_bo_add_to_lru(bo);
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
 	ww_acquire_fini(&release->ticket);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 40f4d29edfe2..62eab82a64f9 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3659,7 +3659,7 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
 struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
 				    uint64_t src_offset, uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv)
+				    struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 589217a7e435..35b9dc6ce46a 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -579,7 +579,7 @@ void cik_sdma_fini(struct radeon_device *rdev)
 struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
 				  uint64_t src_offset, uint64_t dst_offset,
 				  unsigned num_gpu_pages,
-				  struct reservation_object *resv)
+				  struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 5505a04ca402..a46ee6c2099d 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -108,7 +108,7 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
 					uint64_t src_offset,
 					uint64_t dst_offset,
 					unsigned num_gpu_pages,
-					struct reservation_object *resv)
+					struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5c05193da520..7089dfc8c2a9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -891,7 +891,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
 				    uint64_t src_offset,
 				    uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv)
+				    struct dma_resv *resv)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	struct radeon_fence *fence;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 9ce6dd83d284..840401413c58 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
 				   uint64_t src_offset,
 				   uint64_t dst_offset,
 				   unsigned num_gpu_pages,
-				   struct reservation_object *resv)
+				   struct dma_resv *resv)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	struct radeon_fence *fence;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 7d175a9e8330..e937cc01910d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2963,7 +2963,7 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
 				     uint64_t src_offset, uint64_t dst_offset,
 				     unsigned num_gpu_pages,
-				     struct reservation_object *resv)
+				     struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 35d92ef8a0d4..af6c0da45f28 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -444,7 +444,7 @@ void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
 				   uint64_t src_offset, uint64_t dst_offset,
 				   unsigned num_gpu_pages,
-				   struct reservation_object *resv)
+				   struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3f7701321d21..de1d090df034 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -619,7 +619,7 @@ void radeon_sync_fence(struct radeon_sync *sync,
 		       struct radeon_fence *fence);
 int radeon_sync_resv(struct radeon_device *rdev,
 		     struct radeon_sync *sync,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     bool shared);
 int radeon_sync_rings(struct radeon_device *rdev,
 		      struct radeon_sync *sync,
@@ -1912,20 +1912,20 @@ struct radeon_asic {
 					     uint64_t src_offset,
 					     uint64_t dst_offset,
 					     unsigned num_gpu_pages,
-					     struct reservation_object *resv);
+					     struct dma_resv *resv);
 		u32 blit_ring_index;
 		struct radeon_fence *(*dma)(struct radeon_device *rdev,
 					    uint64_t src_offset,
 					    uint64_t dst_offset,
 					    unsigned num_gpu_pages,
-					    struct reservation_object *resv);
+					    struct dma_resv *resv);
 		u32 dma_ring_index;
 		/* method used for bo copy */
 		struct radeon_fence *(*copy)(struct radeon_device *rdev,
 					     uint64_t src_offset,
 					     uint64_t dst_offset,
 					     unsigned num_gpu_pages,
-					     struct reservation_object *resv);
+					     struct dma_resv *resv);
 		/* ring used for bo copies */
 		u32 copy_ring_index;
 	} copy;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e3f036c20d64..a74fa18cd27b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -86,7 +86,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
 				    uint64_t src_offset,
 				    uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv);
+				    struct dma_resv *resv);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size);
@@ -157,7 +157,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
 				   uint64_t src_offset,
 				   uint64_t dst_offset,
 				   unsigned num_gpu_pages,
-				   struct reservation_object *resv);
+				   struct dma_resv *resv);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
 /*
@@ -347,11 +347,11 @@ int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
 				     uint64_t src_offset, uint64_t dst_offset,
 				     unsigned num_gpu_pages,
-				     struct reservation_object *resv);
+				     struct dma_resv *resv);
 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
 				   uint64_t src_offset, uint64_t dst_offset,
 				   unsigned num_gpu_pages,
-				   struct reservation_object *resv);
+				   struct dma_resv *resv);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -473,7 +473,7 @@ void r700_cp_fini(struct radeon_device *rdev);
 struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
 				    uint64_t src_offset, uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv);
+				    struct dma_resv *resv);
 u32 rv770_get_xclk(struct radeon_device *rdev);
 int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 int rv770_get_temp(struct radeon_device *rdev);
@@ -547,7 +547,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
 struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
 					uint64_t src_offset, uint64_t dst_offset,
 					unsigned num_gpu_pages,
-					struct reservation_object *resv);
+					struct dma_resv *resv);
 int evergreen_get_temp(struct radeon_device *rdev);
 int evergreen_get_allowed_info_register(struct radeon_device *rdev,
 					u32 reg, u32 *val);
@@ -725,7 +725,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
 				 uint64_t src_offset, uint64_t dst_offset,
 				 unsigned num_gpu_pages,
-				 struct reservation_object *resv);
+				 struct dma_resv *resv);
 
 void si_dma_vm_copy_pages(struct radeon_device *rdev,
 			  struct radeon_ib *ib,
@@ -796,11 +796,11 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
 				  uint64_t src_offset, uint64_t dst_offset,
 				  unsigned num_gpu_pages,
-				  struct reservation_object *resv);
+				  struct dma_resv *resv);
 struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
 				    uint64_t src_offset, uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv);
+				    struct dma_resv *resv);
 int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 1ea50ce16312..ac9a5ec481c3 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -35,7 +35,7 @@
 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
 				    uint64_t saddr, uint64_t daddr,
 				    int flag, int n,
-				    struct reservation_object *resv)
+				    struct dma_resv *resv)
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 7e5254a34e84..7b5460678382 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -255,7 +255,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 	int r;
 
 	list_for_each_entry(reloc, &p->validated, tv.head) {
-		struct reservation_object *resv;
+		struct dma_resv *resv;
 
 		resv = reloc->robj->tbo.base.resv;
 		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7bf73230ac0b..e81b01f8db90 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
-	work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.base.resv));
+	work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
 	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(new_rbo);
 
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 03873f21a734..4cf58dbbe439 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -114,7 +114,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
 		if (!r)
 			r = -EBUSY;
 
@@ -449,7 +449,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	r = reservation_object_test_signaled_rcu(robj->tbo.base.resv, true);
+	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
 	if (r == 0)
 		r = -EBUSY;
 	else
@@ -478,7 +478,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
 	if (ret == 0)
 		r = -EBUSY;
 	else if (ret < 0)
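
Aside (editor's sketch, not part of the series): the wait helpers keep their behaviour across the rename, only the prefix changes. A caller waiting for a buffer to go idle, as in the radeon ioctls above, would now look roughly like this; "my_bo" and the 30 second timeout are placeholders for the example.

	long ret;

	/* Wait for all fences (shared and exclusive), interruptibly. */
	ret = dma_resv_wait_timeout_rcu(my_bo->tbo.base.resv, true, true,
					30 * HZ);
	if (ret == 0)
		ret = -EBUSY;	/* timed out, buffer still busy */
	else if (ret > 0)
		ret = 0;	/* all fences signaled */
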
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 0d64ace0e6c1..6902f998ede9 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -163,7 +163,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 				continue;
 			}
 
-			r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+			r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
 				true, false, MAX_SCHEDULE_TIMEOUT);
 			if (r <= 0)
 				DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 9db8ba29ef68..2abe1eab471f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -183,7 +183,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 int radeon_bo_create(struct radeon_device *rdev,
 		     unsigned long size, int byte_align, bool kernel,
 		     u32 domain, u32 flags, struct sg_table *sg,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     struct radeon_bo **bo_ptr)
 {
 	struct radeon_bo *bo;
@@ -610,7 +610,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 	int steal;
 	int i;
 
-	reservation_object_assert_held(bo->tbo.base.resv);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (!bo->tiling_flags)
 		return 0;
@@ -736,7 +736,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 				uint32_t *tiling_flags,
 				uint32_t *pitch)
 {
-	reservation_object_assert_held(bo->tbo.base.resv);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
@@ -748,7 +748,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 				bool force_drop)
 {
 	if (!force_drop)
-		reservation_object_assert_held(bo->tbo.base.resv);
+		dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;
@@ -870,10 +870,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
 		     bool shared)
 {
-	struct reservation_object *resv = bo->tbo.base.resv;
+	struct dma_resv *resv = bo->tbo.base.resv;
 
 	if (shared)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		dma_resv_add_shared_fence(resv, &fence->base);
 	else
-		reservation_object_add_excl_fence(resv, &fence->base);
+		dma_resv_add_excl_fence(resv, &fence->base);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index e5554bf9140e..d23f2ed4126e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,7 +126,7 @@ extern int radeon_bo_create(struct radeon_device *rdev,
 			    unsigned long size, int byte_align,
 			    bool kernel, u32 domain, u32 flags,
 			    struct sg_table *sg,
-			    struct reservation_object *resv,
+			    struct dma_resv *resv,
 			    struct radeon_bo **bo_ptr);
 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
 extern void radeon_bo_kunmap(struct radeon_bo *bo);
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 52b0d0cd8cbe..b906e8fbd5f3 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -63,15 +63,15 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
 							struct dma_buf_attachment *attach,
 							struct sg_table *sg)
 {
-	struct reservation_object *resv = attach->dmabuf->resv;
+	struct dma_resv *resv = attach->dmabuf->resv;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_bo *bo;
 	int ret;
 
-	reservation_object_lock(resv, NULL);
+	dma_resv_lock(resv, NULL);
 	ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
 			       RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
-	reservation_object_unlock(resv);
+	dma_resv_unlock(resv);
 	if (ret)
 		return ERR_PTR(ret);
 
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 8c9780b5a884..55cc77a73c7b 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -87,30 +87,30 @@ void radeon_sync_fence(struct radeon_sync *sync,
  */
 int radeon_sync_resv(struct radeon_device *rdev,
 		     struct radeon_sync *sync,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     bool shared)
 {
-	struct reservation_object_list *flist;
+	struct dma_resv_list *flist;
 	struct dma_fence *f;
 	struct radeon_fence *fence;
 	unsigned i;
 	int r = 0;
 
 	/* always sync to the exclusive fence */
-	f = reservation_object_get_excl(resv);
+	f = dma_resv_get_excl(resv);
 	fence = f ? to_radeon_fence(f) : NULL;
 	if (fence && fence->rdev == rdev)
 		radeon_sync_fence(sync, fence);
 	else if (f)
 		r = dma_fence_wait(f, true);
 
-	flist = reservation_object_get_list(resv);
+	flist = dma_resv_get_list(resv);
 	if (shared || !flist || r)
 		return r;
 
 	for (i = 0; i < flist->shared_count; ++i) {
 		f = rcu_dereference_protected(flist->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
 		fence = to_radeon_fence(f);
 		if (fence && fence->rdev == rdev)
 			radeon_sync_fence(sync, fence);
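
Aside (editor's sketch, not part of the series): radeon_sync_resv() above shows the renamed accessors for walking fences while the reservation lock is held. A generic consumer would follow the same shape; "resv" is assumed to be locked by the caller and the plain dma_fence_wait() calls stand in for driver-specific handling.

	struct dma_resv_list *list;
	struct dma_fence *excl;
	unsigned int i;

	/* The exclusive fence always has to be honoured. */
	excl = dma_resv_get_excl(resv);
	if (excl)
		dma_fence_wait(excl, false);

	/* Shared fences may be dereferenced because dma_resv_held(resv). */
	list = dma_resv_get_list(resv);
	for (i = 0; list && i < list->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(list->shared[i],
					      dma_resv_held(resv));
		dma_fence_wait(f, false);
	}
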
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 311e69c2ed7f..1ad5c3b86b64 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 		return -EINVAL;
 	}
 
-	f = reservation_object_get_excl(bo->tbo.base.resv);
+	f = dma_resv_get_excl(bo->tbo.base.resv);
 	if (f) {
 		r = radeon_fence_wait((struct radeon_fence *)f, false);
 		if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e48a05533126..e0ad547786e8 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
 		int r;
 
 		radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
-		r = reservation_object_reserve_shared(pt->tbo.base.resv, 1);
+		r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
 		if (r)
 			return r;
 
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index 0866b38ef264..4c91614b5e70 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -42,7 +42,7 @@
 struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
 				    uint64_t src_offset, uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv)
+				    struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 4773bb7d947e..d2fa302a5be9 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -231,7 +231,7 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
 				 uint64_t src_offset, uint64_t dst_offset,
 				 unsigned num_gpu_pages,
-				 struct reservation_object *resv)
+				 struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 10a861a1690c..58d1f2b28132 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -41,7 +41,7 @@
 #include <linux/file.h>
 #include <linux/module.h>
 #include <linux/atomic.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
@@ -161,7 +161,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	atomic_dec(&bo->bdev->glob->bo_count);
 	dma_fence_put(bo->moving);
 	if (!ttm_bo_uses_embedded_gem_object(bo))
-		reservation_object_fini(&bo->base._resv);
+		dma_resv_fini(&bo->base._resv);
 	mutex_destroy(&bo->wu_mutex);
 	bo->destroy(bo);
 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
@@ -173,7 +173,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	if (!list_empty(&bo->lru))
 		return;
@@ -244,7 +244,7 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 			     struct ttm_lru_bulk_move *bulk)
 {
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	ttm_bo_del_from_lru(bo);
 	ttm_bo_add_to_lru(bo);
@@ -277,8 +277,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		if (!pos->first)
 			continue;
 
-		reservation_object_assert_held(pos->first->base.resv);
-		reservation_object_assert_held(pos->last->base.resv);
+		dma_resv_assert_held(pos->first->base.resv);
+		dma_resv_assert_held(pos->last->base.resv);
 
 		man = &pos->first->bdev->man[TTM_PL_TT];
 		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -292,8 +292,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		if (!pos->first)
 			continue;
 
-		reservation_object_assert_held(pos->first->base.resv);
-		reservation_object_assert_held(pos->last->base.resv);
+		dma_resv_assert_held(pos->first->base.resv);
+		dma_resv_assert_held(pos->last->base.resv);
 
 		man = &pos->first->bdev->man[TTM_PL_VRAM];
 		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -307,8 +307,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		if (!pos->first)
 			continue;
 
-		reservation_object_assert_held(pos->first->base.resv);
-		reservation_object_assert_held(pos->last->base.resv);
+		dma_resv_assert_held(pos->first->base.resv);
+		dma_resv_assert_held(pos->last->base.resv);
 
 		lru = &pos->first->bdev->glob->swap_lru[i];
 		list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
@@ -442,29 +442,29 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 	if (bo->base.resv == &bo->base._resv)
 		return 0;
 
-	BUG_ON(!reservation_object_trylock(&bo->base._resv));
+	BUG_ON(!dma_resv_trylock(&bo->base._resv));
 
-	r = reservation_object_copy_fences(&bo->base._resv, bo->base.resv);
+	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
 	if (r)
-		reservation_object_unlock(&bo->base._resv);
+		dma_resv_unlock(&bo->base._resv);
 
 	return r;
 }
 
 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	int i;
 
-	fobj = reservation_object_get_list(&bo->base._resv);
-	fence = reservation_object_get_excl(&bo->base._resv);
+	fobj = dma_resv_get_list(&bo->base._resv);
+	fence = dma_resv_get_excl(&bo->base._resv);
 	if (fence && !fence->ops->signaled)
 		dma_fence_enable_sw_signaling(fence);
 
 	for (i = 0; fobj && i < fobj->shared_count; ++i) {
 		fence = rcu_dereference_protected(fobj->shared[i],
-					reservation_object_held(bo->base.resv));
+					dma_resv_held(bo->base.resv));
 
 		if (!fence->ops->signaled)
 			dma_fence_enable_sw_signaling(fence);
@@ -482,23 +482,23 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		/* Last resort, if we fail to allocate memory for the
 		 * fences block for the BO to become idle
 		 */
-		reservation_object_wait_timeout_rcu(bo->base.resv, true, false,
+		dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
 						    30 * HZ);
 		spin_lock(&glob->lru_lock);
 		goto error;
 	}
 
 	spin_lock(&glob->lru_lock);
-	ret = reservation_object_trylock(bo->base.resv) ? 0 : -EBUSY;
+	ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
 	if (!ret) {
-		if (reservation_object_test_signaled_rcu(&bo->base._resv, true)) {
+		if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
 			ttm_bo_del_from_lru(bo);
 			spin_unlock(&glob->lru_lock);
 			if (bo->base.resv != &bo->base._resv)
-				reservation_object_unlock(&bo->base._resv);
+				dma_resv_unlock(&bo->base._resv);
 
 			ttm_bo_cleanup_memtype_use(bo);
-			reservation_object_unlock(bo->base.resv);
+			dma_resv_unlock(bo->base.resv);
 			return;
 		}
 
@@ -514,10 +514,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 			ttm_bo_add_to_lru(bo);
 		}
 
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	}
 	if (bo->base.resv != &bo->base._resv)
-		reservation_object_unlock(&bo->base._resv);
+		dma_resv_unlock(&bo->base._resv);
 
 error:
 	kref_get(&bo->list_kref);
@@ -546,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			       bool unlock_resv)
 {
 	struct ttm_bo_global *glob = bo->bdev->glob;
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 	int ret;
 
 	if (unlikely(list_empty(&bo->ddestroy)))
@@ -554,7 +554,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	else
 		resv = &bo->base._resv;
 
-	if (reservation_object_test_signaled_rcu(resv, true))
+	if (dma_resv_test_signaled_rcu(resv, true))
 		ret = 0;
 	else
 		ret = -EBUSY;
@@ -563,10 +563,10 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 		long lret;
 
 		if (unlock_resv)
-			reservation_object_unlock(bo->base.resv);
+			dma_resv_unlock(bo->base.resv);
 		spin_unlock(&glob->lru_lock);
 
-		lret = reservation_object_wait_timeout_rcu(resv, true,
+		lret = dma_resv_wait_timeout_rcu(resv, true,
 							   interruptible,
 							   30 * HZ);
 
@@ -576,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			return -EBUSY;
 
 		spin_lock(&glob->lru_lock);
-		if (unlock_resv && !reservation_object_trylock(bo->base.resv)) {
+		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
 			/*
 			 * We raced, and lost, someone else holds the reservation now,
 			 * and is probably busy in ttm_bo_cleanup_memtype_use.
@@ -593,7 +593,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
 		if (unlock_resv)
-			reservation_object_unlock(bo->base.resv);
+			dma_resv_unlock(bo->base.resv);
 		spin_unlock(&glob->lru_lock);
 		return ret;
 	}
@@ -606,7 +606,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	ttm_bo_cleanup_memtype_use(bo);
 
 	if (unlock_resv)
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 
 	return 0;
 }
@@ -634,12 +634,12 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 
 		if (remove_all || bo->base.resv != &bo->base._resv) {
 			spin_unlock(&glob->lru_lock);
-			reservation_object_lock(bo->base.resv, NULL);
+			dma_resv_lock(bo->base.resv, NULL);
 
 			spin_lock(&glob->lru_lock);
 			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 
-		} else if (reservation_object_trylock(bo->base.resv)) {
+		} else if (dma_resv_trylock(bo->base.resv)) {
 			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 		} else {
 			spin_unlock(&glob->lru_lock);
@@ -708,7 +708,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 	struct ttm_placement placement;
 	int ret = 0;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	placement.num_placement = 0;
 	placement.num_busy_placement = 0;
@@ -779,7 +779,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 	bool ret = false;
 
 	if (bo->base.resv == ctx->resv) {
-		reservation_object_assert_held(bo->base.resv);
+		dma_resv_assert_held(bo->base.resv);
 		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
 		    || !list_empty(&bo->ddestroy))
 			ret = true;
@@ -787,7 +787,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 		if (busy)
 			*busy = false;
 	} else {
-		ret = reservation_object_trylock(bo->base.resv);
+		ret = dma_resv_trylock(bo->base.resv);
 		*locked = ret;
 		if (busy)
 			*busy = !ret;
@@ -815,10 +815,10 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
 		return -EBUSY;
 
 	if (ctx->interruptible)
-		r = reservation_object_lock_interruptible(busy_bo->base.resv,
+		r = dma_resv_lock_interruptible(busy_bo->base.resv,
 							  ticket);
 	else
-		r = reservation_object_lock(busy_bo->base.resv, ticket);
+		r = dma_resv_lock(busy_bo->base.resv, ticket);
 
 	/*
 	 * TODO: It would be better to keep the BO locked until allocation is at
@@ -826,7 +826,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
 	 * of TTM.
 	 */
 	if (!r)
-		reservation_object_unlock(busy_bo->base.resv);
+		dma_resv_unlock(busy_bo->base.resv);
 
 	return r == -EDEADLK ? -EBUSY : r;
 }
@@ -852,7 +852,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
 							    &busy)) {
 				if (busy && !busy_bo && ticket !=
-				    reservation_object_locking_ctx(bo->base.resv))
+				    dma_resv_locking_ctx(bo->base.resv))
 					busy_bo = bo;
 				continue;
 			}
@@ -860,7 +860,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 			if (place && !bdev->driver->eviction_valuable(bo,
 								      place)) {
 				if (locked)
-					reservation_object_unlock(bo->base.resv);
+					dma_resv_unlock(bo->base.resv);
 				continue;
 			}
 			break;
@@ -932,9 +932,9 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 	spin_unlock(&man->move_lock);
 
 	if (fence) {
-		reservation_object_add_shared_fence(bo->base.resv, fence);
+		dma_resv_add_shared_fence(bo->base.resv, fence);
 
-		ret = reservation_object_reserve_shared(bo->base.resv, 1);
+		ret = dma_resv_reserve_shared(bo->base.resv, 1);
 		if (unlikely(ret)) {
 			dma_fence_put(fence);
 			return ret;
@@ -961,7 +961,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	struct ww_acquire_ctx *ticket;
 	int ret;
 
-	ticket = reservation_object_locking_ctx(bo->base.resv);
+	ticket = dma_resv_locking_ctx(bo->base.resv);
 	do {
 		ret = (*man->func->get_node)(man, bo, place, mem);
 		if (unlikely(ret != 0))
@@ -1091,7 +1091,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	bool type_found = false;
 	int i, ret;
 
-	ret = reservation_object_reserve_shared(bo->base.resv, 1);
+	ret = dma_resv_reserve_shared(bo->base.resv, 1);
 	if (unlikely(ret))
 		return ret;
 
@@ -1172,7 +1172,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	int ret = 0;
 	struct ttm_mem_reg mem;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1242,7 +1242,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	int ret;
 	uint32_t new_flags;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 	/*
 	 * Check whether we need to move buffer.
 	 */
@@ -1279,7 +1279,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 			 struct ttm_operation_ctx *ctx,
 			 size_t acc_size,
 			 struct sg_table *sg,
-			 struct reservation_object *resv,
+			 struct dma_resv *resv,
 			 void (*destroy) (struct ttm_buffer_object *))
 {
 	int ret = 0;
@@ -1333,7 +1333,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	bo->sg = sg;
 	if (resv) {
 		bo->base.resv = resv;
-		reservation_object_assert_held(bo->base.resv);
+		dma_resv_assert_held(bo->base.resv);
 	} else {
 		bo->base.resv = &bo->base._resv;
 	}
@@ -1342,7 +1342,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		 * bo.gem is not initialized, so we have to setup the
 		 * struct elements we want use regardless.
 		 */
-		reservation_object_init(&bo->base._resv);
+		dma_resv_init(&bo->base._resv);
 		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
@@ -1360,7 +1360,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 * since otherwise lockdep will be angered in radeon.
 	 */
 	if (!resv) {
-		locked = reservation_object_trylock(bo->base.resv);
+		locked = dma_resv_trylock(bo->base.resv);
 		WARN_ON(!locked);
 	}
 
@@ -1394,7 +1394,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		bool interruptible,
 		size_t acc_size,
 		struct sg_table *sg,
-		struct reservation_object *resv,
+		struct dma_resv *resv,
 		void (*destroy) (struct ttm_buffer_object *))
 {
 	struct ttm_operation_ctx ctx = { interruptible, false };
@@ -1804,13 +1804,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	long timeout = 15 * HZ;
 
 	if (no_wait) {
-		if (reservation_object_test_signaled_rcu(bo->base.resv, true))
+		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
 			return 0;
 		else
 			return -EBUSY;
 	}
 
-	timeout = reservation_object_wait_timeout_rcu(bo->base.resv, true,
+	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
 						      interruptible, timeout);
 	if (timeout < 0)
 		return timeout;
@@ -1818,7 +1818,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	if (timeout == 0)
 		return -EBUSY;
 
-	reservation_object_add_excl_fence(bo->base.resv, NULL);
+	dma_resv_add_excl_fence(bo->base.resv, NULL);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_wait);
@@ -1934,7 +1934,7 @@ out:
 	 * already swapped buffer.
 	 */
 	if (locked)
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
@@ -1972,14 +1972,14 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
 	ret = mutex_lock_interruptible(&bo->wu_mutex);
 	if (unlikely(ret != 0))
 		return -ERESTARTSYS;
-	if (!reservation_object_is_locked(bo->base.resv))
+	if (!dma_resv_is_locked(bo->base.resv))
 		goto out_unlock;
-	ret = reservation_object_lock_interruptible(bo->base.resv, NULL);
+	ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
 	if (ret == -EINTR)
 		ret = -ERESTARTSYS;
 	if (unlikely(ret != 0))
 		goto out_unlock;
-	reservation_object_unlock(bo->base.resv);
+	dma_resv_unlock(bo->base.resv);
 
 out_unlock:
 	mutex_unlock(&bo->wu_mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 425a6d627b30..fe81c565e7ef 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -38,7 +38,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 struct ttm_transfer_obj {
 	struct ttm_buffer_object base;
@@ -518,8 +518,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	fbo->base.destroy = &ttm_transfered_destroy;
 	fbo->base.acc_size = 0;
 	fbo->base.base.resv = &fbo->base.base._resv;
-	reservation_object_init(fbo->base.base.resv);
-	ret = reservation_object_trylock(fbo->base.base.resv);
+	dma_resv_init(fbo->base.base.resv);
+	ret = dma_resv_trylock(fbo->base.base.resv);
 	WARN_ON(!ret);
 
 	*new_obj = &fbo->base;
@@ -689,7 +689,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	int ret;
 	struct ttm_buffer_object *ghost_obj;
 
-	reservation_object_add_excl_fence(bo->base.resv, fence);
+	dma_resv_add_excl_fence(bo->base.resv, fence);
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false);
 		if (ret)
@@ -716,7 +716,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		reservation_object_add_excl_fence(ghost_obj->base.resv, fence);
+		dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
 
 		/**
 		 * If we're not moving to fixed memory, the TTM object
@@ -752,7 +752,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 
 	int ret;
 
-	reservation_object_add_excl_fence(bo->base.resv, fence);
+	dma_resv_add_excl_fence(bo->base.resv, fence);
 
 	if (!evict) {
 		struct ttm_buffer_object *ghost_obj;
@@ -772,7 +772,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		reservation_object_add_excl_fence(ghost_obj->base.resv, fence);
+		dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
 
 		/**
 		 * If we're not moving to fixed memory, the TTM object
@@ -841,7 +841,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 	if (ret)
 		return ret;
 
-	ret = reservation_object_copy_fences(ghost->base.resv, bo->base.resv);
+	ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
 	/* Last resort, wait for the BO to be idle when we are OOM */
 	if (ret)
 		ttm_bo_wait(bo, false, false);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 85f5bcbe0c76..76eedb963693 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -71,7 +71,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 		ttm_bo_get(bo);
 		up_read(&vmf->vma->vm_mm->mmap_sem);
 		(void) dma_fence_wait(bo->moving, true);
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 		ttm_bo_put(bo);
 		goto out_unlock;
 	}
@@ -131,7 +131,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	 * for reserve, and if it fails, retry the fault after waiting
 	 * for the buffer to become unreserved.
 	 */
-	if (unlikely(!reservation_object_trylock(bo->base.resv))) {
+	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 				ttm_bo_get(bo);
@@ -296,7 +296,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 out_io_unlock:
 	ttm_mem_io_unlock(man);
 out_unlock:
-	reservation_object_unlock(bo->base.resv);
+	dma_resv_unlock(bo->base.resv);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 3aefe72fb5cb..131dae8f4170 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -39,7 +39,7 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
 	list_for_each_entry_continue_reverse(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	}
 }
 
@@ -71,7 +71,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 
 		if (list_empty(&bo->lru))
 			ttm_bo_add_to_lru(bo);
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
 
@@ -114,7 +114,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 
 		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
 		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			reservation_object_unlock(bo->base.resv);
+			dma_resv_unlock(bo->base.resv);
 
 			ret = -EBUSY;
 
@@ -130,7 +130,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			if (!entry->num_shared)
 				continue;
 
-			ret = reservation_object_reserve_shared(bo->base.resv,
+			ret = dma_resv_reserve_shared(bo->base.resv,
 								entry->num_shared);
 			if (!ret)
 				continue;
@@ -144,16 +144,16 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 
 		if (ret == -EDEADLK) {
 			if (intr) {
-				ret = reservation_object_lock_slow_interruptible(bo->base.resv,
+				ret = dma_resv_lock_slow_interruptible(bo->base.resv,
 										 ticket);
 			} else {
-				reservation_object_lock_slow(bo->base.resv, ticket);
+				dma_resv_lock_slow(bo->base.resv, ticket);
 				ret = 0;
 			}
 		}
 
 		if (!ret && entry->num_shared)
-			ret = reservation_object_reserve_shared(bo->base.resv,
+			ret = dma_resv_reserve_shared(bo->base.resv,
 								entry->num_shared);
 
 		if (unlikely(ret != 0)) {
@@ -201,14 +201,14 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
 		if (entry->num_shared)
-			reservation_object_add_shared_fence(bo->base.resv, fence);
+			dma_resv_add_shared_fence(bo->base.resv, fence);
 		else
-			reservation_object_add_excl_fence(bo->base.resv, fence);
+			dma_resv_add_excl_fence(bo->base.resv, fence);
 		if (list_empty(&bo->lru))
 			ttm_bo_add_to_lru(bo);
 		else
 			ttm_bo_move_to_lru_tail(bo, NULL);
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
 	if (ticket)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 00b4a3337840..e0e9b4f69db6 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -48,7 +48,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 	struct ttm_bo_device *bdev = bo->bdev;
 	uint32_t page_flags = 0;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	if (bdev->need_dma32)
 		page_flags |= TTM_PAGE_FLAG_DMA32;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 79744137d89f..5d80507b539b 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -409,7 +409,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
 	if (args->pad != 0)
 		return -EINVAL;
 
-	ret = drm_gem_reservation_object_wait(file_priv, args->handle,
+	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
 					      true, timeout_jiffies);
 
 	/* Decrement the user's timeout, in case we got interrupted
@@ -495,7 +495,7 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
 
 	for (i = 0; i < job->bo_count; i++) {
 		/* XXX: Use shared fences for read-only objects. */
-		reservation_object_add_excl_fence(job->bo[i]->resv,
+		dma_resv_add_excl_fence(job->bo[i]->resv,
 						  job->done_fence);
 	}
 
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index b72b760e3018..7a06cb6e31c5 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -543,7 +543,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
 		bo = to_vc4_bo(&exec->bo[i]->base);
 		bo->seqno = seqno;
 
-		reservation_object_add_shared_fence(bo->base.base.resv, exec->fence);
+		dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
 	}
 
 	list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -554,7 +554,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
 		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
 		bo->write_seqno = seqno;
 
-		reservation_object_add_excl_fence(bo->base.base.resv, exec->fence);
+		dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
 	}
 }
 
@@ -642,7 +642,7 @@ retry:
 	for (i = 0; i < exec->bo_count; i++) {
 		bo = &exec->bo[i]->base;
 
-		ret = reservation_object_reserve_shared(bo->resv, 1);
+		ret = dma_resv_reserve_shared(bo->resv, 1);
 		if (ret) {
 			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
 			return ret;
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index d8630467549c..9268f6fc3f66 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -21,7 +21,7 @@
  */
 
 #include <linux/dma-buf.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include <drm/drm_file.h>
 
@@ -128,7 +128,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 {
 	struct drm_vgem_fence_attach *arg = data;
 	struct vgem_file *vfile = file->driver_priv;
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 	struct drm_gem_object *obj;
 	struct dma_fence *fence;
 	int ret;
@@ -151,7 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 
 	/* Check for a conflicting fence */
 	resv = obj->resv;
-	if (!reservation_object_test_signaled_rcu(resv,
+	if (!dma_resv_test_signaled_rcu(resv,
 						  arg->flags & VGEM_FENCE_WRITE)) {
 		ret = -EBUSY;
 		goto err_fence;
@@ -159,12 +159,12 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 
 	/* Expose the fence via the dma-buf */
 	ret = 0;
-	reservation_object_lock(resv, NULL);
+	dma_resv_lock(resv, NULL);
 	if (arg->flags & VGEM_FENCE_WRITE)
-		reservation_object_add_excl_fence(resv, fence);
-	else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0)
-		reservation_object_add_shared_fence(resv, fence);
-	reservation_object_unlock(resv);
+		dma_resv_add_excl_fence(resv, fence);
+	else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
+		dma_resv_add_shared_fence(resv, fence);
+	dma_resv_unlock(resv);
 
 	/* Record the fence in our idr for later signaling */
 	if (ret == 0) {
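
Aside (editor's sketch, not part of the series): the vgem hunk above is the canonical pattern for attaching a fence after the rename: take the lock, reserve a shared slot if needed, add the fence, unlock. "resv", "fence" and "write" are placeholders for the example.

	int err = 0;

	dma_resv_lock(resv, NULL);
	if (write) {
		/* Writers install the exclusive fence. */
		dma_resv_add_excl_fence(resv, fence);
	} else {
		/* Readers need a reserved slot before adding a shared fence. */
		err = dma_resv_reserve_shared(resv, 1);
		if (!err)
			dma_resv_add_shared_fence(resv, fence);
	}
	dma_resv_unlock(resv);
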
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 3c430dd65f67..0a88ef11b9d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -396,7 +396,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 		(vgdev, qobj->hw_res_handle,
 		 vfpriv->ctx_id, offset, args->level,
 		 &box, fence);
-	reservation_object_add_excl_fence(qobj->tbo.base.resv,
+	dma_resv_add_excl_fence(qobj->tbo.base.resv,
 					  &fence->f);
 
 	dma_fence_put(&fence->f);
@@ -450,7 +450,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			(vgdev, qobj,
 			 vfpriv ? vfpriv->ctx_id : 0, offset,
 			 args->level, &box, fence);
-		reservation_object_add_excl_fence(qobj->tbo.base.resv,
+		dma_resv_add_excl_fence(qobj->tbo.base.resv,
 						  &fence->f);
 		dma_fence_put(&fence->f);
 	}
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 3dc08f991a8d..a492ac3f4a7e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -212,7 +212,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 			 0, 0, vgfb->fence);
 		ret = virtio_gpu_object_reserve(bo, false);
 		if (!ret) {
-			reservation_object_add_excl_fence(bo->tbo.base.resv,
+			dma_resv_add_excl_fence(bo->tbo.base.resv,
 							  &vgfb->fence->f);
 			dma_fence_put(&vgfb->fence->f);
 			vgfb->fence = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 6c01ad2785dd..bb46ca0c458f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 
 	/* Buffer objects need to be either pinned or reserved: */
 	if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
-		reservation_object_assert_held(dst->base.resv);
+		dma_resv_assert_held(dst->base.resv);
 	if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
-		reservation_object_assert_held(src->base.resv);
+		dma_resv_assert_held(src->base.resv);
 
 	if (dst->ttm->state == tt_unpopulated) {
 		ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 369034c0de31..a05ef9d0a2e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -342,7 +342,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 	uint32_t old_mem_type = bo->mem.mem_type;
 	int ret;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	if (pin) {
 		if (vbo->pin_count++ > 0)
@@ -689,7 +689,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 		long lret;
 
-		lret = reservation_object_wait_timeout_rcu
+		lret = dma_resv_wait_timeout_rcu
 			(bo->base.resv, true, true,
 			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 		if (!lret)
@@ -1007,10 +1007,10 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
 
 	if (fence == NULL) {
 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-		reservation_object_add_excl_fence(bo->base.resv, &fence->base);
+		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
 		dma_fence_put(&fence->base);
 	} else
-		reservation_object_add_excl_fence(bo->base.resv, &fence->base);
+		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
 }
 
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 7984f172ec4a..1e3bd58bc2d4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -169,7 +169,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
 	} *cmd;
 
 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
 	if (!cmd)
@@ -311,7 +311,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 		return 0;
 
 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	mutex_lock(&dev_priv->binding_mutex);
 	if (!vcotbl->scrubbed)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 701643b7b0c4..0b5472450633 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -402,14 +402,14 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 
 	if (switch_backup && new_backup != res->backup) {
 		if (res->backup) {
-			reservation_object_assert_held(res->backup->base.base.resv);
+			dma_resv_assert_held(res->backup->base.base.resv);
 			list_del_init(&res->mob_head);
 			vmw_bo_unreference(&res->backup);
 		}
 
 		if (new_backup) {
 			res->backup = vmw_bo_reference(new_backup);
-			reservation_object_assert_held(new_backup->base.base.resv);
+			dma_resv_assert_held(new_backup->base.base.resv);
 			list_add_tail(&res->mob_head, &new_backup->res_list);
 		} else {
 			res->backup = NULL;
@@ -691,7 +691,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 		.num_shared = 0
 	};
 
-	reservation_object_assert_held(vbo->base.base.resv);
+	dma_resv_assert_held(vbo->base.base.resv);
 	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
 		if (!res->func->unbind)
 			continue;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 94aae87b1138..037b1f7a87a5 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -87,7 +87,7 @@ struct module;
 
 struct device_node;
 struct videomode;
-struct reservation_object;
+struct dma_resv;
 struct dma_buf_attachment;
 
 struct pci_dev;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index ae693c0666cd..6aaba14f5972 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -35,7 +35,7 @@
  */
 
 #include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include <drm/drm_vma_manager.h>
 
@@ -276,7 +276,7 @@ struct drm_gem_object {
 	 *
 	 * Normally (@resv == &@_resv) except for imported GEM objects.
 	 */
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 
 	/**
 	 * @_resv:
@@ -285,7 +285,7 @@ struct drm_gem_object {
 	 *
 	 * This is unused for imported GEM objects.
 	 */
-	struct reservation_object _resv;
+	struct dma_resv _resv;
 
 	/**
 	 * @funcs:
@@ -390,7 +390,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
 			   int count, struct drm_gem_object ***objs_out);
 struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
-long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 				    bool wait_all, unsigned long timeout);
 int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
 			      struct ww_acquire_ctx *acquire_ctx);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 65ef5376de59..43c4929a2171 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -40,7 +40,7 @@
 #include <linux/mutex.h>
 #include <linux/mm.h>
 #include <linux/bitmap.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 struct ttm_bo_global;
 
@@ -273,7 +273,7 @@ struct ttm_bo_kmap_obj {
 struct ttm_operation_ctx {
 	bool interruptible;
 	bool no_wait_gpu;
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 	uint64_t bytes_moved;
 	uint32_t flags;
 };
@@ -493,7 +493,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
  * @page_alignment: Data alignment in pages.
  * @ctx: TTM operation context for memory allocation.
  * @acc_size: Accounted size for this object.
- * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
  * @destroy: Destroy function. Use NULL for kfree().
  *
  * This function initializes a pre-allocated struct ttm_buffer_object.
@@ -526,7 +526,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 			 struct ttm_operation_ctx *ctx,
 			 size_t acc_size,
 			 struct sg_table *sg,
-			 struct reservation_object *resv,
+			 struct dma_resv *resv,
 			 void (*destroy) (struct ttm_buffer_object *));
 
 /**
@@ -545,7 +545,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
  * point to the shmem object backing a GEM object if TTM is used to back a
  * GEM user interface.
  * @acc_size: Accounted size for this object.
- * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
  * @destroy: Destroy function. Use NULL for kfree().
  *
  * This function initializes a pre-allocated struct ttm_buffer_object.
@@ -570,7 +570,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
 		unsigned long size, enum ttm_bo_type type,
 		struct ttm_placement *placement,
 		uint32_t page_alignment, bool interrubtible, size_t acc_size,
-		struct sg_table *sg, struct reservation_object *resv,
+		struct sg_table *sg, struct dma_resv *resv,
 		void (*destroy) (struct ttm_buffer_object *));
 
 /**
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 3f1935c19a66..e88e00c6cbf2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -35,7 +35,7 @@
 #include <linux/workqueue.h>
 #include <linux/fs.h>
 #include <linux/spinlock.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include "ttm_bo_api.h"
 #include "ttm_memory.h"
@@ -654,14 +654,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
 		if (WARN_ON(ticket))
 			return -EBUSY;
 
-		success = reservation_object_trylock(bo->base.resv);
+		success = dma_resv_trylock(bo->base.resv);
 		return success ? 0 : -EBUSY;
 	}
 
 	if (interruptible)
-		ret = reservation_object_lock_interruptible(bo->base.resv, ticket);
+		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
 	else
-		ret = reservation_object_lock(bo->base.resv, ticket);
+		ret = dma_resv_lock(bo->base.resv, ticket);
 	if (ret == -EINTR)
 		return -ERESTARTSYS;
 	return ret;
@@ -745,10 +745,10 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 	WARN_ON(!kref_read(&bo->kref));
 
 	if (interruptible)
-		ret = reservation_object_lock_slow_interruptible(bo->base.resv,
+		ret = dma_resv_lock_slow_interruptible(bo->base.resv,
 								 ticket);
 	else
-		reservation_object_lock_slow(bo->base.resv, ticket);
+		dma_resv_lock_slow(bo->base.resv, ticket);
 
 	if (likely(ret == 0))
 		ttm_bo_del_sub_from_lru(bo);
@@ -773,7 +773,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 	else
 		ttm_bo_move_to_lru_tail(bo, NULL);
 	spin_unlock(&bo->bdev->glob->lru_lock);
-	reservation_object_unlock(bo->base.resv);
+	dma_resv_unlock(bo->base.resv);
 }
 
 /*
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index bae060fae862..ec212cb27fdc 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -306,7 +306,7 @@ struct dma_buf {
 	struct module *owner;
 	struct list_head list_node;
 	void *priv;
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 
 	/* poll support */
 	wait_queue_head_t poll;
@@ -365,7 +365,7 @@ struct dma_buf_export_info {
 	const struct dma_buf_ops *ops;
 	size_t size;
 	int flags;
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 	void *priv;
 };
 
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index bea1d05cf51e..404aa748eda6 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -279,7 +279,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
 }
 
 /**
- * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ * dma_fence_get_rcu - get a fence from a dma_resv_list with
  *                     rcu read lock
  * @fence: fence to increase refcount of
  *
@@ -303,7 +303,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
  * so long as the caller is using RCU on the pointer to the fence.
  *
  * An alternative mechanism is to employ a seqlock to protect a bunch of
- * fences, such as used by struct reservation_object. When using a seqlock,
+ * fences, such as used by struct dma_resv. When using a seqlock,
  * the seqlock must be taken before and checked after a reference to the
  * fence is acquired (as shown here).
  *
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
new file mode 100644
index 000000000000..38f2802afabb
--- /dev/null
+++ b/include/linux/dma-resv.h
@@ -0,0 +1,312 @@
+/*
+ * Header file for reservations for dma-buf and ttm
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Copyright (C) 2012-2013 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _LINUX_RESERVATION_H
+#define _LINUX_RESERVATION_H
+
+#include <linux/ww_mutex.h>
+#include <linux/dma-fence.h>
+#include <linux/slab.h>
+#include <linux/seqlock.h>
+#include <linux/rcupdate.h>
+
+extern struct ww_class reservation_ww_class;
+
+/**
+ * struct dma_resv_list - a list of shared fences
+ * @rcu: for internal use
+ * @shared_count: number of fences in the shared table
+ * @shared_max: for growing shared fence table
+ * @shared: shared fence table
+ */
+struct dma_resv_list {
+	struct rcu_head rcu;
+	u32 shared_count, shared_max;
+	struct dma_fence __rcu *shared[];
+};
+
+/**
+ * struct dma_resv - a reservation object manages fences for a buffer
+ * @lock: update side lock
+ * @fence_excl: the exclusive fence, if there is one currently
+ * @fence: list of current shared fences
+ */
+struct dma_resv {
+	struct ww_mutex lock;
+
+	struct dma_fence __rcu *fence_excl;
+	struct dma_resv_list __rcu *fence;
+};
+
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+
+/**
+ * dma_resv_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any).  Does NOT take a
+ * reference. Writers must hold obj->lock, readers may only
+ * hold a RCU read side lock.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
+static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj)
+{
+	return rcu_dereference_protected(obj->fence_excl,
+					 dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_get_list - get the reservation object's
+ * shared fence list, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list.  Does NOT take references to
+ * the fence.  The obj->lock must be held.
+ */
+static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
+{
+	return rcu_dereference_protected(obj->fence,
+					 dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_fences - read consistent fence pointers
+ * @obj: reservation object where we get the fences from
+ * @excl: pointer for the exclusive fence
+ * @list: pointer for the shared fence list
+ * @shared_count: pointer for the number of shared fences
+ *
+ * Make sure we have a consistent exclusive fence and shared fence list.
+ * Must be called with rcu read side lock held.
+ */
+static inline void dma_resv_fences(struct dma_resv *obj,
+				   struct dma_fence **excl,
+				   struct dma_resv_list **list,
+				   u32 *shared_count)
+{
+	do {
+		*excl = rcu_dereference(obj->fence_excl);
+		*list = rcu_dereference(obj->fence);
+		*shared_count = *list ? (*list)->shared_count : 0;
+		smp_rmb(); /* See dma_resv_add_excl_fence */
+	} while (rcu_access_pointer(obj->fence_excl) != *excl);
+}
+
+/**
+ * dma_resv_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments its
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
+static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj)
+{
+	struct dma_fence *fence;
+
+	if (!rcu_access_pointer(obj->fence_excl))
+		return NULL;
+
+	rcu_read_lock();
+	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
+	rcu_read_unlock();
+
+	return fence;
+}
+
+/**
+ * dma_resv_lock - lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Locks the reservation object for exclusive access and modification. Note
+ * that the lock is only against other writers; readers will run concurrently
+ * with a writer under RCU. The seqlock is used to notify readers if they
+ * overlap with a writer.
+ *
+ * As the reservation object may be locked by multiple parties in an
+ * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
+ * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
+ * object may be locked by itself by passing NULL as @ctx.
+ */
+static inline int dma_resv_lock(struct dma_resv *obj,
+				struct ww_acquire_ctx *ctx)
+{
+	return ww_mutex_lock(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_interruptible - lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Locks the reservation object interruptibly for exclusive access and
+ * modification. Note that the lock is only against other writers; readers
+ * will run concurrently with a writer under RCU. The seqlock is used to
+ * notify readers if they overlap with a writer.
+ *
+ * As the reservation object may be locked by multiple parties in an
+ * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
+ * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
+ * object may be locked by itself by passing NULL as @ctx.
+ */
+static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
+					      struct ww_acquire_ctx *ctx)
+{
+	return ww_mutex_lock_interruptible(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_slow - slowpath lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object after a die case. This function
+ * will sleep until the lock becomes available. See dma_resv_lock() as
+ * well.
+ */
+static inline void dma_resv_lock_slow(struct dma_resv *obj,
+				      struct ww_acquire_ctx *ctx)
+{
+	ww_mutex_lock_slow(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_slow_interruptible - slowpath lock the reservation
+ * object, interruptible
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object interruptibly after a die case. This function
+ * will sleep until the lock becomes available. See
+ * dma_resv_lock_interruptible() as well.
+ */
+static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
+						   struct ww_acquire_ctx *ctx)
+{
+	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_trylock - trylock the reservation object
+ * @obj: the reservation object
+ *
+ * Tries to lock the reservation object for exclusive access and modification.
+ * Note that the lock is only against other writers; readers will run
+ * concurrently with a writer under RCU. The seqlock is used to notify readers
+ * if they overlap with a writer.
+ *
+ * Also note that since no context is provided, no deadlock protection is
+ * possible.
+ *
+ * Returns true if the lock was acquired, false otherwise.
+ */
+static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
+{
+	return ww_mutex_trylock(&obj->lock);
+}
+
+/**
+ * dma_resv_is_locked - is the reservation object locked
+ * @obj: the reservation object
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool dma_resv_is_locked(struct dma_resv *obj)
+{
+	return ww_mutex_is_locked(&obj->lock);
+}
+
+/**
+ * dma_resv_locking_ctx - returns the context used to lock the object
+ * @obj: the reservation object
+ *
+ * Returns the context used to lock a reservation object or NULL if no context
+ * was used or the object is not locked at all.
+ */
+static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
+{
+	return READ_ONCE(obj->lock.ctx);
+}
+
+/**
+ * dma_resv_unlock - unlock the reservation object
+ * @obj: the reservation object
+ *
+ * Unlocks the reservation object following exclusive access.
+ */
+static inline void dma_resv_unlock(struct dma_resv *obj)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	/* Test shared fence slot reservation */
+	if (rcu_access_pointer(obj->fence)) {
+		struct dma_resv_list *fence = dma_resv_get_list(obj);
+
+		fence->shared_max = fence->shared_count;
+	}
+#endif
+	ww_mutex_unlock(&obj->lock);
+}
+
+void dma_resv_init(struct dma_resv *obj);
+void dma_resv_fini(struct dma_resv *obj);
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
+
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
+
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+			    struct dma_fence **pfence_excl,
+			    unsigned *pshared_count,
+			    struct dma_fence ***pshared);
+
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
+
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
+			       unsigned long timeout);
+
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+
+#endif /* _LINUX_RESERVATION_H */
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
deleted file mode 100644
index acddefea694f..000000000000
--- a/include/linux/reservation.h
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Header file for reservations for dma-buf and ttm
- *
- * Copyright(C) 2011 Linaro Limited. All rights reserved.
- * Copyright (C) 2012-2013 Canonical Ltd
- * Copyright (C) 2012 Texas Instruments
- *
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- *
- * Based on bo.c which bears the following copyright notice,
- * but is dual licensed:
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _LINUX_RESERVATION_H
-#define _LINUX_RESERVATION_H
-
-#include <linux/ww_mutex.h>
-#include <linux/dma-fence.h>
-#include <linux/slab.h>
-#include <linux/seqlock.h>
-#include <linux/rcupdate.h>
-
-extern struct ww_class reservation_ww_class;
-
-/**
- * struct reservation_object_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
- */
-struct reservation_object_list {
-	struct rcu_head rcu;
-	u32 shared_count, shared_max;
-	struct dma_fence __rcu *shared[];
-};
-
-/**
- * struct reservation_object - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
- */
-struct reservation_object {
-	struct ww_mutex lock;
-
-	struct dma_fence __rcu *fence_excl;
-	struct reservation_object_list __rcu *fence;
-};
-
-#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define reservation_object_assert_held(obj) \
-	lockdep_assert_held(&(obj)->lock.base)
-
-/**
- * reservation_object_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any).  Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-reservation_object_get_excl(struct reservation_object *obj)
-{
-	return rcu_dereference_protected(obj->fence_excl,
-					 reservation_object_held(obj));
-}
-
-/**
- * reservation_object_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the shared fence list.  Does NOT take references to
- * the fence.  The obj->lock must be held.
- */
-static inline struct reservation_object_list *
-reservation_object_get_list(struct reservation_object *obj)
-{
-	return rcu_dereference_protected(obj->fence,
-					 reservation_object_held(obj));
-}
-
-/**
- * reservation_object_fences - read consistent fence pointers
- * @obj: reservation object where we get the fences from
- * @excl: pointer for the exclusive fence
- * @list: pointer for the shared fence list
- *
- * Make sure we have a consisten exclusive fence and shared fence list.
- * Must be called with rcu read side lock held.
- */
-static inline void
-reservation_object_fences(struct reservation_object *obj,
-			  struct dma_fence **excl,
-			  struct reservation_object_list **list,
-			  u32 *shared_count)
-{
-	do {
-		*excl = rcu_dereference(obj->fence_excl);
-		*list = rcu_dereference(obj->fence);
-		*shared_count = *list ? (*list)->shared_count : 0;
-		smp_rmb(); /* See reservation_object_add_excl_fence */
-	} while (rcu_access_pointer(obj->fence_excl) != *excl);
-}
-
-/**
- * reservation_object_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-reservation_object_get_excl_rcu(struct reservation_object *obj)
-{
-	struct dma_fence *fence;
-
-	if (!rcu_access_pointer(obj->fence_excl))
-		return NULL;
-
-	rcu_read_lock();
-	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-	rcu_read_unlock();
-
-	return fence;
-}
-
-/**
- * reservation_object_lock - lock the reservation object
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Locks the reservation object for exclusive access and modification. Note,
- * that the lock is only against other writers, readers will run concurrently
- * with a writer under RCU. The seqlock is used to notify readers if they
- * overlap with a writer.
- *
- * As the reservation object may be locked by multiple parties in an
- * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
- * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
- * object may be locked by itself by passing NULL as @ctx.
- */
-static inline int
-reservation_object_lock(struct reservation_object *obj,
-			struct ww_acquire_ctx *ctx)
-{
-	return ww_mutex_lock(&obj->lock, ctx);
-}
-
-/**
- * reservation_object_lock_interruptible - lock the reservation object
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Locks the reservation object interruptible for exclusive access and
- * modification. Note, that the lock is only against other writers, readers
- * will run concurrently with a writer under RCU. The seqlock is used to
- * notify readers if they overlap with a writer.
- *
- * As the reservation object may be locked by multiple parties in an
- * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
- * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
- * object may be locked by itself by passing NULL as @ctx.
- */
-static inline int
-reservation_object_lock_interruptible(struct reservation_object *obj,
-				      struct ww_acquire_ctx *ctx)
-{
-	return ww_mutex_lock_interruptible(&obj->lock, ctx);
-}
-
-/**
- * reservation_object_lock_slow - slowpath lock the reservation object
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Acquires the reservation object after a die case. This function
- * will sleep until the lock becomes available. See reservation_object_lock() as
- * well.
- */
-static inline void
-reservation_object_lock_slow(struct reservation_object *obj,
-			     struct ww_acquire_ctx *ctx)
-{
-	ww_mutex_lock_slow(&obj->lock, ctx);
-}
-
-/**
- * reservation_object_lock_slow_interruptible - slowpath lock the reservation
- * object, interruptible
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Acquires the reservation object interruptible after a die case. This function
- * will sleep until the lock becomes available. See
- * reservation_object_lock_interruptible() as well.
- */
-static inline int
-reservation_object_lock_slow_interruptible(struct reservation_object *obj,
-					   struct ww_acquire_ctx *ctx)
-{
-	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
-}
-
-/**
- * reservation_object_trylock - trylock the reservation object
- * @obj: the reservation object
- *
- * Tries to lock the reservation object for exclusive access and modification.
- * Note, that the lock is only against other writers, readers will run
- * concurrently with a writer under RCU. The seqlock is used to notify readers
- * if they overlap with a writer.
- *
- * Also note that since no context is provided, no deadlock protection is
- * possible.
- *
- * Returns true if the lock was acquired, false otherwise.
- */
-static inline bool __must_check
-reservation_object_trylock(struct reservation_object *obj)
-{
-	return ww_mutex_trylock(&obj->lock);
-}
-
-/**
- * reservation_object_is_locked - is the reservation object locked
- * @obj: the reservation object
- *
- * Returns true if the mutex is locked, false if unlocked.
- */
-static inline bool
-reservation_object_is_locked(struct reservation_object *obj)
-{
-	return ww_mutex_is_locked(&obj->lock);
-}
-
-/**
- * reservation_object_locking_ctx - returns the context used to lock the object
- * @obj: the reservation object
- *
- * Returns the context used to lock a reservation object or NULL if no context
- * was used or the object is not locked at all.
- */
-static inline struct ww_acquire_ctx *
-reservation_object_locking_ctx(struct reservation_object *obj)
-{
-	return READ_ONCE(obj->lock.ctx);
-}
-
-/**
- * reservation_object_unlock - unlock the reservation object
- * @obj: the reservation object
- *
- * Unlocks the reservation object following exclusive access.
- */
-static inline void
-reservation_object_unlock(struct reservation_object *obj)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
-	/* Test shared fence slot reservation */
-	if (rcu_access_pointer(obj->fence)) {
-		struct reservation_object_list *fence =
-			reservation_object_get_list(obj);
-
-		fence->shared_max = fence->shared_count;
-	}
-#endif
-	ww_mutex_unlock(&obj->lock);
-}
-
-void reservation_object_init(struct reservation_object *obj);
-void reservation_object_fini(struct reservation_object *obj);
-int reservation_object_reserve_shared(struct reservation_object *obj,
-				      unsigned int num_fences);
-void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct dma_fence *fence);
-
-void reservation_object_add_excl_fence(struct reservation_object *obj,
-				       struct dma_fence *fence);
-
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
-				      struct dma_fence **pfence_excl,
-				      unsigned *pshared_count,
-				      struct dma_fence ***pshared);
-
-int reservation_object_copy_fences(struct reservation_object *dst,
-				   struct reservation_object *src);
-
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
-					 bool wait_all, bool intr,
-					 unsigned long timeout);
-
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
-					  bool test_all);
-
-#endif /* _LINUX_RESERVATION_H */
-- 
cgit v1.2.3
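
As a rough sketch of the wound/wait dance that the dma_resv_lock()/dma_resv_lock_slow() comments above describe, a caller that needs two reservation objects at once could look like the following. lock_both() is a hypothetical helper written against the renamed API, not something taken from the tree:

	#include <linux/dma-resv.h>
	#include <linux/kernel.h>	/* swap() */
	#include <linux/ww_mutex.h>

	/* Lock two reservation objects in either order, resolving cycles via -EDEADLK. */
	static int lock_both(struct dma_resv *a, struct dma_resv *b,
			     struct ww_acquire_ctx *ctx)
	{
		int ret;

		ww_acquire_init(ctx, &reservation_ww_class);

		ret = dma_resv_lock_interruptible(a, ctx);
		if (ret)
			goto out_fini;		/* -EINTR, nothing held */

		while ((ret = dma_resv_lock_interruptible(b, ctx)) == -EDEADLK) {
			/*
			 * Cycle detected: drop what we hold, take the contended
			 * lock on the slowpath and retry the other lock with
			 * the roles swapped.
			 */
			dma_resv_unlock(a);
			ret = dma_resv_lock_slow_interruptible(b, ctx);
			if (ret)
				goto out_fini;	/* -EINTR, nothing held */
			swap(a, b);
		}
		if (ret) {
			dma_resv_unlock(a);	/* -EINTR while "a" was held */
			goto out_fini;
		}

		ww_acquire_done(ctx);
		return 0;

	out_fini:
		ww_acquire_fini(ctx);
		return ret;
	}

When the caller is finished it unlocks both objects with dma_resv_unlock() and drops the acquire context with ww_acquire_fini().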


From b016cd6ed4b772759804e0d6082bd1f5ca63b8ee Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 14 Aug 2019 19:24:01 +0100
Subject: dma-buf: Restore seqlock around dma_resv updates
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts
67c97fb79a7f ("dma-buf: add reservation_object_fences helper")
dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")
0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")
5d344f58da76 ("dma-buf: nuke reservation_object seq number")

The scenario that defeats simply grabbing a set of shared/exclusive
fences and using them blissfully under RCU is that any of those fences
may be reallocated by a SLAB_TYPESAFE_BY_RCU fence slab cache. In this
scenario, while keeping the rcu_read_lock we need to establish that no
fence was changed in the dma_resv after a read (or full) memory barrier.
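
Roughly, the read side this restores looks like the sketch below (it mirrors the dma_buf_poll() hunk further down; "resv" is an assumed struct dma_resv pointer and the surrounding function is omitted):

	unsigned int seq, shared_count;
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	shared_count = fobj ? fobj->shared_count : 0;
	excl = rcu_dereference(resv->fence_excl);

	/* fences may have been recycled from a SLAB_TYPESAFE_BY_RCU cache */
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	/* excl/fobj are consistent here; take references before rcu_read_unlock() */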

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190814182401.25009-1-chris@chris-wilson.co.uk
---
 drivers/dma-buf/dma-buf.c                        |  31 ++++++-
 drivers/dma-buf/dma-resv.c                       | 109 +++++++++++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c |   7 +-
 drivers/gpu/drm/i915/gem/i915_gem_busy.c         |  24 +++--
 include/linux/dma-resv.h                         | 113 ++++++++++-------------
 5 files changed, 175 insertions(+), 109 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu')

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index b3400d6524ab..433d91d710e4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -199,7 +199,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
-	unsigned shared_count;
+	unsigned shared_count, seq;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -213,8 +213,21 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
+retry:
+	seq = read_seqcount_begin(&resv->seq);
 	rcu_read_lock();
-	dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
+
+	fobj = rcu_dereference(resv->fence);
+	if (fobj)
+		shared_count = fobj->shared_count;
+	else
+		shared_count = 0;
+	fence_excl = rcu_dereference(resv->fence_excl);
+	if (read_seqcount_retry(&resv->seq, seq)) {
+		rcu_read_unlock();
+		goto retry;
+	}
+
 	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
 		__poll_t pevents = EPOLLIN;
@@ -1144,6 +1157,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 	struct dma_resv *robj;
 	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
+	unsigned seq;
 	int count = 0, attach_count, shared_count, i;
 	size_t size = 0;
 
@@ -1174,9 +1188,16 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 				buf_obj->name ?: "");
 
 		robj = buf_obj->resv;
-		rcu_read_lock();
-		dma_resv_fences(robj, &fence, &fobj, &shared_count);
-		rcu_read_unlock();
+		while (true) {
+			seq = read_seqcount_begin(&robj->seq);
+			rcu_read_lock();
+			fobj = rcu_dereference(robj->fence);
+			shared_count = fobj ? fobj->shared_count : 0;
+			fence = rcu_dereference(robj->fence_excl);
+			if (!read_seqcount_retry(&robj->seq, seq))
+				break;
+			rcu_read_unlock();
+		}
 
 		if (fence)
 			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index f5142683c851..42a8f3f11681 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -49,6 +49,12 @@
 DEFINE_WD_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
+struct lock_class_key reservation_seqcount_class;
+EXPORT_SYMBOL(reservation_seqcount_class);
+
+const char reservation_seqcount_string[] = "reservation_seqcount";
+EXPORT_SYMBOL(reservation_seqcount_string);
+
 /**
  * dma_resv_list_alloc - allocate fence list
  * @shared_max: number of fences we need space for
@@ -96,6 +102,9 @@ static void dma_resv_list_free(struct dma_resv_list *list)
 void dma_resv_init(struct dma_resv *obj)
 {
 	ww_mutex_init(&obj->lock, &reservation_ww_class);
+
+	__seqcount_init(&obj->seq, reservation_seqcount_string,
+			&reservation_seqcount_class);
 	RCU_INIT_POINTER(obj->fence, NULL);
 	RCU_INIT_POINTER(obj->fence_excl, NULL);
 }
@@ -225,6 +234,9 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 	fobj = dma_resv_get_list(obj);
 	count = fobj->shared_count;
 
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+
 	for (i = 0; i < count; ++i) {
 
 		old = rcu_dereference_protected(fobj->shared[i],
@@ -242,6 +254,9 @@ replace:
 	RCU_INIT_POINTER(fobj->shared[i], fence);
 	/* pointer update must be visible before we extend the shared_count */
 	smp_store_mb(fobj->shared_count, count);
+
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
 	dma_fence_put(old);
 }
 EXPORT_SYMBOL(dma_resv_add_shared_fence);
@@ -269,10 +284,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 		dma_fence_get(fence);
 
 	preempt_disable();
-	rcu_assign_pointer(obj->fence_excl, fence);
-	/* pointer update must be visible before we modify the shared_count */
+	write_seqcount_begin(&obj->seq);
+	/* write_seqcount_begin provides the necessary memory barrier */
+	RCU_INIT_POINTER(obj->fence_excl, fence);
 	if (old)
-		smp_store_mb(old->shared_count, 0);
+		old->shared_count = 0;
+	write_seqcount_end(&obj->seq);
 	preempt_enable();
 
 	/* inplace update, no shared fences */
@@ -295,15 +312,17 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
 	struct dma_resv_list *src_list, *dst_list;
 	struct dma_fence *old, *new;
-	unsigned int i, shared_count;
+	unsigned i;
 
 	dma_resv_assert_held(dst);
 
 	rcu_read_lock();
+	src_list = rcu_dereference(src->fence);
 
 retry:
-	dma_resv_fences(src, &new, &src_list, &shared_count);
-	if (shared_count) {
+	if (src_list) {
+		unsigned shared_count = src_list->shared_count;
+
 		rcu_read_unlock();
 
 		dst_list = dma_resv_list_alloc(shared_count);
@@ -311,14 +330,14 @@ retry:
 			return -ENOMEM;
 
 		rcu_read_lock();
-		dma_resv_fences(src, &new, &src_list, &shared_count);
-		if (!src_list || shared_count > dst_list->shared_max) {
+		src_list = rcu_dereference(src->fence);
+		if (!src_list || src_list->shared_count > shared_count) {
 			kfree(dst_list);
 			goto retry;
 		}
 
 		dst_list->shared_count = 0;
-		for (i = 0; i < shared_count; ++i) {
+		for (i = 0; i < src_list->shared_count; ++i) {
 			struct dma_fence *fence;
 
 			fence = rcu_dereference(src_list->shared[i]);
@@ -328,6 +347,7 @@ retry:
 
 			if (!dma_fence_get_rcu(fence)) {
 				dma_resv_list_free(dst_list);
+				src_list = rcu_dereference(src->fence);
 				goto retry;
 			}
 
@@ -342,18 +362,18 @@ retry:
 		dst_list = NULL;
 	}
 
-	if (new && !dma_fence_get_rcu(new)) {
-		dma_resv_list_free(dst_list);
-		goto retry;
-	}
+	new = dma_fence_get_rcu_safe(&src->fence_excl);
 	rcu_read_unlock();
 
 	src_list = dma_resv_get_list(dst);
 	old = dma_resv_get_excl(dst);
 
 	preempt_disable();
-	rcu_assign_pointer(dst->fence_excl, new);
-	rcu_assign_pointer(dst->fence, dst_list);
+	write_seqcount_begin(&dst->seq);
+	/* write_seqcount_begin provides the necessary memory barrier */
+	RCU_INIT_POINTER(dst->fence_excl, new);
+	RCU_INIT_POINTER(dst->fence, dst_list);
+	write_seqcount_end(&dst->seq);
 	preempt_enable();
 
 	dma_resv_list_free(src_list);
@@ -388,18 +408,19 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 
 	do {
 		struct dma_resv_list *fobj;
-		unsigned int i;
+		unsigned int i, seq;
 		size_t sz = 0;
 
-		i = 0;
+		shared_count = i = 0;
 
 		rcu_read_lock();
-		dma_resv_fences(obj, &fence_excl, &fobj,
-					  &shared_count);
+		seq = read_seqcount_begin(&obj->seq);
 
+		fence_excl = rcu_dereference(obj->fence_excl);
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
 			goto unlock;
 
+		fobj = rcu_dereference(obj->fence);
 		if (fobj)
 			sz += sizeof(*shared) * fobj->shared_max;
 
@@ -427,6 +448,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 				break;
 			}
 			shared = nshared;
+			shared_count = fobj ? fobj->shared_count : 0;
 			for (i = 0; i < shared_count; ++i) {
 				shared[i] = rcu_dereference(fobj->shared[i]);
 				if (!dma_fence_get_rcu(shared[i]))
@@ -434,7 +456,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 			}
 		}
 
-		if (i != shared_count) {
+		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
 			while (i--)
 				dma_fence_put(shared[i]);
 			dma_fence_put(fence_excl);
@@ -478,17 +500,18 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 			       bool wait_all, bool intr,
 			       unsigned long timeout)
 {
-	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
-	unsigned shared_count;
+	unsigned seq, shared_count;
 	long ret = timeout ? timeout : 1;
 	int i;
 
 retry:
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
 	rcu_read_lock();
 	i = -1;
 
-	dma_resv_fences(obj, &fence, &fobj, &shared_count);
+	fence = rcu_dereference(obj->fence_excl);
 	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		if (!dma_fence_get_rcu(fence))
 			goto unlock_retry;
@@ -503,6 +526,11 @@ retry:
 	}
 
 	if (wait_all) {
+		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
 		for (i = 0; !fence && i < shared_count; ++i) {
 			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
 
@@ -525,6 +553,11 @@ retry:
 
 	rcu_read_unlock();
 	if (fence) {
+		if (read_seqcount_retry(&obj->seq, seq)) {
+			dma_fence_put(fence);
+			goto retry;
+		}
+
 		ret = dma_fence_wait_timeout(fence, intr, ret);
 		dma_fence_put(fence);
 		if (ret > 0 && wait_all && (i + 1 < shared_count))
@@ -567,19 +600,23 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
  */
 bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 {
-	struct dma_resv_list *fobj;
-	struct dma_fence *fence_excl;
-	unsigned shared_count;
+	unsigned seq, shared_count;
 	int ret;
 
 	rcu_read_lock();
 retry:
 	ret = true;
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
 
-	dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
 	if (test_all) {
 		unsigned i;
 
+		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
 		for (i = 0; i < shared_count; ++i) {
 			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
@@ -589,14 +626,24 @@ retry:
 			else if (!ret)
 				break;
 		}
-	}
 
-	if (!shared_count && fence_excl) {
-		ret = dma_resv_test_signaled_single(fence_excl);
-		if (ret < 0)
+		if (read_seqcount_retry(&obj->seq, seq))
 			goto retry;
 	}
 
+	if (!shared_count) {
+		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+
+		if (fence_excl) {
+			ret = dma_resv_test_signaled_single(fence_excl);
+			if (ret < 0)
+				goto retry;
+
+			if (read_seqcount_retry(&obj->seq, seq))
+				goto retry;
+		}
+	}
+
 	rcu_read_unlock();
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index bc4ec6b20a87..76e3516484e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -251,7 +251,12 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	new->shared_max = old->shared_max;
 	new->shared_count = k;
 
-	rcu_assign_pointer(resv->fence, new);
+	/* Install the new fence list, seqcount provides the barriers */
+	preempt_disable();
+	write_seqcount_begin(&resv->seq);
+	RCU_INIT_POINTER(resv->fence, new);
+	write_seqcount_end(&resv->seq);
+	preempt_enable();
 
 	/* Drop the references to the removed fences or move them to ef_list */
 	for (i = j, k = 0; i < old->shared_count; ++i) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index a2aff1d8290e..3d4f5775a4ba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -83,8 +83,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_busy *args = data;
 	struct drm_i915_gem_object *obj;
 	struct dma_resv_list *list;
-	unsigned int i, shared_count;
-	struct dma_fence *excl;
+	unsigned int seq;
 	int err;
 
 	err = -ENOENT;
@@ -110,18 +109,29 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
-	dma_resv_fences(obj->base.resv, &excl, &list, &shared_count);
+retry:
+	seq = raw_read_seqcount(&obj->base.resv->seq);
 
 	/* Translate the exclusive fence to the READ *and* WRITE engine */
-	args->busy = busy_check_writer(excl);
+	args->busy =
+		busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
 
 	/* Translate shared fences to READ set of engines */
-	for (i = 0; i < shared_count; ++i) {
-		struct dma_fence *fence = rcu_dereference(list->shared[i]);
+	list = rcu_dereference(obj->base.resv->fence);
+	if (list) {
+		unsigned int shared_count = list->shared_count, i;
 
-		args->busy |= busy_check_reader(fence);
+		for (i = 0; i < shared_count; ++i) {
+			struct dma_fence *fence =
+				rcu_dereference(list->shared[i]);
+
+			args->busy |= busy_check_reader(fence);
+		}
 	}
 
+	if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
+		goto retry;
+
 	err = 0;
 out:
 	rcu_read_unlock();
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 38f2802afabb..ee50d10f052b 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -46,6 +46,8 @@
 #include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
+extern struct lock_class_key reservation_seqcount_class;
+extern const char reservation_seqcount_string[];
 
 /**
  * struct dma_resv_list - a list of shared fences
@@ -69,6 +71,7 @@ struct dma_resv_list {
  */
 struct dma_resv {
 	struct ww_mutex lock;
+	seqcount_t seq;
 
 	struct dma_fence __rcu *fence_excl;
 	struct dma_resv_list __rcu *fence;
@@ -77,24 +80,6 @@ struct dma_resv {
 #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
-/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any).  Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj)
-{
-	return rcu_dereference_protected(obj->fence_excl,
-					 dma_resv_held(obj));
-}
-
 /**
  * dma_resv_get_list - get the reservation object's
  * shared fence list, with update-side lock held
@@ -109,53 +94,6 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
 					 dma_resv_held(obj));
 }
 
-/**
- * dma_resv_fences - read consistent fence pointers
- * @obj: reservation object where we get the fences from
- * @excl: pointer for the exclusive fence
- * @list: pointer for the shared fence list
- *
- * Make sure we have a consistent exclusive fence and shared fence list.
- * Must be called with rcu read side lock held.
- */
-static inline void dma_resv_fences(struct dma_resv *obj,
-				   struct dma_fence **excl,
-				   struct dma_resv_list **list,
-				   u32 *shared_count)
-{
-	do {
-		*excl = rcu_dereference(obj->fence_excl);
-		*list = rcu_dereference(obj->fence);
-		*shared_count = *list ? (*list)->shared_count : 0;
-		smp_rmb(); /* See dma_resv_add_excl_fence */
-	} while (rcu_access_pointer(obj->fence_excl) != *excl);
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments its
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
-	struct dma_fence *fence;
-
-	if (!rcu_access_pointer(obj->fence_excl))
-		return NULL;
-
-	rcu_read_lock();
-	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-	rcu_read_unlock();
-
-	return fence;
-}
-
 /**
  * dma_resv_lock - lock the reservation object
  * @obj: the reservation object
@@ -290,6 +228,51 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
 	ww_mutex_unlock(&obj->lock);
 }
 
+/**
+ * dma_resv_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any).  Does NOT take a
+ * reference. Writers must hold obj->lock, readers may only
+ * hold a RCU read side lock.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
+static inline struct dma_fence *
+dma_resv_get_excl(struct dma_resv *obj)
+{
+	return rcu_dereference_protected(obj->fence_excl,
+					 dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments its
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
+static inline struct dma_fence *
+dma_resv_get_excl_rcu(struct dma_resv *obj)
+{
+	struct dma_fence *fence;
+
+	if (!rcu_access_pointer(obj->fence_excl))
+		return NULL;
+
+	rcu_read_lock();
+	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
+	rcu_read_unlock();
+
+	return fence;
+}
+
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-- 
cgit v1.2.3
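
For completeness, a minimal write-side sketch against the declarations kept at the end of dma-resv.h above; "resv" and "fence" are assumed to come from the calling driver:

	int ret;

	ret = dma_resv_lock(resv, NULL);	/* single object, no acquire context needed */
	if (ret)
		return ret;

	ret = dma_resv_reserve_shared(resv, 1);	/* make room for one more shared slot */
	if (!ret)
		dma_resv_add_shared_fence(resv, fence);	/* published under obj->seq */

	dma_resv_unlock(resv);
	return ret;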