-rw-r--r--  drivers/gpu/drm/i915/Makefile                     |   1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.c        |   9
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c          |  84
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.h          |   5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c        | 125
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h        |   9
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h  |  27
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.c        |   6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c           | 540
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.h           |  48
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c       |   3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                   |   5
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c        |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.h        |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_region_ttm.c           |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_region_ttm.h           |  11
16 files changed, 730 insertions(+), 153 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 4f22cac1c49b..f57dfc74d6ce 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -155,6 +155,7 @@ gem-y += \
gem/i915_gem_stolen.o \
gem/i915_gem_throttle.o \
gem/i915_gem_tiling.o \
+ gem/i915_gem_ttm.o \
gem/i915_gem_userptr.o \
gem/i915_gem_wait.o \
gem/i915_gemfs.o
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 548ddf39d853..93bf63bbaff1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -85,13 +85,10 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
return -E2BIG;
/*
- * For now resort to CPU based clearing for device local-memory, in the
- * near future this will use the blitter engine for accelerated, GPU
- * based clearing.
+ * I915_BO_ALLOC_USER will make sure the object is cleared before
+ * any user access.
*/
- flags = 0;
- if (mr->type == INTEL_MEMORY_LOCAL)
- flags = I915_BO_ALLOC_CPU_CLEAR;
+ flags = I915_BO_ALLOC_USER;
ret = mr->ops->init_object(mr, obj, size, flags);
if (ret)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 3b4aa28a076d..2b8cd15de1d9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -4,74 +4,10 @@
*/
#include "intel_memory_region.h"
-#include "intel_region_ttm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
-static void lmem_put_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
- obj->mm.dirty = false;
- sg_free_table(pages);
- kfree(pages);
-}
-
-static int lmem_get_pages(struct drm_i915_gem_object *obj)
-{
- unsigned int flags;
- struct sg_table *pages;
-
- flags = I915_ALLOC_MIN_PAGE_SIZE;
- if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
- flags |= I915_ALLOC_CONTIGUOUS;
-
- obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
- obj->base.size,
- flags);
- if (IS_ERR(obj->mm.st_mm_node))
- return PTR_ERR(obj->mm.st_mm_node);
-
- /* Range manager is always contigous */
- if (obj->mm.region->is_range_manager)
- obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
- pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
- if (IS_ERR(pages)) {
- intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
- return PTR_ERR(pages);
- }
-
- __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
-
- if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
- void __iomem *vaddr =
- i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
-
- if (!vaddr) {
- struct sg_table *pages =
- __i915_gem_object_unset_pages(obj);
-
- if (!IS_ERR_OR_NULL(pages))
- lmem_put_pages(obj, pages);
- }
-
- memset_io(vaddr, 0, obj->base.size);
- io_mapping_unmap(vaddr);
- }
-
- return 0;
-}
-
-const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
- .name = "i915_gem_object_lmem",
- .flags = I915_GEM_OBJECT_HAS_IOMEM,
-
- .get_pages = lmem_get_pages,
- .put_pages = lmem_put_pages,
- .release = i915_gem_object_release_memory_region,
-};
-
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
unsigned long n,
@@ -103,23 +39,3 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
size, flags);
}
-
-int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
- struct drm_i915_gem_object *obj,
- resource_size_t size,
- unsigned int flags)
-{
- static struct lock_class_key lock_class;
- struct drm_i915_private *i915 = mem->i915;
-
- drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags);
-
- obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
-
- i915_gem_object_init_memory_region(obj, mem);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index fac6bc5a5ebb..ea76fd11ccb0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -26,9 +26,4 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags);
-int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
- struct drm_i915_gem_object *obj,
- resource_size_t size,
- unsigned int flags);
-
#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 5706d471692d..16eac5ea9238 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -172,7 +172,7 @@ static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *f
}
}
-static void __i915_gem_free_object_rcu(struct rcu_head *head)
+void __i915_gem_free_object_rcu(struct rcu_head *head)
{
struct drm_i915_gem_object *obj =
container_of(head, typeof(*obj), rcu);
@@ -208,59 +208,69 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
}
}
-static void __i915_gem_free_objects(struct drm_i915_private *i915,
- struct llist_node *freed)
+void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj, *on;
+ trace_i915_gem_object_destroy(obj);
- llist_for_each_entry_safe(obj, on, freed, freed) {
- trace_i915_gem_object_destroy(obj);
+ if (!list_empty(&obj->vma.list)) {
+ struct i915_vma *vma;
+
+ /*
+ * Note that the vma keeps an object reference while
+ * it is active, so it *should* not sleep while we
+ * destroy it. Our debug code errs insists it *might*.
+ * For the moment, play along.
+ */
+ spin_lock(&obj->vma.lock);
+ while ((vma = list_first_entry_or_null(&obj->vma.list,
+ struct i915_vma,
+ obj_link))) {
+ GEM_BUG_ON(vma->obj != obj);
+ spin_unlock(&obj->vma.lock);
- if (!list_empty(&obj->vma.list)) {
- struct i915_vma *vma;
+ __i915_vma_put(vma);
- /*
- * Note that the vma keeps an object reference while
- * it is active, so it *should* not sleep while we
- * destroy it. Our debug code errs insists it *might*.
- * For the moment, play along.
- */
spin_lock(&obj->vma.lock);
- while ((vma = list_first_entry_or_null(&obj->vma.list,
- struct i915_vma,
- obj_link))) {
- GEM_BUG_ON(vma->obj != obj);
- spin_unlock(&obj->vma.lock);
+ }
+ spin_unlock(&obj->vma.lock);
+ }
- __i915_vma_put(vma);
+ __i915_gem_object_free_mmaps(obj);
- spin_lock(&obj->vma.lock);
- }
- spin_unlock(&obj->vma.lock);
- }
+ GEM_BUG_ON(!list_empty(&obj->lut_list));
- __i915_gem_object_free_mmaps(obj);
+ atomic_set(&obj->mm.pages_pin_count, 0);
+ __i915_gem_object_put_pages(obj);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
+ bitmap_free(obj->bit_17);
- GEM_BUG_ON(!list_empty(&obj->lut_list));
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
- atomic_set(&obj->mm.pages_pin_count, 0);
- __i915_gem_object_put_pages(obj);
- GEM_BUG_ON(i915_gem_object_has_pages(obj));
- bitmap_free(obj->bit_17);
+ drm_gem_free_mmap_offset(&obj->base);
- if (obj->base.import_attach)
- drm_prime_gem_destroy(&obj->base, NULL);
+ if (obj->ops->release)
+ obj->ops->release(obj);
- drm_gem_free_mmap_offset(&obj->base);
+ if (obj->mm.n_placements > 1)
+ kfree(obj->mm.placements);
- if (obj->ops->release)
- obj->ops->release(obj);
+ if (obj->shares_resv_from)
+ i915_vm_resv_put(obj->shares_resv_from);
+}
- if (obj->mm.n_placements > 1)
- kfree(obj->mm.placements);
+static void __i915_gem_free_objects(struct drm_i915_private *i915,
+ struct llist_node *freed)
+{
+ struct drm_i915_gem_object *obj, *on;
- if (obj->shares_resv_from)
- i915_vm_resv_put(obj->shares_resv_from);
+ llist_for_each_entry_safe(obj, on, freed, freed) {
+ might_sleep();
+ if (obj->ops->delayed_free) {
+ obj->ops->delayed_free(obj);
+ continue;
+ }
+ __i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
@@ -318,6 +328,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
* worker and performing frees directly from subsequent allocations for
* crude but effective memory throttling.
*/
+
if (llist_add(&obj->freed, &i915->mm.free_list))
queue_work(i915->wq, &i915->mm.free_work);
}
@@ -410,6 +421,42 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset,
return 0;
}
+/**
+ * i915_gem_object_evictable - Whether object is likely evictable after unbind.
+ * @obj: The object to check
+ *
+ * This function checks whether the object is likely evictable after unbind.
+ * If the object is not locked when checking, the result is only advisory.
+ * If the object is locked when checking, and the function returns true,
+ * then an eviction should indeed be possible. But since unlocked vma
+ * unpinning and unbinding is currently possible, the object can actually
+ * become evictable even if this function returns false.
+ *
+ * Return: true if the object may be evictable. False otherwise.
+ */
+bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int pin_count = atomic_read(&obj->mm.pages_pin_count);
+
+ if (!pin_count)
+ return true;
+
+ spin_lock(&obj->vma.lock);
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
+ if (i915_vma_is_pinned(vma)) {
+ spin_unlock(&obj->vma.lock);
+ return false;
+ }
+ if (atomic_read(&vma->pages_count))
+ pin_count--;
+ }
+ spin_unlock(&obj->vma.lock);
+ GEM_WARN_ON(pin_count < 0);
+
+ return pin_count == 0;
+}
+
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 7c0eb425cb3b..5ae32ea99ee5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -200,6 +200,9 @@ static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
+ if (obj->ops->adjust_lru)
+ obj->ops->adjust_lru(obj);
+
dma_resv_unlock(obj->base.resv);
}
@@ -587,6 +590,12 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset,
bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
+void __i915_gem_free_object_rcu(struct rcu_head *head);
+
+void __i915_gem_free_object(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);
+
#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index d047ea126029..68313474e6a6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -63,6 +63,20 @@ struct drm_i915_gem_object_ops {
const struct drm_i915_gem_pwrite *arg);
int (*dmabuf_export)(struct drm_i915_gem_object *obj);
+
+ /**
+ * adjust_lru - notify that the madvise value was updated
+ * @obj: The gem object
+ *
+ * The madvise value may have been updated, or the object may have been
+ * recently referenced, so act accordingly (perhaps changing an LRU list, etc.).
+ */
+ void (*adjust_lru)(struct drm_i915_gem_object *obj);
+
+ /**
+ * delayed_free - Override the default delayed free implementation
+ */
+ void (*delayed_free)(struct drm_i915_gem_object *obj);
void (*release)(struct drm_i915_gem_object *obj);
const char *name; /* friendly name for debug, e.g. lockdep classes */
@@ -187,12 +201,14 @@ struct drm_i915_gem_object {
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
#define I915_BO_ALLOC_CPU_CLEAR BIT(3)
+#define I915_BO_ALLOC_USER BIT(4)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_STRUCT_PAGE | \
- I915_BO_ALLOC_CPU_CLEAR)
-#define I915_BO_READONLY BIT(4)
-#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */
+ I915_BO_ALLOC_CPU_CLEAR | \
+ I915_BO_ALLOC_USER)
+#define I915_BO_READONLY BIT(5)
+#define I915_TILING_QUIRK_BIT 6 /* unknown swizzling; do not release! */
/*
* Is the object to be mapped as read-only to the GPU
@@ -310,6 +326,11 @@ struct drm_i915_gem_object {
bool dirty:1;
} mm;
+ struct {
+ struct sg_table *cached_io_st;
+ bool created:1;
+ } ttm;
+
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index f25e6646c5b7..d1f1840540dd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -18,11 +18,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
mutex_lock(&mem->objects.lock);
- if (obj->flags & I915_BO_ALLOC_VOLATILE)
- list_add(&obj->mm.region_link, &mem->objects.purgeable);
- else
- list_add(&obj->mm.region_link, &mem->objects.list);
-
+ list_add(&obj->mm.region_link, &mem->objects.list);
mutex_unlock(&mem->objects.lock);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
new file mode 100644
index 000000000000..2695b8c37e13
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+#include "intel_region_ttm.h"
+
+#include "gem/i915_gem_object.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h"
+
+#define I915_PL_LMEM0 TTM_PL_PRIV
+#define I915_PL_SYSTEM TTM_PL_SYSTEM
+#define I915_PL_STOLEN TTM_PL_VRAM
+#define I915_PL_GGTT TTM_PL_TT
+
+#define I915_TTM_PRIO_PURGE 0
+#define I915_TTM_PRIO_NO_PAGES 1
+#define I915_TTM_PRIO_HAS_PAGES 2
+
+/**
+ * struct i915_ttm_tt - TTM page vector with additional private information
+ * @ttm: The base TTM page vector.
+ * @dev: The struct device used for dma mapping and unmapping.
+ * @cached_st: The cached scatter-gather table.
+ *
+ * Note that DMA may be going on right up to the point where the page-
+ * vector is unpopulated in delayed destroy. Hence keep the
+ * scatter-gather table mapped and cached up to that point. This is
+ * different from the cached gem object io scatter-gather table which
+ * doesn't have an associated dma mapping.
+ */
+struct i915_ttm_tt {
+ struct ttm_tt ttm;
+ struct device *dev;
+ struct sg_table *cached_st;
+};
+
+static const struct ttm_place lmem0_sys_placement_flags[] = {
+ {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = I915_PL_LMEM0,
+ .flags = 0,
+ }, {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = I915_PL_SYSTEM,
+ .flags = 0,
+ }
+};
+
+static struct ttm_placement i915_lmem0_placement = {
+ .num_placement = 1,
+ .placement = &lmem0_sys_placement_flags[0],
+ .num_busy_placement = 1,
+ .busy_placement = &lmem0_sys_placement_flags[0],
+};
+
+static struct ttm_placement i915_sys_placement = {
+ .num_placement = 1,
+ .placement = &lmem0_sys_placement_flags[1],
+ .num_busy_placement = 1,
+ .busy_placement = &lmem0_sys_placement_flags[1],
+};
+
+static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);
+
+static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, bo->resource->mem_type);
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct i915_ttm_tt *i915_tt;
+ int ret;
+
+ i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
+ if (!i915_tt)
+ return NULL;
+
+ if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
+ man->use_tt)
+ page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+
+ ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, ttm_write_combined);
+ if (ret) {
+ kfree(i915_tt);
+ return NULL;
+ }
+
+ i915_tt->dev = obj->base.dev->dev;
+
+ return &i915_tt->ttm;
+}
+
+static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
+{
+ struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+
+ if (i915_tt->cached_st) {
+ dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
+ DMA_BIDIRECTIONAL, 0);
+ sg_free_table(i915_tt->cached_st);
+ kfree(i915_tt->cached_st);
+ i915_tt->cached_st = NULL;
+ }
+ ttm_pool_free(&bdev->pool, ttm);
+}
+
+static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
+{
+ struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+
+ ttm_tt_destroy_common(bdev, ttm);
+ kfree(i915_tt);
+}
+
+static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+
+ /* Will do for now. Our pinned objects are still on TTM's LRU lists */
+ if (!i915_gem_object_evictable(obj))
+ return false;
+
+ /* This isn't valid with a buddy allocator */
+ return ttm_bo_eviction_valuable(bo, place);
+}
+
+static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ *placement = i915_sys_placement;
+}
+
+static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ int ret;
+
+ ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ if (ret)
+ return ret;
+
+ ret = __i915_gem_object_put_pages(obj);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
+{
+ if (obj->ttm.cached_io_st) {
+ sg_free_table(obj->ttm.cached_io_st);
+ kfree(obj->ttm.cached_io_st);
+ obj->ttm.cached_io_st = NULL;
+ }
+}
+
+static void i915_ttm_purge(struct drm_i915_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ };
+ struct ttm_placement place = {};
+ int ret;
+
+ if (obj->mm.madv == __I915_MADV_PURGED)
+ return;
+
+ /* TTM's purge interface. Note that we might be reentering. */
+ ret = ttm_bo_validate(bo, &place, &ctx);
+
+ if (!ret) {
+ i915_ttm_free_cached_io_st(obj);
+ obj->mm.madv = __I915_MADV_PURGED;
+ }
+}
+
+static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ int ret = i915_ttm_move_notify(bo);
+
+ GEM_WARN_ON(ret);
+ GEM_WARN_ON(obj->ttm.cached_io_st);
+ if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
+ i915_ttm_purge(obj);
+}
+
+static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+
+ if (likely(obj)) {
+ /* This releases all gem object bindings to the backend. */
+ __i915_gem_free_object(obj);
+ }
+}
+
+static struct intel_memory_region *
+i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
+{
+ struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
+
+ /* There's some room for optimization here... */
+ GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
+ ttm_mem_type < I915_PL_LMEM0);
+ if (ttm_mem_type == I915_PL_SYSTEM)
+ return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
+ 0);
+
+ return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
+ ttm_mem_type - I915_PL_LMEM0);
+}
+
+static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
+{
+ struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+ struct scatterlist *sg;
+ struct sg_table *st;
+ int ret;
+
+ if (i915_tt->cached_st)
+ return i915_tt->cached_st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ sg = __sg_alloc_table_from_pages
+ (st, ttm->pages, ttm->num_pages, 0,
+ (unsigned long)ttm->num_pages << PAGE_SHIFT,
+ i915_sg_segment_size(), NULL, 0, GFP_KERNEL);
+ if (IS_ERR(sg)) {
+ kfree(st);
+ return ERR_CAST(sg);
+ }
+
+ ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
+ if (ret) {
+ sg_free_table(st);
+ kfree(st);
+ return ERR_PTR(ret);
+ }
+
+ i915_tt->cached_st = st;
+ return st;
+}
+
+static struct sg_table *
+i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
+ struct ttm_resource *res)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, res->mem_type);
+
+ if (man->use_tt)
+ return i915_ttm_tt_get_st(bo->ttm);
+
+ return intel_region_ttm_node_to_st(obj->mm.region, res);
+}
+
+static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *dst_mem,
+ struct ttm_place *hop)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct ttm_resource_manager *dst_man =
+ ttm_manager_type(bo->bdev, dst_mem->mem_type);
+ struct ttm_resource_manager *src_man =
+ ttm_manager_type(bo->bdev, bo->resource->mem_type);
+ struct intel_memory_region *dst_reg, *src_reg;
+ union {
+ struct ttm_kmap_iter_tt tt;
+ struct ttm_kmap_iter_iomap io;
+ } _dst_iter, _src_iter;
+ struct ttm_kmap_iter *dst_iter, *src_iter;
+ struct sg_table *dst_st;
+ int ret;
+
+ dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
+ src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
+ GEM_BUG_ON(!dst_reg || !src_reg);
+
+ /* Sync for now. We could do the actual copy async. */
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ return ret;
+
+ ret = i915_ttm_move_notify(bo);
+ if (ret)
+ return ret;
+
+ if (obj->mm.madv != I915_MADV_WILLNEED) {
+ i915_ttm_purge(obj);
+ ttm_resource_free(bo, &dst_mem);
+ return 0;
+ }
+
+ /* Populate ttm with pages if needed. Typically system memory. */
+ if (bo->ttm && (dst_man->use_tt ||
+ (bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
+ ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ if (ret)
+ return ret;
+ }
+
+ dst_st = i915_ttm_resource_get_st(obj, dst_mem);
+ if (IS_ERR(dst_st))
+ return PTR_ERR(dst_st);
+
+ /* If we start mapping GGTT, we can no longer use man::use_tt here. */
+ dst_iter = dst_man->use_tt ?
+ ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
+ ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
+ dst_st, dst_reg->region.start);
+
+ src_iter = src_man->use_tt ?
+ ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
+ ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
+ obj->ttm.cached_io_st,
+ src_reg->region.start);
+
+ ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
+ i915_ttm_free_cached_io_st(obj);
+
+ if (!dst_man->use_tt)
+ obj->ttm.cached_io_st = dst_st;
+
+ return 0;
+}
+
+static struct ttm_device_funcs i915_ttm_bo_driver = {
+ .ttm_tt_create = i915_ttm_tt_create,
+ .ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
+ .ttm_tt_destroy = i915_ttm_tt_destroy,
+ .eviction_valuable = i915_ttm_eviction_valuable,
+ .evict_flags = i915_ttm_evict_flags,
+ .move = i915_ttm_move,
+ .swap_notify = i915_ttm_swap_notify,
+ .delete_mem_notify = i915_ttm_delete_mem_notify,
+};
+
+/**
+ * i915_ttm_driver - Return a pointer to the TTM device funcs
+ *
+ * Return: Pointer to statically allocated TTM device funcs.
+ */
+struct ttm_device_funcs *i915_ttm_driver(void)
+{
+ return &i915_ttm_bo_driver;
+}
+
+static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ };
+ struct sg_table *st;
+ int ret;
+
+ /* Move to the requested placement. */
+ ret = ttm_bo_validate(bo, &i915_lmem0_placement, &ctx);
+ if (ret)
+ return ret == -ENOSPC ? -ENXIO : ret;
+
+ /* Object either has a page vector or is an iomem object */
+ st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
+ if (IS_ERR(st))
+ return PTR_ERR(st);
+
+ __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
+
+ i915_ttm_adjust_lru(obj);
+
+ return ret;
+}
+
+static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *st)
+{
+ /*
+ * We're currently not called from a shrinker, so put_pages()
+ * typically means the object is about to be destroyed, or that we were
+ * called from move_notify(). So just avoid doing much for now.
+ * If the object is not destroyed next, the TTM eviction logic
+ * and shrinkers will move it out if needed.
+ */
+
+ i915_ttm_adjust_lru(obj);
+}
+
+static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+
+ /*
+ * Don't manipulate the TTM LRUs while in TTM bo destruction.
+ * We're called through i915_ttm_delete_mem_notify().
+ */
+ if (!kref_read(&bo->kref))
+ return;
+
+ /*
+ * Put on the correct LRU list depending on the MADV status
+ */
+ spin_lock(&bo->bdev->lru_lock);
+ if (obj->mm.madv != I915_MADV_WILLNEED) {
+ bo->priority = I915_TTM_PRIO_PURGE;
+ } else if (!i915_gem_object_has_pages(obj)) {
+ if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
+ bo->priority = I915_TTM_PRIO_HAS_PAGES;
+ } else {
+ if (bo->priority > I915_TTM_PRIO_NO_PAGES)
+ bo->priority = I915_TTM_PRIO_NO_PAGES;
+ }
+
+ ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+ spin_unlock(&bo->bdev->lru_lock);
+}
+
+/*
+ * TTM-backed gem object destruction requires some clarification.
+ * Basically we have two possibilities here. We can either rely on the
+ * i915 delayed destruction and put the TTM object when the object
+ * is idle. This would be detected by TTM which would bypass the
+ * TTM delayed destroy handling. The other approach is to put the TTM
+ * object early and rely on the TTM destroyed handling, and then free
+ * the leftover parts of the GEM object once TTM's destroyed list handling is
+ * complete. For now, we rely on the latter for two reasons:
+ * a) TTM can evict an object even when it's on the delayed destroy list,
+ * which in theory allows for complete eviction.
+ * b) There is work going on in TTM to allow freeing an object even when
+ * it's not idle, and using the TTM destroyed list handling could help us
+ * benefit from that.
+ */
+static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
+{
+ if (obj->ttm.created) {
+ ttm_bo_put(i915_gem_to_ttm(obj));
+ } else {
+ __i915_gem_free_object(obj);
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+ }
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
+ .name = "i915_gem_object_ttm",
+ .flags = I915_GEM_OBJECT_HAS_IOMEM,
+
+ .get_pages = i915_ttm_get_pages,
+ .put_pages = i915_ttm_put_pages,
+ .truncate = i915_ttm_purge,
+ .adjust_lru = i915_ttm_adjust_lru,
+ .delayed_free = i915_ttm_delayed_free,
+};
+
+void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+
+ i915_gem_object_release_memory_region(obj);
+ if (obj->ttm.created)
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+}
+
+/**
+ * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
+ * @mem: The initial memory region for the object.
+ * @obj: The gem object.
+ * @size: Object size in bytes.
+ * @flags: gem object flags.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
+{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
+ enum ttm_bo_type bo_type;
+ size_t alignment = 0;
+ int ret;
+
+ /* Adjust alignment to GPU- and CPU huge page sizes. */
+
+ if (mem->is_range_manager) {
+ if (size >= SZ_1G)
+ alignment = SZ_1G >> PAGE_SHIFT;
+ else if (size >= SZ_2M)
+ alignment = SZ_2M >> PAGE_SHIFT;
+ else if (size >= SZ_64K)
+ alignment = SZ_64K >> PAGE_SHIFT;
+ }
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
+ i915_gem_object_init_memory_region(obj, mem);
+ i915_gem_object_make_unshrinkable(obj);
+ obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+ bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
+ ttm_bo_type_kernel;
+
+ /*
+ * If this function fails, it will call the destructor, but
+ * our caller still owns the object. So no freeing in the
+ * destructor until obj->ttm.created is true.
+ * Similarly, in delayed_destroy, we can't call ttm_bo_put()
+ * until successful initialization.
+ */
+ ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size,
+ bo_type, &i915_sys_placement, alignment,
+ true, NULL, NULL, i915_ttm_bo_destroy);
+
+ if (!ret)
+ obj->ttm.created = true;
+
+ /* i915 wants -ENXIO when out of memory region space. */
+ return (ret == -ENOSPC) ? -ENXIO : ret;
+}
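
As a rough usage sketch of the backend above (illustrative only, not part of the patch; the helper name is hypothetical): with intel_region_lmem_ops now routing init_object to __i915_gem_ttm_object_init(), creating and populating a local-memory object flows through ttm_bo_validate() and i915_ttm_get_pages().

/* Hypothetical helper, for illustration only. */
static int lmem_backing_smoke_test(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	/* The object is backed by a TTM bo; its initial placement is system. */
	obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/*
	 * Pinning pages invokes i915_ttm_get_pages(), which validates the bo
	 * against i915_lmem0_placement and publishes the resulting sg_table
	 * via __i915_gem_object_set_pages().
	 */
	err = i915_gem_object_pin_pages_unlocked(obj);
	if (!err)
		i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);
	return err;
}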
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
new file mode 100644
index 000000000000..b8d3dcbb50df
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef _I915_GEM_TTM_H_
+#define _I915_GEM_TTM_H_
+
+#include "gem/i915_gem_object_types.h"
+
+/**
+ * i915_gem_to_ttm - Convert a struct drm_i915_gem_object to a
+ * struct ttm_buffer_object.
+ * @obj: Pointer to the gem object.
+ *
+ * Return: Pointer to the embedded struct ttm_buffer_object.
+ */
+static inline struct ttm_buffer_object *
+i915_gem_to_ttm(struct drm_i915_gem_object *obj)
+{
+ return &obj->__do_not_access;
+}
+
+/*
+ * i915 ttm gem object destructor. Internal use only.
+ */
+void i915_ttm_bo_destroy(struct ttm_buffer_object *bo);
+
+/**
+ * i915_ttm_to_gem - Convert a struct ttm_buffer_object to an embedding
+ * struct drm_i915_gem_object.
+ *
+ * Return: Pointer to the embedding struct drm_i915_gem_object, or NULL
+ * if the object was not an i915 ttm object.
+ */
+static inline struct drm_i915_gem_object *
+i915_ttm_to_gem(struct ttm_buffer_object *bo)
+{
+ if (GEM_WARN_ON(bo->destroy != i915_ttm_bo_destroy))
+ return NULL;
+
+ return container_of(bo, struct drm_i915_gem_object, __do_not_access);
+}
+
+int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags);
+#endif
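
A minimal usage sketch of the conversion helpers declared here (the callback name is hypothetical; the pattern mirrors i915_ttm_delete_mem_notify() above): a TTM callback recovers the embedding GEM object, and a NULL return flags a bo that was not created by this backend (bo->destroy != i915_ttm_bo_destroy) and should simply be skipped.

/* Hypothetical TTM callback, for illustration only. */
static void example_bo_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/* Not one of ours (e.g. a TTM-internal transient bo): nothing to do. */
	if (!obj)
		return;

	/* From here on, obj and i915_gem_to_ttm(obj) == bo name the same object. */
}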
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index f7366b054f8e..4ae1f717a94c 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -9,6 +9,7 @@
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h"
#include "intel_region_lmem.h"
static int init_fake_lmem_bar(struct intel_memory_region *mem)
@@ -107,7 +108,7 @@ out_no_io:
static const struct intel_memory_region_ops intel_region_lmem_ops = {
.init = region_lmem_init,
.release = region_lmem_release,
- .init_object = __i915_gem_lmem_object_init,
+ .init_object = __i915_gem_ttm_object_init,
};
struct intel_memory_region *
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 589388dec48a..6a0a3f0e36e1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1005,8 +1005,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
}
}
- if (obj->mm.madv != __I915_MADV_PURGED)
+ if (obj->mm.madv != __I915_MADV_PURGED) {
obj->mm.madv = args->madv;
+ if (obj->ops->adjust_lru)
+ obj->ops->adjust_lru(obj);
+ }
if (i915_gem_object_has_pages(obj)) {
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index e6024eb7cca4..12fb5423fd5e 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -149,7 +149,6 @@ intel_memory_region_create(struct drm_i915_private *i915,
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
- INIT_LIST_HEAD(&mem->objects.purgeable);
INIT_LIST_HEAD(&mem->reserved);
mutex_init(&mem->mm_lock);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 1f7dac63abb7..c7e635d62e1a 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -101,7 +101,6 @@ struct intel_memory_region {
struct {
struct mutex lock; /* Protects access to objects */
struct list_head list;
- struct list_head purgeable;
} objects;
size_t chunk_size;
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index 82a6727ede46..27fe0668d094 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -11,6 +11,7 @@
#include "intel_region_ttm.h"
+#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
* DOC: TTM support structure
*
@@ -20,9 +21,6 @@
* i915 GEM regions to TTM memory types and resource managers.
*/
-/* A Zero-initialized driver for now. We don't have a TTM backend yet. */
-static struct ttm_device_funcs i915_ttm_bo_driver;
-
/**
* intel_region_ttm_device_init - Initialize a TTM device
* @dev_priv: Pointer to an i915 device private structure.
@@ -33,7 +31,7 @@ int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
struct drm_device *drm = &dev_priv->drm;
- return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
+ return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
drm->dev, drm->anon_inode->i_mapping,
drm->vma_offset_manager, false, false);
}
@@ -177,6 +175,7 @@ struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
mem->region.start);
}
+#ifdef CONFIG_DRM_I915_SELFTEST
/**
* intel_region_ttm_node_alloc - Allocate memory resources from a region
* @mem: The memory region,
@@ -224,3 +223,4 @@ intel_region_ttm_node_alloc(struct intel_memory_region *mem,
ret = -ENXIO;
return ret ? ERR_PTR(ret) : res;
}
+#endif
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h
index 11b0574ab791..e8cf830fda6f 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.h
+++ b/drivers/gpu/drm/i915/intel_region_ttm.h
@@ -12,6 +12,7 @@
struct drm_i915_private;
struct intel_memory_region;
struct ttm_resource;
+struct ttm_device_funcs;
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv);
@@ -24,11 +25,15 @@ void intel_region_ttm_fini(struct intel_memory_region *mem);
struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
struct ttm_resource *res);
+void intel_region_ttm_node_free(struct intel_memory_region *mem,
+ struct ttm_resource *node);
+
+struct ttm_device_funcs *i915_ttm_driver(void);
+
+#ifdef CONFIG_DRM_I915_SELFTEST
struct ttm_resource *
intel_region_ttm_node_alloc(struct intel_memory_region *mem,
resource_size_t size,
unsigned int flags);
-
-void intel_region_ttm_node_free(struct intel_memory_region *mem,
- struct ttm_resource *node);
+#endif
#endif /* _INTEL_REGION_TTM_H_ */