author	Dave Airlie <airlied@redhat.com>	2018-01-25 11:40:54 +1000
committer	Dave Airlie <airlied@redhat.com>	2018-01-25 11:40:54 +1000
commit	22bc72c8075fa350482cdbbd66597d626aa506c8 (patch)
tree	4223a1282a53a07cd87ff0abbe1da5f8f172788d /drivers/gpu/drm/ttm
parent	92eb5f0c00b7c11d85ae823a814b2a34dda8a3c4 (diff)
parent	87440329b06720e09c27ad1991204f4f0bd76f83 (diff)
Merge branch 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few more fixes for 4.16, nothing major. This is on top of the pull request from last week. The most notable change here is a fix to the link order for the GPU scheduler, which is now separate from amdgpu, to fix crashes when the modules are built into the kernel rather than as modules.

* 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux:
  drm: fix gpu scheduler link order
  drm/amd/display: Demote error print to debug print when ATOM impl missing
  drm/amdgpu: Avoid leaking PM domain on driver unbind (v2)
  drm/amd/amdgpu: Add Polaris version check
  drm/amdgpu: Reenable manual GPU reset from sysfs
  drm/amdgpu: disable MMHUB power gating on raven
  drm/ttm: Don't unreserve swapped BOs that were previously reserved
  drm/ttm: Don't add swapped BOs to swap-LRU list
  drm/amdgpu: only check for ECC on Vega10
  drm/amd/powerplay: Fix smu_table_entry.handle type
  drm/ttm: add VADDR_FLAG_UPDATED_COUNT to correctly update dma_page global count
  drm/radeon: fill in rb backend map on evergreen/ni.
  drm/amdgpu/gfx9: fix ngg enablement to clear gds reserved memory (v2)
  drm/ttm: only free pages rather than update global memory count together
  drm/amdgpu: fix CPU based VM updates
  drm/amdgpu: fix typo in amdgpu_vce_validate_bo
  drm/amdgpu: fix amdgpu_vm_pasid_fault_credit
  drm/ttm: check the return value of register_shrinker
  drm/radeon: fix sparse warning: Should it be static?
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	7
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c	27
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc_dma.c	79
3 files changed, 63 insertions, 50 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2eb71ffe95a6..893003fc76a1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -170,7 +170,8 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 		list_add_tail(&bo->lru, &man->lru[bo->priority]);
 		kref_get(&bo->list_kref);
 
-		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+		if (bo->ttm && !(bo->ttm->page_flags &
+				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
 			list_add_tail(&bo->swap,
 				      &bo->glob->swap_lru[bo->priority]);
 			kref_get(&bo->list_kref);
@@ -1779,8 +1780,8 @@ out:
 	 * Unreserve without putting on LRU to avoid swapping out an
 	 * already swapped buffer.
 	 */
-
-	reservation_object_unlock(bo->resv);
+	if (locked)
+		reservation_object_unlock(bo->resv);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
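The two ttm_bo.c hunks above implement "drm/ttm: Don't add swapped BOs to swap-LRU list" and "drm/ttm: Don't unreserve swapped BOs that were previously reserved": a buffer backed by an SG table or already swapped out must never re-enter the swap LRU, and the swapout path may only drop a reservation lock it actually took. Below is a minimal standalone sketch of the combined flag test from the first hunk; the bit positions are invented for illustration and are not the real TTM definitions.

/* Standalone sketch of the combined page-flags test; the bit
 * positions here are hypothetical, not the TTM definitions. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TTM_PAGE_FLAG_SWAPPED (1u << 4)   /* illustrative value */
#define TTM_PAGE_FLAG_SG      (1u << 8)   /* illustrative value */

/* OR-ing both flags into one mask rejects SG-backed and
 * already-swapped buffers in a single branch. */
static bool eligible_for_swap_lru(uint32_t page_flags)
{
	return !(page_flags & (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED));
}

int main(void)
{
	assert(eligible_for_swap_lru(0));
	assert(!eligible_for_swap_lru(TTM_PAGE_FLAG_SWAPPED));
	assert(!eligible_for_swap_lru(TTM_PAGE_FLAG_SG));
	return 0;
}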
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 79854ab3bc47..2b12c55a3bff 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -477,12 +477,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 	return count;
 }
 
-static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
 	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
 	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
-	register_shrinker(&manager->mm_shrink);
+	return register_shrinker(&manager->mm_shrink);
 }
 
 static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
@@ -1034,15 +1034,18 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
 				   &glob->kobj, "pool");
-	if (unlikely(ret != 0)) {
-		kobject_put(&_manager->kobj);
-		_manager = NULL;
-		return ret;
-	}
-
-	ttm_pool_mm_shrink_init(_manager);
+	if (unlikely(ret != 0))
+		goto error;
 
+	ret = ttm_pool_mm_shrink_init(_manager);
+	if (unlikely(ret != 0))
+		goto error;
 	return 0;
+
+error:
+	kobject_put(&_manager->kobj);
+	_manager = NULL;
+	return ret;
 }
 
void ttm_page_alloc_fini(void)
@@ -1072,7 +1075,8 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
 			    ttm->caching_state);
 	if (unlikely(ret != 0)) {
-		ttm_pool_unpopulate(ttm);
+		ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+			      ttm->caching_state);
 		return ret;
 	}
 
@@ -1080,7 +1084,8 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						PAGE_SIZE, ctx);
 		if (unlikely(ret != 0)) {
-			ttm_pool_unpopulate(ttm);
+			ttm_put_pages(ttm->pages, ttm->num_pages,
+				      ttm->page_flags, ttm->caching_state);
 			return -ENOMEM;
 		}
 	}
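The two ttm_page_alloc.c init hunks above restructure ttm_page_alloc_init() around a single error label: every failure after the kobject is set up, including the newly propagated return value of register_shrinker() (which can fail), funnels through one cleanup path that drops the kobject and clears the global manager pointer. A userspace sketch of that unwind pattern follows, using stand-in helpers rather than the real kernel APIs; the same shape is applied to ttm_dma_page_alloc_init() at the end of this series.

/* Userspace sketch of the single-label unwind adopted above;
 * all helpers are stand-ins, not the real kernel APIs. */
#include <errno.h>
#include <stdlib.h>

struct manager { int shrinker_registered; };

static struct manager *_manager;

/* Stand-in for register_shrinker(), which can fail with -ENOMEM. */
static int shrink_init(struct manager *m)
{
	m->shrinker_registered = 1;
	return 0;
}

static int page_alloc_init(void)
{
	int ret;

	_manager = calloc(1, sizeof(*_manager));
	if (!_manager)
		return -ENOMEM;

	ret = shrink_init(_manager);    /* return value no longer ignored */
	if (ret != 0)
		goto error;

	return 0;

error:
	free(_manager);                 /* kobject_put() in the kernel */
	_manager = NULL;                /* don't leave a stale global */
	return ret;
}

int main(void)
{
	return page_alloc_init() ? 1 : 0;
}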
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 4c659405a008..a88051552ace 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -61,6 +61,7 @@
 #define SMALL_ALLOCATION	4
 #define FREE_ALL_PAGES		(~0U)
 #define VADDR_FLAG_HUGE_POOL	1UL
+#define VADDR_FLAG_UPDATED_COUNT	2UL
 
 enum pool_type {
 	IS_UNDEFINED = 0,
@@ -874,18 +875,18 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
 }
 
 /*
- * @return count of pages still required to fulfill the request.
  * The populate list is actually a stack (not that it matters, as TTM
  * allocates one page at a time).
+ * return dma_page pointer if success, otherwise NULL.
 */
-static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
 				  struct ttm_dma_tt *ttm_dma,
 				  unsigned index)
 {
-	struct dma_page *d_page;
+	struct dma_page *d_page = NULL;
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 	unsigned long irq_flags;
-	int count, r = -ENOMEM;
+	int count;
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
 	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
@@ -894,12 +895,11 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
 	if (count) {
 		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
 		ttm->pages[index] = d_page->p;
 		ttm_dma->dma_address[index] = d_page->dma;
 		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
-		r = 0;
 		pool->npages_in_use += 1;
 		pool->npages_free -= 1;
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
-	return r;
+	return d_page;
 }
static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
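The hunks above change the contract of ttm_dma_pool_get_pages(): instead of returning 0 or -ENOMEM it now hands back the dma_page itself, or NULL on failure, so the populate loops below can tag the exact page whose global memory count they charged. A simplified sketch of that return-the-object style; the types and names are stand-ins, not the TTM structures.

/* Sketch of the new contract: return the object (or NULL) rather
 * than 0/-ENOMEM, so the caller can mark the page it accounted. */
#include <assert.h>
#include <stddef.h>

struct dma_page { unsigned long vaddr; };   /* simplified stand-in */

static struct dma_page *pool_get_page(struct dma_page *pages,
				      size_t nfree, size_t index)
{
	if (index >= nfree)
		return NULL;          /* was: an int error code */
	return &pages[index];         /* caller may tag ->vaddr */
}

int main(void)
{
	struct dma_page pages[2] = { {0}, {0} };

	assert(pool_get_page(pages, 2, 1) == &pages[1]);
	assert(pool_get_page(pages, 2, 2) == NULL);
	return 0;
}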
@@ -934,6 +934,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	unsigned long num_pages = ttm->num_pages;
 	struct dma_pool *pool;
+	struct dma_page *d_page;
 	enum pool_type type;
 	unsigned i;
 	int ret;
@@ -962,8 +963,8 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	while (num_pages >= HPAGE_PMD_NR) {
 		unsigned j;
 
-		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-		if (ret != 0)
+		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+		if (!d_page)
 			break;
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
@@ -973,6 +974,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 			return -ENOMEM;
 		}
 
+		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
 			ttm->pages[j] = ttm->pages[j - 1] + 1;
 			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
@@ -996,8 +998,8 @@ skip_huge:
 	}
 
 	while (num_pages) {
-		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-		if (ret != 0) {
+		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+		if (!d_page) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
 		}
@@ -1009,6 +1011,7 @@ skip_huge:
 			return -ENOMEM;
 		}
 
+		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 		++i;
 		--num_pages;
 	}
@@ -1049,8 +1052,11 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 			continue;
 
 		count++;
-		ttm_mem_global_free_page(ttm->glob->mem_glob,
-					 d_page->p, pool->size);
+		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 d_page->p, pool->size);
+			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
+		}
 		ttm_dma_page_put(pool, d_page);
 	}
 
@@ -1070,9 +1076,19 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 
 	/* make sure pages array match list and count number of pages */
 	count = 0;
-	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+				 page_list) {
 		ttm->pages[count] = d_page->p;
 		count++;
+
+		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 d_page->p, pool->size);
+			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
+		}
+
+		if (is_cached)
+			ttm_dma_page_put(pool, d_page);
 	}
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
@@ -1092,19 +1108,6 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 
-	if (is_cached) {
-		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
-			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 d_page->p, pool->size);
-			ttm_dma_page_put(pool, d_page);
-		}
-	} else {
-		for (i = 0; i < count; i++) {
-			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 ttm->pages[i], pool->size);
-		}
-	}
-
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	for (i = 0; i < ttm->num_pages; i++) {
 		ttm->pages[i] = NULL;
@@ -1182,12 +1185,12 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 	return count;
 }
 
-static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
 	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
 	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
-	register_shrinker(&manager->mm_shrink);
+	return register_shrinker(&manager->mm_shrink);
 }
 
 static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
@@ -1197,7 +1200,7 @@ static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
 
 int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
-	int ret = -ENOMEM;
+	int ret;
 
 	WARN_ON(_manager);
@@ -1205,7 +1208,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 	if (!_manager)
-		goto err;
+		return -ENOMEM;
 
 	mutex_init(&_manager->lock);
 	INIT_LIST_HEAD(&_manager->pools);
@@ -1217,13 +1220,17 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	/* This takes care of auto-freeing the _manager */
 	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
 				   &glob->kobj, "dma_pool");
-	if (unlikely(ret != 0)) {
-		kobject_put(&_manager->kobj);
-		goto err;
-	}
-	ttm_dma_pool_mm_shrink_init(_manager);
+	if (unlikely(ret != 0))
+		goto error;
+
+	ret = ttm_dma_pool_mm_shrink_init(_manager);
+	if (unlikely(ret != 0))
+		goto error;
 	return 0;
-err:
+
+error:
+	kobject_put(&_manager->kobj);
+	_manager = NULL;
 	return ret;
 }
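The VADDR_FLAG_UPDATED_COUNT changes in this file all rely on one trick: dma_page->vaddr holds a page-aligned address, so its low bits are guaranteed to be zero and can carry per-page bookkeeping flags alongside the existing VADDR_FLAG_HUGE_POOL bit. Here the flag records whether ttm_mem_global_alloc_page() was charged for the page, so that ttm_dma_unpopulate() credits the global count back exactly once. Below is a userspace sketch of the low-bit tagging idiom; the flag names mirror the defines above, everything else is illustrative.

/* Userspace sketch of the low-bit pointer-tagging idiom behind
 * VADDR_FLAG_UPDATED_COUNT; illustrative only, not kernel code. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define FLAG_HUGE_POOL     1UL   /* mirrors VADDR_FLAG_HUGE_POOL */
#define FLAG_UPDATED_COUNT 2UL   /* mirrors VADDR_FLAG_UPDATED_COUNT */
#define FLAG_MASK          (FLAG_HUGE_POOL | FLAG_UPDATED_COUNT)

int main(void)
{
	/* 4 KiB alignment guarantees the two lowest bits are free. */
	void *page = aligned_alloc(4096, 4096);
	if (!page)
		return 1;

	uintptr_t vaddr = (uintptr_t)page;
	assert((vaddr & FLAG_MASK) == 0);

	vaddr |= FLAG_UPDATED_COUNT;        /* page charged to global count */

	if (vaddr & FLAG_UPDATED_COUNT) {   /* credit it back exactly once */
		vaddr &= ~FLAG_UPDATED_COUNT;
	}

	free((void *)(vaddr & ~FLAG_MASK)); /* strip flags before use */
	return 0;
}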