Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c              3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c         6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c           4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c           82
-rw-r--r--  drivers/gpu/drm/ttm/ttm_range_manager.c   3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c       14
6 files changed, 70 insertions, 42 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7c8e8be774f1..c3f4b33136e5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -51,9 +51,6 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man;
int i, mem_type;
- drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
- bo, bo->resource->num_pages, bo->base.size >> 10,
- bo->base.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
mem_type = placement->placement[i].mem_type;
drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
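Note on the hunk above: it drops the one-line "No space for ..." summary from ttm_bo_mem_space_debug(), leaving only the per-placement lines. If an equivalent summary is ever wanted again, it can be derived from bo->base.size alone, without the removed resource->num_pages field; a minimal sketch, not part of this patch:

	/* Hypothetical replacement for the removed message: PFN_UP() turns
	 * the byte size into a page count, and %zu matches size_t. */
	drm_printf(&p, "No space for %p (%zu pages, %zuK, %zuM)\n",
		   bo, PFN_UP(bo->base.size), bo->base.size >> 10,
		   bo->base.size >> 20);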
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fa04e62202c1..ba3aa0a0fc43 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -173,7 +173,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
- ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
+ ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter);
if (!src_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
@@ -357,9 +357,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
map->virtual = NULL;
map->bo = bo;
- if (num_pages > bo->resource->num_pages)
+ if (num_pages > PFN_UP(bo->resource->size))
return -EINVAL;
- if ((start_page + num_pages) > bo->resource->num_pages)
+ if ((start_page + num_pages) > PFN_UP(bo->resource->size))
return -EINVAL;
ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
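Both ttm_bo_kmap() range checks above now derive the page count from the resource's byte size on the fly. The identity they rely on is visible later in this diff (ttm_resource_init() used to set num_pages = PFN_UP(bo->base.size)), so the checks translate 1:1. A standalone sketch of the conversion, with a hypothetical helper name:

	#include <linux/pfn.h>		/* PFN_UP() */

	/* Hypothetical helper: the old res->num_pages was always the byte
	 * size rounded up to whole pages, i.e. PFN_UP(size). */
	static inline unsigned long ttm_res_pages(size_t size)
	{
		return PFN_UP(size);	/* (size + PAGE_SIZE - 1) >> PAGE_SHIFT */
	}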
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 38119311284d..5a3e4b891377 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -217,7 +217,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
page_last = vma_pages(vma) + vma->vm_pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->resource->num_pages))
+ if (unlikely(page_offset >= PFN_UP(bo->base.size)))
return VM_FAULT_SIGBUS;
prot = ttm_io_prot(bo, bo->resource, prot);
@@ -412,7 +412,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
<< PAGE_SHIFT);
int ret;
- if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
+ if (len < 1 || (offset + len) > bo->base.size)
return -EIO;
ret = ttm_bo_reserve(bo, true, false, NULL);
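The ttm_bo_vm_access() check also becomes stricter: comparing bytes instead of right-shifted page counts closes a window where an access could reach slightly past the end of the buffer. A worked example with made-up numbers (4 KiB pages):

	/* bo->base.size == 8192 (two pages), offset == 4096, len == 4097,
	 * i.e. the access would end one byte past the buffer.
	 *
	 *   old: (4096 + 4097) >> PAGE_SHIFT == 2; 2 > num_pages (2) is false,
	 *        so the access was allowed.
	 *   new: 4096 + 4097 == 8193 > bo->base.size (8192), so -EIO.
	 */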
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 21b61631f73a..9f6764bf3b15 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -344,6 +344,28 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
return p->private;
}
+/* Called when we got a page, either from a pool or newly allocated */
+static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
+ struct page *p, dma_addr_t **dma_addr,
+ unsigned long *num_pages,
+ struct page ***pages)
+{
+ unsigned int i;
+ int r;
+
+ if (*dma_addr) {
+ r = ttm_pool_map(pool, order, p, dma_addr);
+ if (r)
+ return r;
+ }
+
+ *num_pages -= 1 << order;
+ for (i = 1 << order; i; --i, ++(*pages), ++p)
+ **pages = p;
+
+ return 0;
+}
+
/**
* ttm_pool_alloc - Fill a ttm_tt object
*
@@ -385,45 +407,57 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
- bool apply_caching = false;
struct ttm_pool_type *pt;
pt = ttm_pool_select_type(pool, tt->caching, order);
p = pt ? ttm_pool_type_take(pt) : NULL;
if (p) {
- apply_caching = true;
- } else {
- p = ttm_pool_alloc_page(pool, gfp_flags, order);
- if (p && PageHighMem(p))
- apply_caching = true;
- }
-
- if (!p) {
- if (order) {
- --order;
- continue;
- }
- r = -ENOMEM;
- goto error_free_all;
- }
-
- if (apply_caching) {
r = ttm_pool_apply_caching(caching, pages,
tt->caching);
if (r)
goto error_free_page;
- caching = pages + (1 << order);
+
+ do {
+ r = ttm_pool_page_allocated(pool, order, p,
+ &dma_addr,
+ &num_pages,
+ &pages);
+ if (r)
+ goto error_free_page;
+
+ if (num_pages < (1 << order))
+ break;
+
+ p = ttm_pool_type_take(pt);
+ } while (p);
+ caching = pages;
}
- if (dma_addr) {
- r = ttm_pool_map(pool, order, p, &dma_addr);
+ while (num_pages >= (1 << order) &&
+ (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
+
+ if (PageHighMem(p)) {
+ r = ttm_pool_apply_caching(caching, pages,
+ tt->caching);
+ if (r)
+ goto error_free_page;
+ }
+ r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
+ &num_pages, &pages);
if (r)
goto error_free_page;
+ if (PageHighMem(p))
+ caching = pages;
}
- num_pages -= 1 << order;
- for (i = 1 << order; i; --i)
- *(pages++) = p++;
+ if (!p) {
+ if (order) {
+ --order;
+ continue;
+ }
+ r = -ENOMEM;
+ goto error_free_all;
+ }
}
r = ttm_pool_apply_caching(caching, pages, tt->caching);
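The ttm_pool_alloc() rework above splits each order iteration into two phases: first drain the matching pool via ttm_pool_type_take(), then fall back to freshly allocated pages, and only drop to a smaller order when both come up empty. The shared bookkeeping (optional DMA mapping, decrementing num_pages, filling the pages array) now lives in the new ttm_pool_page_allocated() helper. A simplified sketch of one iteration, with the caching transitions and most error handling omitted; this is an outline, not the actual kernel code:

	while (pt && (p = ttm_pool_type_take(pt))) {	/* phase 1: reuse pooled pages */
		ttm_pool_page_allocated(pool, order, p, &dma_addr,
					&num_pages, &pages);
		if (num_pages < (1 << order))
			break;
	}

	while (num_pages >= (1 << order) &&		/* phase 2: allocate new pages */
	       (p = ttm_pool_alloc_page(pool, gfp_flags, order)))
		ttm_pool_page_allocated(pool, order, p, &dma_addr,
					&num_pages, &pages);

	if (!p && order)
		--order;				/* retry with smaller pages */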
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 4cfef2b3514d..0a8bc0b7f380 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -83,7 +83,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
spin_lock(&rman->lock);
ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
- node->base.num_pages,
+ PFN_UP(node->base.size),
bo->page_alignment, 0,
place->fpfn, lpfn, mode);
spin_unlock(&rman->lock);
@@ -229,7 +229,6 @@ int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
return ret;
spin_lock(&rman->lock);
- drm_mm_clean(mm);
drm_mm_takedown(mm);
spin_unlock(&rman->lock);
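Dropping the bare drm_mm_clean() call above loses nothing: its return value was ignored, and drm_mm_takedown() already checks for leftover nodes itself, warning and dumping leaks if the allocator is not clean. Paraphrased for reference, not part of this patch:

	/* Approximate shape of drm_mm_takedown(); see drivers/gpu/drm/drm_mm.c. */
	void drm_mm_takedown(struct drm_mm *mm)
	{
		if (WARN(!drm_mm_clean(mm),
			 "Memory manager not clean during takedown.\n"))
			show_leaks(mm);
	}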
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index a729c32a1e48..328391bb1d87 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -177,7 +177,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man;
res->start = 0;
- res->num_pages = PFN_UP(bo->base.size);
+ res->size = bo->base.size;
res->mem_type = place->mem_type;
res->placement = place->flags;
res->bus.addr = NULL;
@@ -192,7 +192,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
list_add_tail(&res->lru, &bo->bdev->pinned);
else
list_add_tail(&res->lru, &man->lru[bo->priority]);
- man->usage += res->num_pages << PAGE_SHIFT;
+ man->usage += res->size;
spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
@@ -214,7 +214,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
spin_lock(&bdev->lru_lock);
list_del_init(&res->lru);
- man->usage -= res->num_pages << PAGE_SHIFT;
+ man->usage -= res->size;
spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);
@@ -665,17 +665,15 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
iter_io->needs_unmap = false;
} else {
- size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
-
iter_io->needs_unmap = true;
memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
if (mem->bus.caching == ttm_write_combined)
iosys_map_set_vaddr_iomem(&iter_io->dmap,
ioremap_wc(mem->bus.offset,
- bus_size));
+ mem->size));
else if (mem->bus.caching == ttm_cached)
iosys_map_set_vaddr(&iter_io->dmap,
- memremap(mem->bus.offset, bus_size,
+ memremap(mem->bus.offset, mem->size,
MEMREMAP_WB |
MEMREMAP_WT |
MEMREMAP_WC));
@@ -684,7 +682,7 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
if (iosys_map_is_null(&iter_io->dmap))
iosys_map_set_vaddr_iomem(&iter_io->dmap,
ioremap(mem->bus.offset,
- bus_size));
+ mem->size));
if (iosys_map_is_null(&iter_io->dmap)) {
ret = -ENOMEM;
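With resources carrying a byte size, ttm_kmap_iter_linear_io_init() above maps mem->size directly instead of recomputing a page-rounded bus_size. Since the old num_pages was PFN_UP(size), the mapped length is unchanged for page-aligned resources and only shrinks to the exact size otherwise; an illustration with made-up values:

	/* old: bus_size = PFN_UP(mem->size) << PAGE_SHIFT   (page rounded)
	 * new: mem->size                                    (exact bytes)
	 *
	 * mem->size == 0x3000: both variants map 0x3000 bytes.
	 * mem->size == 0x2800: old mapped 0x3000, new maps 0x2800.
	 */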