Diffstat (limited to 'drivers')
-rw-r--r--   drivers/gpu/drm/i915/i915_drv.h        |  5
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_evict.c  | 33
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_gtt.c    | 52
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_gtt.h    |  5
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_stolen.c |  7
-rw-r--r--   drivers/gpu/drm/i915/i915_trace.h      | 16
-rw-r--r--   drivers/gpu/drm/i915/i915_vgpu.c       | 33
-rw-r--r--   drivers/gpu/drm/i915/i915_vma.c        | 16
8 files changed, 106 insertions, 61 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2827dab61edf..e9b4ece689d0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3469,8 +3469,9 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 					  unsigned cache_level,
 					  u64 start, u64 end,
 					  unsigned flags);
-int __must_check i915_gem_evict_for_vma(struct i915_vma *vma,
-					unsigned int flags);
+int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
+					 struct drm_mm_node *node,
+					 unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 
 /* belongs in i915_gem_gtt.h */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 6a5415e31acf..50b4645bf627 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -231,7 +231,8 @@ found:
 
 /**
  * i915_gem_evict_for_vma - Evict vmas to make room for binding a new one
- * @target: address space and range to evict for
+ * @vm: address space to evict from
+ * @target: range (and color) to evict for
  * @flags: additional flags to control the eviction algorithm
  *
  * This function will try to evict vmas that overlap the target node.
@@ -239,18 +240,20 @@ found:
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
-int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
+int i915_gem_evict_for_node(struct i915_address_space *vm,
+			    struct drm_mm_node *target,
+			    unsigned int flags)
 {
 	LIST_HEAD(eviction_list);
 	struct drm_mm_node *node;
-	u64 start = target->node.start;
-	u64 end = start + target->node.size;
+	u64 start = target->start;
+	u64 end = start + target->size;
 	struct i915_vma *vma, *next;
 	bool check_color;
 	int ret = 0;
 
-	lockdep_assert_held(&target->vm->i915->drm.struct_mutex);
-	trace_i915_gem_evict_vma(target, flags);
+	lockdep_assert_held(&vm->i915->drm.struct_mutex);
+	trace_i915_gem_evict_node(vm, target, flags);
 
 	/* Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
@@ -258,18 +261,18 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
 	 * retiring.
 	 */
 	if (!(flags & PIN_NONBLOCK))
-		i915_gem_retire_requests(target->vm->i915);
+		i915_gem_retire_requests(vm->i915);
 
-	check_color = target->vm->mm.color_adjust;
+	check_color = vm->mm.color_adjust;
 	if (check_color) {
 		/* Expand search to cover neighbouring guard pages (or lack!) */
-		if (start > target->vm->start)
+		if (start > vm->start)
 			start -= I915_GTT_PAGE_SIZE;
-		if (end < target->vm->start + target->vm->total)
+		if (end < vm->start + vm->total)
 			end += I915_GTT_PAGE_SIZE;
 	}
 
-	drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
+	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
 		/* If we find any non-objects (!vma), we cannot evict them */
 		if (node->color == I915_COLOR_UNEVICTABLE) {
 			ret = -ENOSPC;
@@ -285,12 +288,12 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
 		 * those as well to make room for our guard pages.
 		 */
 		if (check_color) {
-			if (vma->node.start + vma->node.size == target->node.start) {
-				if (vma->node.color == target->node.color)
+			if (vma->node.start + vma->node.size == target->start) {
+				if (vma->node.color == target->color)
 					continue;
 			}
-			if (vma->node.start == target->node.start + target->node.size) {
-				if (vma->node.color == target->node.color)
+			if (vma->node.start == target->start + target->size) {
+				if (vma->node.color == target->color)
 					continue;
 			}
 		}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f0f17b2b7215..09a5eca494b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3555,6 +3555,58 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 }
 
 /**
+ * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
+ * @vm - the &struct i915_address_space
+ * @node - the &struct drm_mm_node (typically i915_vma.node)
+ * @size - how much space to allocate inside the GTT,
+ *         must be #I915_GTT_PAGE_SIZE aligned
+ * @offset - where to insert inside the GTT,
+ *           must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
+ *           (@offset + @size) must fit within the address space
+ * @color - color to apply to node, if this node is not from a VMA,
+ *          color must be #I915_COLOR_UNEVICTABLE
+ * @flags - control search and eviction behaviour
+ *
+ * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
+ * the address space (using @size and @color). If the @node does not fit, it
+ * tries to evict any overlapping nodes from the GTT, including any
+ * neighbouring nodes if the colors do not match (to ensure guard pages between
+ * differing domains). See i915_gem_evict_for_node() for the gory details
+ * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
+ * evicting active overlapping objects, and any overlapping node that is pinned
+ * or marked as unevictable will also result in failure.
+ *
+ * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
+ * asked to wait for eviction and interrupted.
+ */
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+			 struct drm_mm_node *node,
+			 u64 size, u64 offset, unsigned long color,
+			 unsigned int flags)
+{
+	int err;
+
+	GEM_BUG_ON(!size);
+	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
+	GEM_BUG_ON(range_overflows(offset, size, vm->total));
+
+	node->size = size;
+	node->start = offset;
+	node->color = color;
+
+	err = drm_mm_reserve_node(&vm->mm, node);
+	if (err != -ENOSPC)
+		return err;
+
+	err = i915_gem_evict_for_node(vm, node, flags);
+	if (err == 0)
+		err = drm_mm_reserve_node(&vm->mm, node);
+
+	return err;
+}
+
+/**
  * i915_gem_gtt_insert - insert a node into an address_space (GTT)
  * @vm - the &struct i915_address_space
  * @node - the &struct drm_mm_node (typically i915_vma.node)
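
A minimal caller sketch, not part of the patch, showing how the new helper can compose with i915_gem_gtt_insert() (whose declaration appears as context in the i915_gem_gtt.h hunk below): try the exact placement first and, only if that range cannot be made available, let the allocator pick any suitable hole. The wrapper name example_reserve_or_insert() is hypothetical, and the trailing start/end/flags parameters of i915_gem_gtt_insert() are assumed from its upstream signature. Note that i915_vma_insert() in the last hunk keeps the two paths separate (PIN_OFFSET_FIXED selects the reserve path) rather than chaining them like this.

/*
 * Illustrative sketch only, not part of the patch. The caller is assumed
 * to hold struct_mutex, since the eviction fallback inside
 * i915_gem_gtt_reserve() asserts it.
 */
static int example_reserve_or_insert(struct i915_address_space *vm,
				     struct drm_mm_node *node,
				     u64 size, u64 preferred_offset,
				     unsigned long color,
				     unsigned int flags)
{
	int err;

	/* Exact placement at @preferred_offset; overlapping nodes (and,
	 * when colouring is enabled, abutting nodes of a different colour)
	 * may be evicted unless PIN_NONBLOCK forbids waiting.
	 */
	err = i915_gem_gtt_reserve(vm, node, size, preferred_offset,
				   color, flags);
	if (err != -ENOSPC)
		return err;

	/* Otherwise accept any suitable hole in the address space. */
	return i915_gem_gtt_insert(vm, node, size, 0 /* no extra alignment */,
				   color, vm->start, vm->start + vm->total,
				   flags);
}
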
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 79198352a491..3e031a057f78 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -532,6 +532,11 @@ int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages);
 
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+			 struct drm_mm_node *node,
+			 u64 size, u64 offset, unsigned long color,
+			 unsigned int flags);
+
 int i915_gem_gtt_insert(struct i915_address_space *vm,
 			struct drm_mm_node *node,
 			u64 size, u64 alignment, unsigned long color,
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index e5be8e04bf3b..52dbb9bab268 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -694,10 +694,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	 * setting up the GTT space. The actual reservation will occur
 	 * later.
 	 */
-	vma->node.start = gtt_offset;
-	vma->node.size = size;
-
-	ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
+	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+				   size, gtt_offset, obj->cache_level,
+				   0);
 	if (ret) {
 		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
 		goto err_pages;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 18ae37c411fd..4461df5a94fe 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -450,9 +450,9 @@ TRACE_EVENT(i915_gem_evict_vm,
 	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
 );
 
-TRACE_EVENT(i915_gem_evict_vma,
-	    TP_PROTO(struct i915_vma *vma, unsigned int flags),
-	    TP_ARGS(vma, flags),
+TRACE_EVENT(i915_gem_evict_node,
+	    TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
+	    TP_ARGS(vm, node, flags),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -464,11 +464,11 @@ TRACE_EVENT(i915_gem_evict_vma,
 			    ),
 
 	    TP_fast_assign(
-			   __entry->dev = vma->vm->i915->drm.primary->index;
-			   __entry->vm = vma->vm;
-			   __entry->start = vma->node.start;
-			   __entry->size = vma->node.size;
-			   __entry->color = vma->node.color;
+			   __entry->dev = vm->i915->drm.primary->index;
+			   __entry->vm = vm;
+			   __entry->start = node->start;
+			   __entry->size = node->size;
+			   __entry->color = node->color;
 			   __entry->flags = flags;
 			  ),
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dae340cfc6c7..f1ad4fbb5ba7 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -116,22 +116,20 @@ void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
 	memset(&bl_info, 0, sizeof(bl_info));
 }
 
-static int vgt_balloon_space(struct drm_mm *mm,
+static int vgt_balloon_space(struct i915_ggtt *ggtt,
 			     struct drm_mm_node *node,
 			     unsigned long start, unsigned long end)
 {
 	unsigned long size = end - start;
 
-	if (start == end)
+	if (start >= end)
 		return -EINVAL;
 
 	DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
 		 start, end, size / 1024);
-
-	node->start = start;
-	node->size = size;
-
-	return drm_mm_reserve_node(mm, node);
+	return i915_gem_gtt_reserve(&ggtt->base, node,
+				    size, start, I915_COLOR_UNEVICTABLE,
+				    0);
 }
 
 /**
@@ -214,10 +212,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 
 	/* Unmappable graphic memory ballooning */
 	if (unmappable_base > ggtt->mappable_end) {
-		ret = vgt_balloon_space(&ggtt->base.mm,
-					&bl_info.space[2],
-					ggtt->mappable_end,
-					unmappable_base);
+		ret = vgt_balloon_space(ggtt, &bl_info.space[2],
+					ggtt->mappable_end, unmappable_base);
 
 		if (ret)
 			goto err;
@@ -228,18 +224,15 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 	 * because it is reserved to the guard page.
 	 */
 	if (unmappable_end < ggtt_end - PAGE_SIZE) {
-		ret = vgt_balloon_space(&ggtt->base.mm,
-					&bl_info.space[3],
-					unmappable_end,
-					ggtt_end - PAGE_SIZE);
+		ret = vgt_balloon_space(ggtt, &bl_info.space[3],
+					unmappable_end, ggtt_end - PAGE_SIZE);
 		if (ret)
 			goto err;
 	}
 
 	/* Mappable graphic memory ballooning */
 	if (mappable_base > ggtt->base.start) {
-		ret = vgt_balloon_space(&ggtt->base.mm,
-					&bl_info.space[0],
+		ret = vgt_balloon_space(ggtt, &bl_info.space[0],
 					ggtt->base.start, mappable_base);
 
 		if (ret)
@@ -247,10 +240,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 	}
 
 	if (mappable_end < ggtt->mappable_end) {
-		ret = vgt_balloon_space(&ggtt->base.mm,
-					&bl_info.space[1],
-					mappable_end,
-					ggtt->mappable_end);
+		ret = vgt_balloon_space(ggtt, &bl_info.space[1],
+					mappable_end, ggtt->mappable_end);
 
 		if (ret)
 			goto err;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index df3750d4c907..b74eeb73ae41 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -419,17 +419,11 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 			goto err_unpin;
 		}
 
-		vma->node.start = offset;
-		vma->node.size = size;
-		vma->node.color = obj->cache_level;
-		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
-		if (ret) {
-			ret = i915_gem_evict_for_vma(vma, flags);
-			if (ret == 0)
-				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
-			if (ret)
-				goto err_unpin;
-		}
+		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
+					   size, offset, obj->cache_level,
+					   flags);
+		if (ret)
+			goto err_unpin;
 	} else {
 		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
 					  size, alignment, obj->cache_level,
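
The vGPU balloon hunks above reserve driver-owned (non-VMA) ranges and mark them I915_COLOR_UNEVICTABLE, so that i915_gem_evict_for_node() later refuses to displace them (it returns -ENOSPC on such nodes); the stolen path passes the object's cache level instead, because its node does belong to a VMA. A minimal sketch of the non-VMA pattern follows, assuming the caller owns a spare struct drm_mm_node; the example_* function names are hypothetical, while i915_gem_gtt_reserve(), I915_COLOR_UNEVICTABLE and the generic drm_mm node helpers are real interfaces.

/*
 * Illustrative sketch only, not part of the patch: carve a fixed,
 * page-aligned range out of the GGTT that eviction must leave alone,
 * in the way the vGPU balloon does above.
 */
static int example_reserve_private_range(struct i915_ggtt *ggtt,
					 struct drm_mm_node *node,
					 u64 start, u64 end)
{
	/* The helper asserts page-aligned size and minimum-aligned offset. */
	if (!IS_ALIGNED(start | end, I915_GTT_PAGE_SIZE))
		return -EINVAL;

	/* Non-VMA nodes carry I915_COLOR_UNEVICTABLE so that
	 * i915_gem_evict_for_node() will not remove them later.
	 */
	return i915_gem_gtt_reserve(&ggtt->base, node, end - start, start,
				    I915_COLOR_UNEVICTABLE, 0);
}

static void example_release_private_range(struct drm_mm_node *node)
{
	if (drm_mm_node_allocated(node))
		drm_mm_remove_node(node);
}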