Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
 -rw-r--r--   drivers/gpu/drm/i915/i915_vma.c | 144
 1 file changed, 124 insertions, 20 deletions
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 02d1a5eacb00..fbfab2f33023 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -54,12 +54,21 @@ i915_vma_retire(struct i915_gem_active *active,
 	if (--obj->active_count)
 		return;
 
+	/* Prune the shared fence arrays iff completely idle (inc. external) */
+	if (reservation_object_trylock(obj->resv)) {
+		if (reservation_object_test_signaled_rcu(obj->resv, true))
+			reservation_object_add_excl_fence(obj->resv, NULL);
+		reservation_object_unlock(obj->resv);
+	}
+
 	/* Bump our place on the bound list to keep it roughly in LRU order
 	 * so that we don't steal from recently used but inactive objects
 	 * (unless we are forced to ofc!)
 	 */
+	spin_lock(&rq->i915->mm.obj_lock);
 	if (obj->bind_count)
-		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
+	spin_unlock(&rq->i915->mm.obj_lock);
 
 	obj->mm.dirty = true; /* be paranoid */
 
@@ -266,6 +275,8 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (bind_flags == 0)
 		return 0;
 
+	GEM_BUG_ON(!vma->pages);
+
 	trace_i915_vma_bind(vma, bind_flags);
 	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
 	if (ret)
@@ -278,13 +289,16 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 {
 	void __iomem *ptr;
+	int err;
 
 	/* Access through the GTT requires the device to be awake. */
 	assert_rpm_wakelock_held(vma->vm->i915);
 
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
-		return IO_ERR_PTR(-ENODEV);
+	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+		err = -ENODEV;
+		goto err;
+	}
 
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
@@ -294,14 +308,36 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
 					vma->node.start,
 					vma->node.size);
-		if (ptr == NULL)
-			return IO_ERR_PTR(-ENOMEM);
+		if (ptr == NULL) {
+			err = -ENOMEM;
+			goto err;
+		}
 
 		vma->iomap = ptr;
 	}
 
 	__i915_vma_pin(vma);
+
+	err = i915_vma_pin_fence(vma);
+	if (err)
+		goto err_unpin;
+
 	return ptr;
+
+err_unpin:
+	__i915_vma_unpin(vma);
+err:
+	return IO_ERR_PTR(err);
+}
+
+void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+
+	GEM_BUG_ON(vma->iomap == NULL);
+
+	i915_vma_unpin_fence(vma);
+	i915_vma_unpin(vma);
 }
 
 void i915_vma_unpin_and_release(struct i915_vma **p_vma)
@@ -471,25 +507,64 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	if (ret)
 		return ret;
 
+	GEM_BUG_ON(vma->pages);
+
+	ret = vma->vm->set_pages(vma);
+	if (ret)
+		goto err_unpin;
+
 	if (flags & PIN_OFFSET_FIXED) {
 		u64 offset = flags & PIN_OFFSET_MASK;
 		if (!IS_ALIGNED(offset, alignment) ||
 		    range_overflows(offset, size, end)) {
 			ret = -EINVAL;
-			goto err_unpin;
+			goto err_clear;
 		}
 
 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
 					   size, offset, obj->cache_level,
 					   flags);
 		if (ret)
-			goto err_unpin;
+			goto err_clear;
 	} else {
+		/*
+		 * We only support huge gtt pages through the 48b PPGTT,
+		 * however we also don't want to force any alignment for
+		 * objects which need to be tightly packed into the low 32bits.
+		 *
+		 * Note that we assume that GGTT are limited to 4GiB for the
+		 * forseeable future. See also i915_ggtt_offset().
+		 */
+		if (upper_32_bits(end - 1) &&
+		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+			/*
+			 * We can't mix 64K and 4K PTEs in the same page-table
+			 * (2M block), and so to avoid the ugliness and
+			 * complexity of coloring we opt for just aligning 64K
+			 * objects to 2M.
+			 */
+			u64 page_alignment =
+				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);
+
+			/*
+			 * Check we don't expand for the limited Global GTT
+			 * (mappable aperture is even more precious!). This
+			 * also checks that we exclude the aliasing-ppgtt.
+			 */
+			GEM_BUG_ON(i915_vma_is_ggtt(vma));
+
+			alignment = max(alignment, page_alignment);
+
+			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+		}
+
 		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
 					  size, alignment, obj->cache_level,
 					  start, end, flags);
 		if (ret)
-			goto err_unpin;
+			goto err_clear;
 
 		GEM_BUG_ON(vma->node.start < start);
 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
@@ -497,13 +572,19 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
-	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
 	obj->bind_count++;
+	spin_unlock(&dev_priv->mm.obj_lock);
+
 	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
 
 	return 0;
 
+err_clear:
+	vma->vm->clear_pages(vma);
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
 	return ret;
@@ -512,20 +593,24 @@ err_unpin:
 static void
 i915_vma_remove(struct i915_vma *vma)
 {
+	struct drm_i915_private *i915 = vma->vm->i915;
 	struct drm_i915_gem_object *obj = vma->obj;
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
+	vma->vm->clear_pages(vma);
+
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist.
 	 */
+	spin_lock(&i915->mm.obj_lock);
 	if (--obj->bind_count == 0)
-		list_move_tail(&obj->global_link,
-			       &to_i915(obj->base.dev)->mm.unbound_list);
+		list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+	spin_unlock(&i915->mm.obj_lock);
 
 	/* And finally now the object is completely decoupled from this vma,
 	 * we can drop its hold on the backing storage and allow it to be
@@ -569,8 +654,8 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 
 err_remove:
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
-		GEM_BUG_ON(vma->pages);
 		i915_vma_remove(vma);
+		GEM_BUG_ON(vma->pages);
 	}
 err_unpin:
 	__i915_vma_unpin(vma);
@@ -620,6 +705,30 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
 	vma->iomap = NULL;
 }
 
+void i915_vma_revoke_mmap(struct i915_vma *vma)
+{
+	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+	u64 vma_offset;
+
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+	if (!i915_vma_has_userfault(vma))
+		return;
+
+	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+	GEM_BUG_ON(!vma->obj->userfault_count);
+
+	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+			    drm_vma_node_offset_addr(node) + vma_offset,
+			    vma->size,
+			    1);
+
+	i915_vma_unset_userfault(vma);
+	if (!--vma->obj->userfault_count)
+		list_del(&vma->obj->userfault_link);
+}
+
 int i915_vma_unbind(struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
@@ -683,11 +792,13 @@ int i915_vma_unbind(struct i915_vma *vma)
 			return ret;
 
 		/* Force a pagefault for domain tracking on next user access */
-		i915_gem_release_mmap(obj);
+		i915_vma_revoke_mmap(vma);
 
 		__i915_vma_iounmap(vma);
 		vma->flags &= ~I915_VMA_CAN_FENCE;
 	}
+	GEM_BUG_ON(vma->fence);
+	GEM_BUG_ON(i915_vma_has_userfault(vma));
 
 	if (likely(!vma->vm->closed)) {
 		trace_i915_vma_unbind(vma);
@@ -695,13 +806,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-	if (vma->pages != obj->mm.pages) {
-		GEM_BUG_ON(!vma->pages);
-		sg_free_table(vma->pages);
-		kfree(vma->pages);
-	}
-	vma->pages = NULL;
-
 	i915_vma_remove(vma);
 
 destroy:
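
With this change i915_vma_pin_iomap() also takes a fence pin and unwinds it through the new err_unpin/err labels, while the matching i915_vma_unpin_iomap() drops both the fence and the vma pin. The following is a hypothetical caller sketch, not part of the patch: the helper name write_through_ggtt() and its body are illustrative; only i915_vma_pin_iomap(), i915_vma_unpin_iomap(), IS_ERR(), PTR_ERR() and writel() are existing interfaces.

/* Hypothetical caller: illustrative only, not from this patch. */
static int write_through_ggtt(struct i915_vma *vma, u32 value)
{
	void __iomem *ptr;

	/* i915_vma_pin_iomap() itself asserts that struct_mutex and a
	 * runtime-pm wakeref are held, and now also pins a fence. */
	ptr = i915_vma_pin_iomap(vma);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr); /* -ENODEV, -ENOMEM or a fence error */

	writel(value, ptr);

	/* Releases the fence and the vma pin taken by pin_iomap(). */
	i915_vma_unpin_iomap(vma);
	return 0;
}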
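
The new huge-page branch in i915_vma_insert() bumps the alignment because 64K and 4K PTEs cannot share one 2M page-table block; the arithmetic reduces to rounddown_pow_of_two(page_sizes | 2M), which for i915's supported page sizes (4K/64K/2M) always yields 2M once any page size above 4K is present. Below is a standalone sketch of just that calculation; it omits the 48b-PPGTT check on the address-space end, and the constants and helper are local stand-ins for the kernel's I915_GTT_PAGE_SIZE* macros and rounddown_pow_of_two().

#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SIZE_4K  (1ull << 12)
#define GTT_PAGE_SIZE_64K (1ull << 16)
#define GTT_PAGE_SIZE_2M  (1ull << 21)

/* Largest power of two <= x, for x != 0 (stand-in for the kernel helper). */
static uint64_t rounddown_pow2(uint64_t x)
{
	while (x & (x - 1))
		x &= x - 1;
	return x;
}

int main(void)
{
	/* Mask of page sizes backing the object, e.g. mixed 64K + 4K. */
	uint64_t sg = GTT_PAGE_SIZE_64K | GTT_PAGE_SIZE_4K;
	uint64_t alignment = GTT_PAGE_SIZE_4K;
	uint64_t size = 3 * GTT_PAGE_SIZE_64K;

	if (sg > GTT_PAGE_SIZE_4K) {
		/* Any page size above 4K forces 2M alignment here. */
		uint64_t page_alignment = rounddown_pow2(sg | GTT_PAGE_SIZE_2M);

		if (page_alignment > alignment)
			alignment = page_alignment;

		/* Objects with 64K pages are also padded to a whole 2M block. */
		if (sg & GTT_PAGE_SIZE_64K)
			size = (size + GTT_PAGE_SIZE_2M - 1) &
			       ~(GTT_PAGE_SIZE_2M - 1);
	}

	printf("alignment=0x%jx size=0x%jx\n",
	       (uintmax_t)alignment, (uintmax_t)size);
	return 0;
}

For the example mask above this prints alignment=0x200000 size=0x200000, i.e. a 192K object made of 64K pages is placed at 2M alignment and padded to one full 2M block.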