summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
diff options
context:
space:
mode:
author: Matthew Auld <matthew.auld@intel.com> 2019-10-08 17:01:15 +0100
committer: Chris Wilson <chris@chris-wilson.co.uk> 2019-10-08 20:50:01 +0100
commit: 2f0b97ca02118630132dddf258fbdb5d5f5ec32a (patch)
tree: 655a905f97962439bc6069ae84c45164428bb8ae /drivers/gpu/drm/i915/gem/selftests/huge_pages.c
parent: 232a6ebae419193f5b8da4fa869ae5089ab105c2 (diff)
download: linux-2f0b97ca02118630132dddf258fbdb5d5f5ec32a.tar.bz2
drm/i915/region: support contiguous allocations
Some kernel internal objects may need to be allocated as a contiguous block, also thinking ahead the various kernel io_mapping interfaces seem to expect it, although this is purely a limitation in the kernel API... so perhaps something to be improved.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Michael J Ruhl <michael.j.ruhl@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191008160116.18379-3-matthew.auld@intel.com
Diffstat (limited to 'drivers/gpu/drm/i915/gem/selftests/huge_pages.c')
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 71
1 file changed, 39 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index b4c390e9fa50..63a4743e5f54 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -456,6 +456,7 @@ out_device:
static int igt_mock_memory_region_huge_pages(void *arg)
{
+ const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long supported = INTEL_INFO(i915)->page_sizes;
@@ -474,46 +475,52 @@ static int igt_mock_memory_region_huge_pages(void *arg)
for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
unsigned int page_size = BIT(bit);
resource_size_t phys;
+ int i;
- obj = i915_gem_object_create_region(mem, page_size, 0);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_region;
- }
+ for (i = 0; i < ARRAY_SIZE(flags); ++i) {
+ obj = i915_gem_object_create_region(mem, page_size,
+ flags[i]);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_region;
+ }
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_put;
- }
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto out_close;
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
- err = igt_check_page_sizes(vma);
- if (err)
- goto out_unpin;
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_unpin;
- phys = i915_gem_object_get_dma_address(obj, 0);
- if (!IS_ALIGNED(phys, page_size)) {
- pr_err("%s addr misaligned(%pa) page_size=%u\n",
- __func__, &phys, page_size);
- err = -EINVAL;
- goto out_unpin;
- }
+ phys = i915_gem_object_get_dma_address(obj, 0);
+ if (!IS_ALIGNED(phys, page_size)) {
+ pr_err("%s addr misaligned(%pa) page_size=%u\n",
+ __func__, &phys, page_size);
+ err = -EINVAL;
+ goto out_unpin;
+ }
- if (vma->page_sizes.gtt != page_size) {
- pr_err("%s page_sizes.gtt=%u, expected=%u\n",
- __func__, vma->page_sizes.gtt, page_size);
- err = -EINVAL;
- goto out_unpin;
- }
+ if (vma->page_sizes.gtt != page_size) {
+ pr_err("%s page_sizes.gtt=%u, expected=%u\n",
+ __func__, vma->page_sizes.gtt,
+ page_size);
+ err = -EINVAL;
+ goto out_unpin;
+ }
- i915_vma_unpin(vma);
- i915_vma_close(vma);
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
- i915_gem_object_put(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ i915_gem_object_put(obj);
+ }
}
goto out_region;