author     Linus Torvalds <torvalds@linux-foundation.org>    2019-05-14 10:10:55 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2019-05-14 10:10:55 -0700
commit     318222a35bfb0ae9b5ff3e359a583463e6cfcd94 (patch)
tree       6a8d921f7ac9915f3f12dd3fcd7efaaf6f16bb09 /drivers/gpu
parent     7e9890a3500d95c01511a4c45b7e7192dfa47ae2 (diff)
parent     640be2d1ffbc1946f1547eb89b5005ed7542de99 (diff)
download   linux-318222a35bfb0ae9b5ff3e359a583463e6cfcd94.tar.bz2
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
- a few misc things and hotfixes
- ocfs2
- almost all of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (139 commits)
kernel/memremap.c: remove the unused device_private_entry_fault() export
mm: delete find_get_entries_tag
mm/huge_memory.c: make __thp_get_unmapped_area static
mm/mprotect.c: fix compilation warning because of unused 'mm' variable
mm/page-writeback: introduce tracepoint for wait_on_page_writeback()
mm/vmscan: simplify trace_reclaim_flags and trace_shrink_flags
mm/Kconfig: update "Memory Model" help text
mm/vmscan.c: don't disable irq again when count pgrefill for memcg
mm: memblock: make keeping memblock memory opt-in rather than opt-out
hugetlbfs: always use address space in inode for resv_map pointer
mm/z3fold.c: support page migration
mm/z3fold.c: add structure for buddy handles
mm/z3fold.c: improve compression by extending search
mm/z3fold.c: introduce helper functions
mm/page_alloc.c: remove unnecessary parameter in rmqueue_pcplist
mm/hmm: add ARCH_HAS_HMM_MIRROR ARCH_HAS_HMM_DEVICE Kconfig
mm/vmscan.c: simplify shrink_inactive_list()
fs/sync.c: sync_file_range(2) may use WB_SYNC_ALL writeback
xen/privcmd-buf.c: convert to use vm_map_pages_zero()
xen/gntdev.c: convert to use vm_map_pages()
...
Diffstat (limited to 'drivers/gpu')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c      |  8
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_userptr.c     |  2
-rw-r--r--   drivers/gpu/drm/radeon/radeon_mn.c          |  4
-rw-r--r--   drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 17
-rw-r--r--   drivers/gpu/drm/via/via_dmablit.c           |  3
-rw-r--r--   drivers/gpu/drm/xen/xen_drm_front_gem.c     | 18
6 files changed, 16 insertions, 36 deletions
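
Most of the deletions below come from one pattern: drivers that mapped a struct page array into userspace with a hand-rolled vm_insert_page() loop now call the vm_map_pages() helper introduced by this series, which also takes over the vma-size sanity check. A minimal sketch of the conversion, using a hypothetical my_buf object that is not from the patches:

#include <linux/mm.h>

/* Hypothetical buffer object: num_pages struct page pointers in pages[]. */
struct my_buf {
        struct page **pages;
        unsigned long num_pages;
};

static int my_buf_mmap(struct my_buf *buf, struct vm_area_struct *vma)
{
        /*
         * Old style, as removed from rockchip and xen below:
         *
         *      unsigned long uaddr = vma->vm_start;
         *      unsigned long i;
         *      int ret;
         *
         *      for (i = 0; i < buf->num_pages; i++) {
         *              ret = vm_insert_page(vma, uaddr, buf->pages[i]);
         *              if (ret)
         *                      return ret;
         *              uaddr += PAGE_SIZE;
         *      }
         *      return 0;
         */

        /* One call maps the whole array and rejects a vma that is too big. */
        return vm_map_pages(vma, buf->pages, buf->num_pages);
}
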
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 3e6823fdd939..58ed401c5996 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -256,14 +256,14 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 	/* TODO we should be able to split locking for interval tree and
 	 * amdgpu_mn_invalidate_node
 	 */
-	if (amdgpu_mn_read_lock(amn, range->blockable))
+	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
 		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, range->start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
@@ -299,7 +299,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end = range->end - 1;
 
-	if (amdgpu_mn_read_lock(amn, range->blockable))
+	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
 		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, range->start, end);
@@ -307,7 +307,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 215bf3fef10c..8079ea3af103 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -122,7 +122,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	while (it) {
 		struct drm_i915_gem_object *obj;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			ret = -EAGAIN;
 			break;
 		}
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index b3019505065a..c9bd1278f573 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -133,7 +133,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* TODO we should be able to split locking for interval tree and
 	 * the tear down.
 	 */
-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
 		mutex_lock(&rmn->lock);
 	else if (!mutex_trylock(&rmn->lock))
 		return -EAGAIN;
@@ -144,7 +144,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 		struct radeon_bo *bo;
 		long r;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			ret = -EAGAIN;
 			goto out_unlock;
 		}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index a8db758d523e..a2ebb08990e9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -221,26 +221,13 @@ static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
 					      struct vm_area_struct *vma)
 {
 	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
-	unsigned int i, count = obj->size >> PAGE_SHIFT;
+	unsigned int count = obj->size >> PAGE_SHIFT;
 	unsigned long user_count = vma_pages(vma);
-	unsigned long uaddr = vma->vm_start;
-	unsigned long offset = vma->vm_pgoff;
-	unsigned long end = user_count + offset;
-	int ret;
 
 	if (user_count == 0)
 		return -ENXIO;
 
-	if (end > count)
-		return -ENXIO;
-	for (i = offset; i < end; i++) {
-		ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
-		if (ret)
-			return ret;
-		uaddr += PAGE_SIZE;
-	}
-
-	return 0;
+	return vm_map_pages(vma, rk_obj->pages, count);
 }
 
 static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 8bf3a7c23ed3..062067438f1d 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -243,7 +243,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 	if (NULL == vsg->pages)
 		return -ENOMEM;
 	ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
-			vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
+			vsg->num_pages,
+			vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
 			vsg->pages);
 	if (ret != vsg->num_pages) {
 		if (ret < 0)
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 53c376d55fcf..a24548489dde 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -224,8 +224,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
 static int gem_mmap_obj(struct xen_gem_object *xen_obj,
 			struct vm_area_struct *vma)
 {
-	unsigned long addr = vma->vm_start;
-	int i;
+	int ret;
 
 	/*
 	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
@@ -252,18 +251,11 @@ static int gem_mmap_obj(struct xen_gem_object *xen_obj,
 	 * FIXME: as we insert all the pages now then no .fault handler must
 	 * be called, so don't provide one
 	 */
-	for (i = 0; i < xen_obj->num_pages; i++) {
-		int ret;
-
-		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
-		if (ret < 0) {
-			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
-			return ret;
-		}
+	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+	if (ret < 0)
+		DRM_ERROR("Failed to map pages into vma: %d\n", ret);
 
-		addr += PAGE_SIZE;
-	}
-	return 0;
+	return ret;
 }
 
 int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
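
The amdgpu, i915, and radeon hunks above are a single mechanical substitution: direct reads of range->blockable become calls to the new mmu_notifier_range_blockable() accessor. The calling convention is unchanged; a non-blockable invalidation must still fail with -EAGAIN rather than sleep on a contended lock. A sketch of the pattern with hypothetical my_notifier names (the driver-specific parts are placeholders, not code from these patches):

#include <linux/kernel.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

/* Hypothetical per-mm driver state guarded by a sleeping lock. */
struct my_notifier {
        struct mmu_notifier mn;
        struct mutex lock;
};

static int my_invalidate_range_start(struct mmu_notifier *mn,
                                     const struct mmu_notifier_range *range)
{
        struct my_notifier *my = container_of(mn, struct my_notifier, mn);

        /* Accessor replaces the old direct range->blockable read. */
        if (mmu_notifier_range_blockable(range))
                mutex_lock(&my->lock);
        else if (!mutex_trylock(&my->lock))
                return -EAGAIN; /* caller retries from a blockable context */

        /* ... tear down mappings overlapping [range->start, range->end) ... */

        mutex_unlock(&my->lock);
        return 0;
}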
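
The via_dmablit.c hunk tracks a separate interface change from the same merge: get_user_pages_fast() now takes a gup_flags argument in place of the old write boolean, so callers spell out FOLL_WRITE when the device will write to the pinned memory. A minimal sketch of a caller under the new signature (my_pin_user_buffer is a made-up name):

#include <linux/mm.h>

/*
 * Pin nr_pages of user memory starting at uaddr. FOLL_WRITE is needed
 * exactly when the hardware will write to the pages, e.g. a
 * DMA_FROM_DEVICE transfer.
 */
static int my_pin_user_buffer(unsigned long uaddr, int nr_pages,
                              bool device_writes, struct page **pages)
{
        unsigned int gup_flags = device_writes ? FOLL_WRITE : 0;
        int pinned = get_user_pages_fast(uaddr, nr_pages, gup_flags, pages);

        if (pinned < 0)
                return pinned;          /* hard failure, nothing pinned */
        if (pinned != nr_pages) {
                /* Partial pin: release what we got and report failure. */
                while (pinned--)
                        put_page(pages[pinned]);
                return -EFAULT;
        }
        return 0;
}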