commit    1d36dffa5d887715dacca0f717f4519b7be5e498
tree      a68f7c00dbb3036a67806ed6c6b8cc61c3cff60d /drivers/gpu/drm/msm/msm_gem.c
parent    2c85ebc57b3e1817b6ce1a6b703928e113a90442
parent    b10733527bfd864605c33ab2e9a886eec317ec39
author    Linus Torvalds <torvalds@linux-foundation.org>  2020-12-14 11:07:56 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-12-14 11:07:56 -0800
Merge tag 'drm-next-2020-12-11' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
"Not a huge amount of big things here, AMD has support for a few new HW
variants (vangogh, green sardine, dimgrey cavefish), Intel has some
more DG1 enablement. We have a few big reworks of the TTM layers and
interfaces, GEM and atomic internal API reworks cross tree. fbdev is
marked orphaned in here as well to reflect the current reality.
core:
- documentation updates
- deprecate DRM_FORMAT_MOD_NONE
- atomic crtc enable/disable rework
- GEM convert drivers to gem object functions
- remove SCATTER_LIST_MAX_SEGMENT
sched:
- avoid infinite waits
ttm:
- remove AGP support
- don't modify caching for swapout
- ttm pinning rework
- major TTM reworks
- new backend allocator
- multihop support
vram-helper:
- top down BO placement fix
- TTM changes
- GEM object support
displayport:
- DP 2.0 DPCD prep work
- DP MST extended DPCD caps
fbdev:
- mark as orphaned
amdgpu:
- Initial Vangogh support
- Green Sardine support
- Dimgrey Cavefish support
- SG display support for Renoir
- SMU7 improvements
- gfx9+ modifier support
- CI BACO fixes
radeon:
- expose voltage via hwmon on SUMO
amdkfd:
- fix unique id handling
i915:
- more DG1 enablement
- bigjoiner support
- integer scaling filter support
- async flip support
- ICL+ DSI command mode
- Improve display shutdown
- Display refactoring
- eLLC machine fbdev loading fix
- dma scatterlist fixes
- TGL hang fixes
- eLLC display buffer caching on SKL+
- MOCS PTE setting for gen9+
msm:
- Shutdown hook
- GPU cooling device support
- DSI 7nm and 10nm phy/pll updates
- sm8150/sm8250 DPU support
- GEM locking re-work
- LLCC system cache support
aspeed:
- sysfs output config support
ast:
- LUT fix
- new display mode
gma500:
- remove 2d framebuffer accel
panfrost:
- move gpu reset to a worker
exynos:
- new HDMI mode support
mediatek:
- MT8167 support
- yaml bindings
- MIPI DSI phy code moved
etnaviv:
- new perf counter
- more lockdep annotation
hibmc:
- i2c DDC support
ingenic:
- pixel clock reset fix
- reserved memory support
- allow both DMA channels at once
- different pixel format support
- 30/24/8-bit palette modes
tilcdc:
- don't keep vblank irq enabled
vc4:
- new maintainer added
- DSI registration fix
virtio:
- blob resource support
- host visible and cross-device support
- uuid api support"
* tag 'drm-next-2020-12-11' of git://anongit.freedesktop.org/drm/drm: (1754 commits)
drm/amdgpu: Initialise drm_gem_object_funcs for imported BOs
drm/amdgpu: fix size calculation with stolen vga memory
drm/amdgpu: remove amdgpu_ttm_late_init and amdgpu_bo_late_init
drm/amdgpu: free the pre-OS console framebuffer after the first modeset
drm/amdgpu: enable runtime pm using BACO on CI dGPUs
drm/amdgpu/cik: enable BACO reset on Bonaire
drm/amd/pm: update smu10.h WORKLOAD_PPLIB setting for raven
drm/amd/pm: remove one unsupported smu function for vangogh
drm/amd/display: setup system context for APUs
drm/amd/display: add S/G support for Vangogh
drm/amdkfd: Fix leak in dmabuf import
drm/amdgpu: use AMDGPU_NUM_VMID when possible
drm/amdgpu: fix sdma instance fw version and feature version init
drm/amd/pm: update driver if version for dimgrey_cavefish
drm/amd/display: 3.2.115
drm/amd/display: [FW Promotion] Release 0.0.45
drm/amd/display: Revert DCN2.1 dram_clock_change_latency update
drm/amd/display: Enable gpu_vm_support for dcn3.01
drm/amd/display: Fixed the audio noise during mode switching with HDCP mode on
drm/amd/display: Add wm table for Renoir
...
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 352
1 file changed, 200 insertions(+), 152 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 04be4cfcccc1..82cbaf337b50 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -18,8 +18,7 @@
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
-static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
-
+static void update_inactive(struct msm_gem_object *msm_obj);
 
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
@@ -177,15 +176,15 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **p;
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 
 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		return ERR_PTR(-EBUSY);
 	}
 
 	p = get_pages(obj);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 
 	return p;
 }
@@ -236,7 +235,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
 }
 
-vm_fault_t msm_gem_fault(struct vm_fault *vmf)
+static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
@@ -251,14 +250,14 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
 	 * a reference on obj. So, we dont need to hold one here.
 	 */
-	err = mutex_lock_interruptible(&msm_obj->lock);
+	err = msm_gem_lock_interruptible(obj);
 	if (err) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
 
 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		return VM_FAULT_SIGBUS;
 	}
 
@@ -279,7 +278,7 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 	ret = vmf_insert_mixed(vma, vmf->address,
 			__pfn_to_pfn_t(pfn, PFN_DEV));
 out_unlock:
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 out:
 	return ret;
 }
@@ -288,10 +287,9 @@ out:
 static uint64_t mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	/* Make it mmapable */
 	ret = drm_gem_create_mmap_offset(obj);
@@ -307,11 +305,10 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 {
 	uint64_t offset;
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	offset = mmap_offset(obj);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 
 	return offset;
 }
@@ -321,7 +318,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (!vma)
@@ -340,7 +337,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	list_for_each_entry(vma, &msm_obj->vmas, list) {
 		if (vma->aspace == aspace)
@@ -359,33 +356,45 @@ static void del_vma(struct msm_gem_vma *vma)
 	kfree(vma);
 }
 
-/* Called with msm_obj->lock locked */
+/* Called with msm_obj locked */
 static void
-put_iova(struct drm_gem_object *obj)
+put_iova_spaces(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct msm_gem_vma *vma, *tmp;
+	struct msm_gem_vma *vma;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
-	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
+	list_for_each_entry(vma, &msm_obj->vmas, list) {
 		if (vma->aspace) {
 			msm_gem_purge_vma(vma->aspace, vma);
 			msm_gem_close_vma(vma->aspace, vma);
 		}
+	}
+}
+
+/* Called with msm_obj locked */
+static void
+put_iova_vmas(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma, *tmp;
+
+	WARN_ON(!msm_gem_is_locked(obj));
+
+	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 		del_vma(vma);
 	}
 }
 
-static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	int ret = 0;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	vma = lookup_vma(obj, aspace);
 
@@ -420,7 +429,7 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
 		prot |= IOMMU_PRIV;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
 		return -EBUSY;
@@ -437,21 +446,16 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 			msm_obj->sgt, obj->size >> PAGE_SHIFT);
 }
 
-/*
- * get iova and pin it. Should have a matching put
- * limits iova to specified range (in pages)
- */
-int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	u64 local;
 	int ret;
 
-	mutex_lock(&msm_obj->lock);
+	WARN_ON(!msm_gem_is_locked(obj));
 
-	ret = msm_gem_get_iova_locked(obj, aspace, &local,
+	ret = get_iova_locked(obj, aspace, &local,
			range_start, range_end);
 
 	if (!ret)
@@ -460,10 +464,32 @@ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
 	if (!ret)
 		*iova = local;
 
-	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
 
+/*
+ * get iova and pin it. Should have a matching put
+ * limits iova to specified range (in pages)
+ */
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova,
+		u64 range_start, u64 range_end)
+{
+	int ret;
+
+	msm_gem_lock(obj);
+	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
+	msm_gem_unlock(obj);
+
+	return ret;
+}
+
+int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
+}
+
 /* get iova and pin it. Should have a matching put */
 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
@@ -478,12 +504,11 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
 
-	mutex_lock(&msm_obj->lock);
-	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_lock(obj);
+	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
+	msm_gem_unlock(obj);
 
 	return ret;
 }
@@ -494,35 +519,43 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	vma = lookup_vma(obj, aspace);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	WARN_ON(!vma);
 
 	return vma ? vma->iova : 0;
 }
 
 /*
- * Unpin a iova by updating the reference counts. The memory isn't actually
- * purged until something else (shrinker, mm_notifier, destroy, etc) decides
- * to get rid of it
+ * Locked variant of msm_gem_unpin_iova()
 */
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
+void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
-	mutex_lock(&msm_obj->lock);
+	WARN_ON(!msm_gem_is_locked(obj));
+
 	vma = lookup_vma(obj, aspace);
 
 	if (!WARN_ON(!vma))
 		msm_gem_unmap_vma(aspace, vma);
+}
 
-	mutex_unlock(&msm_obj->lock);
+/*
+ * Unpin a iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	msm_gem_lock(obj);
+	msm_gem_unpin_iova_locked(obj, aspace);
+	msm_gem_unlock(obj);
 }
 
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -560,23 +593,22 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret = 0;
 
+	WARN_ON(!msm_gem_is_locked(obj));
+
 	if (obj->import_attach)
 		return ERR_PTR(-ENODEV);
 
-	mutex_lock(&msm_obj->lock);
-
 	if (WARN_ON(msm_obj->madv > madv)) {
 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
-		mutex_unlock(&msm_obj->lock);
 		return ERR_PTR(-EBUSY);
 	}
 
 	/* increment vmap_count *before* vmap() call, so shrinker can
-	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
+	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
-	 * hold msm_obj->lock)
+	 * hold msm_obj lock)
	 */
 	msm_obj->vmap_count++;
 
@@ -594,20 +626,29 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 		}
 	}
 
-	mutex_unlock(&msm_obj->lock);
 	return msm_obj->vaddr;
 
 fail:
 	msm_obj->vmap_count--;
-	mutex_unlock(&msm_obj->lock);
 	return ERR_PTR(ret);
 }
 
-void *msm_gem_get_vaddr(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
 {
 	return get_vaddr(obj, MSM_MADV_WILLNEED);
 }
 
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
+{
+	void *ret;
+
+	msm_gem_lock(obj);
+	ret = msm_gem_get_vaddr_locked(obj);
+	msm_gem_unlock(obj);
+
+	return ret;
+}
+
 /*
  * Don't use this!  It is for the very special case of dumping
  * submits from GPU hangs or faults, were the bo may already
@@ -619,14 +660,21 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
 	return get_vaddr(obj, __MSM_MADV_PURGED);
 }
 
-void msm_gem_put_vaddr(struct drm_gem_object *obj)
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-	mutex_lock(&msm_obj->lock);
+	WARN_ON(!msm_gem_is_locked(obj));
 	WARN_ON(msm_obj->vmap_count < 1);
+
 	msm_obj->vmap_count--;
-	mutex_unlock(&msm_obj->lock);
+}
+
+void msm_gem_put_vaddr(struct drm_gem_object *obj)
+{
+	msm_gem_lock(obj);
+	msm_gem_put_vaddr_locked(obj);
+	msm_gem_unlock(obj);
 }
 
 /* Update madvise status, returns true if not purged, else
@@ -636,37 +684,40 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-	mutex_lock(&msm_obj->lock);
-
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	msm_gem_lock(obj);
 
 	if (msm_obj->madv != __MSM_MADV_PURGED)
 		msm_obj->madv = madv;
 
 	madv = msm_obj->madv;
 
-	mutex_unlock(&msm_obj->lock);
+	/* If the obj is inactive, we might need to move it
+	 * between inactive lists
+	 */
+	if (msm_obj->active_count == 0)
+		update_inactive(msm_obj);
+
+	msm_gem_unlock(obj);
 
 	return (madv != __MSM_MADV_PURGED);
 }
 
-void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
+void msm_gem_purge(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 	WARN_ON(!is_purgeable(msm_obj));
 	WARN_ON(obj->import_attach);
 
-	mutex_lock_nested(&msm_obj->lock, subclass);
+	put_iova_spaces(obj);
 
-	put_iova(obj);
-
-	msm_gem_vunmap_locked(obj);
+	msm_gem_vunmap(obj);
 
 	put_pages(obj);
 
+	put_iova_vmas(obj);
+
 	msm_obj->madv = __MSM_MADV_PURGED;
 
 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
@@ -681,15 +732,13 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 
 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
-
-	mutex_unlock(&msm_obj->lock);
 }
 
-static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
+void msm_gem_vunmap(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 		return;
@@ -698,15 +747,6 @@ static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 	msm_obj->vaddr = NULL;
 }
 
-void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
-	mutex_lock_nested(&msm_obj->lock, subclass);
-	msm_gem_vunmap_locked(obj);
-	mutex_unlock(&msm_obj->lock);
-}
-
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
@@ -745,30 +785,48 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	struct msm_drm_private *priv = obj->dev->dev_private;
+
+	might_sleep();
+	WARN_ON(!msm_gem_is_locked(obj));
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 
-	if (!atomic_fetch_inc(&msm_obj->active_count)) {
-		msm_obj->gpu = gpu;
+	if (msm_obj->active_count++ == 0) {
+		mutex_lock(&priv->mm_lock);
 		list_del_init(&msm_obj->mm_list);
 		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+		mutex_unlock(&priv->mm_lock);
 	}
 }
 
 void msm_gem_active_put(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct msm_drm_private *priv = obj->dev->dev_private;
 
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	might_sleep();
+	WARN_ON(!msm_gem_is_locked(obj));
 
-	if (!atomic_dec_return(&msm_obj->active_count)) {
-		msm_obj->gpu = NULL;
-		list_del_init(&msm_obj->mm_list);
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	if (--msm_obj->active_count == 0) {
+		update_inactive(msm_obj);
 	}
 }
 
+static void update_inactive(struct msm_gem_object *msm_obj)
+{
+	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+
+	mutex_lock(&priv->mm_lock);
+	WARN_ON(msm_obj->active_count != 0);
+
+	list_del_init(&msm_obj->mm_list);
+	if (msm_obj->madv == MSM_MADV_WILLNEED)
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+	else
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
+
+	mutex_unlock(&priv->mm_lock);
+}
+
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 {
 	bool write = !!(op & MSM_PREP_WRITE);
@@ -815,7 +873,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 
 	switch (msm_obj->madv) {
 	case __MSM_MADV_PURGED:
@@ -883,7 +941,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();
 
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 }
 
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -912,25 +970,16 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 
-	if (llist_add(&msm_obj->freed, &priv->free_list))
-		queue_work(priv->wq, &priv->free_work);
-}
-
-static void free_object(struct msm_gem_object *msm_obj)
-{
-	struct drm_gem_object *obj = &msm_obj->base;
-	struct drm_device *dev = obj->dev;
+	mutex_lock(&priv->mm_lock);
+	list_del(&msm_obj->mm_list);
+	mutex_unlock(&priv->mm_lock);
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	msm_gem_lock(obj);
 
 	/* object should not be on active list: */
 	WARN_ON(is_active(msm_obj));
 
-	list_del(&msm_obj->mm_list);
-
-	mutex_lock(&msm_obj->lock);
-
-	put_iova(obj);
+	put_iova_spaces(obj);
 
 	if (obj->import_attach) {
 		WARN_ON(msm_obj->vaddr);
@@ -941,41 +990,25 @@ static void free_object(struct msm_gem_object *msm_obj)
 		if (msm_obj->pages)
 			kvfree(msm_obj->pages);
 
+		/* dma_buf_detach() grabs resv lock, so we need to unlock
+		 * prior to drm_prime_gem_destroy
+		 */
+		msm_gem_unlock(obj);
+
 		drm_prime_gem_destroy(obj, msm_obj->sgt);
 	} else {
-		msm_gem_vunmap_locked(obj);
+		msm_gem_vunmap(obj);
 		put_pages(obj);
+		msm_gem_unlock(obj);
 	}
 
+	put_iova_vmas(obj);
+
 	drm_gem_object_release(obj);
 
-	mutex_unlock(&msm_obj->lock);
 	kfree(msm_obj);
 }
 
-void msm_gem_free_work(struct work_struct *work)
-{
-	struct msm_drm_private *priv =
-		container_of(work, struct msm_drm_private, free_work);
-	struct drm_device *dev = priv->dev;
-	struct llist_node *freed;
-	struct msm_gem_object *msm_obj, *next;
-
-	while ((freed = llist_del_all(&priv->free_list))) {
-
-		mutex_lock(&dev->struct_mutex);
-
-		llist_for_each_entry_safe(msm_obj, next,
-					  freed, freed)
-			free_object(msm_obj);
-
-		mutex_unlock(&dev->struct_mutex);
-
-		if (need_resched())
-			break;
-	}
-}
-
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
@@ -1000,6 +1033,22 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	return ret;
 }
 
+static const struct vm_operations_struct vm_ops = {
+	.fault = msm_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs msm_gem_object_funcs = {
+	.free = msm_gem_free_object,
+	.pin = msm_gem_prime_pin,
+	.unpin = msm_gem_prime_unpin,
+	.get_sg_table = msm_gem_prime_get_sg_table,
+	.vmap = msm_gem_prime_vmap,
+	.vunmap = msm_gem_prime_vunmap,
+	.vm_ops = &vm_ops,
+};
+
 static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
@@ -1021,8 +1070,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	if (!msm_obj)
 		return -ENOMEM;
 
-	mutex_init(&msm_obj->lock);
-
 	msm_obj->flags = flags;
 	msm_obj->madv = MSM_MADV_WILLNEED;
 
@@ -1030,6 +1077,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	INIT_LIST_HEAD(&msm_obj->vmas);
 
 	*obj = &msm_obj->base;
+	(*obj)->funcs = &msm_gem_object_funcs;
 
 	return 0;
 }
@@ -1069,10 +1117,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 		struct msm_gem_vma *vma;
 		struct page **pages;
 
-		mutex_lock(&msm_obj->lock);
+		msm_gem_lock(obj);
 		vma = add_vma(obj, NULL);
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		if (IS_ERR(vma)) {
 			ret = PTR_ERR(vma);
 			goto fail;
@@ -1102,19 +1150,19 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
 	}
 
-	if (struct_mutex_locked) {
-		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-	} else {
-		mutex_lock(&dev->struct_mutex);
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	mutex_lock(&priv->mm_lock);
+	/* Initially obj is idle, obj->madv == WILLNEED: */
+	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+	mutex_unlock(&priv->mm_lock);
 
 	return obj;
 
 fail:
-	drm_gem_object_put(obj);
+	if (struct_mutex_locked) {
+		drm_gem_object_put_locked(obj);
+	} else {
+		drm_gem_object_put(obj);
+	}
 	return ERR_PTR(ret);
 }
 
@@ -1156,26 +1204,26 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	npages = size / PAGE_SIZE;
 
 	msm_obj = to_msm_bo(obj);
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	msm_obj->sgt = sgt;
 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!msm_obj->pages) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		ret = -ENOMEM;
 		goto fail;
 	}
 
	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
 	if (ret) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		goto fail;
 	}
 
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 
-	mutex_lock(&dev->struct_mutex);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_lock(&priv->mm_lock);
+	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+	mutex_unlock(&priv->mm_lock);
 
 	return obj;
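
For context when reading the conversion above: the msm_gem_lock()/msm_gem_unlock()/msm_gem_is_locked() helpers that replace the old per-object mutex live in msm_gem.h and are not part of this diff. A minimal sketch of what they do, assuming the dma_resv-based locking this rework moves to (bodies reconstructed from the dma_resv API, not copied from the patch):

/* Sketch of the helpers from msm_gem.h (not shown in this diff);
 * bodies are thin wrappers over the GEM object's dma_resv lock.
 */
#include <linux/dma-resv.h>
#include <drm/drm_gem.h>

static inline void
msm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}

static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->resv, NULL);
}

static inline void
msm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}

static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
{
	return dma_resv_is_locked(obj->resv);
}

The design choice this makes visible throughout the diff: per-object state is now serialized by the object's dma_resv lock, the same lock dma-buf importers take (hence the early msm_gem_unlock() before drm_prime_gem_destroy() in the free path), while the new priv->mm_lock protects only movement between the active and inactive object lists.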