From 0e08270a1f01bceae17d32a0d75aad2388bd1ba2 Mon Sep 17 00:00:00 2001
From: Sushmita Susheelendra
Date: Tue, 13 Jun 2017 16:52:54 -0600
Subject: drm/msm: Separate locking of buffer resources from struct_mutex

Buffer object specific resources like pages, domains, sg list
need not be protected with struct_mutex. They can be protected
with a buffer object level lock. This simplifies locking and
makes it easier to avoid potential recursive locking scenarios
for SVM involving mmap_sem and struct_mutex. This also removes
unnecessary serialization when creating buffer objects, and also
between buffer object creation and GPU command submission.

Signed-off-by: Sushmita Susheelendra
[robclark: squash in handling new locking for shrinker]
Signed-off-by: Rob Clark
---
 drivers/gpu/drm/msm/msm_gem_vma.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/msm/msm_gem_vma.c')

diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index f285d7e210db..c36321bc8714 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -50,7 +50,9 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
 	}
 
+	spin_lock(&aspace->lock);
 	drm_mm_remove_node(&vma->node);
+	spin_unlock(&aspace->lock);
 
 	vma->iova = 0;
 
@@ -63,10 +65,15 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 {
 	int ret;
 
-	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+	spin_lock(&aspace->lock);
+	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
+		spin_unlock(&aspace->lock);
 		return 0;
+	}
 
 	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
+	spin_unlock(&aspace->lock);
+
 	if (ret)
 		return ret;
 
@@ -94,6 +101,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 	if (!aspace)
 		return ERR_PTR(-ENOMEM);
 
+	spin_lock_init(&aspace->lock);
 	aspace->name = name;
 	aspace->mmu = msm_iommu_new(dev, domain);
 
-- 
cgit v1.2.3
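
The struct msm_gem_address_space declaration lives in msm_gem.h, which is outside the portion of the commit shown here; the sketch below is an assumption of how the new per-address-space spinlock sits next to the drm_mm allocator it guards, using the field names that appear in the diff (aspace->lock, aspace->mm). It is illustrative only, not copied from the commit.

	/*
	 * Illustrative sketch (not taken from this commit): the address space
	 * object is assumed to carry the drm_mm iova allocator plus the
	 * spinlock that now serializes drm_mm_insert_node()/drm_mm_remove_node()
	 * in msm_gem_vma.c, independent of dev->struct_mutex.
	 */
	#include <linux/spinlock.h>
	#include <drm/drm_mm.h>

	struct msm_mmu;	/* backend IOMMU wrapper, unchanged by this patch */

	struct msm_gem_address_space_sketch {
		const char *name;
		struct msm_mmu *mmu;
		struct drm_mm mm;	/* iova range allocator */
		spinlock_t lock;	/* protects ->mm node insert/remove only */
	};

Scoping the lock to drm_mm node bookkeeping (rather than reusing struct_mutex) is what lets map/unmap of a VMA proceed without taking the device-wide mutex, which is the serialization the commit message says it removes.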