Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_vma.c')
-rw-r--r--   drivers/gpu/drm/msm/msm_gem_vma.c   51
1 file changed, 39 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index c4c42bf0db0e..ee46d8321b05 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -38,26 +38,32 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
 	kref_put(&aspace->kref, msm_gem_address_space_destroy);
 }
 
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+/* Actually unmap memory for the vma */
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma)
 {
-	if (!aspace || !vma->iova)
+	unsigned size = vma->node.size << PAGE_SHIFT;
+
+	/* Print a message if we try to purge a vma in use */
+	if (WARN_ON(vma->inuse > 0))
 		return;
 
-	if (aspace->mmu) {
-		unsigned size = vma->node.size << PAGE_SHIFT;
-		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
-	}
+	/* Don't do anything if the memory isn't mapped */
+	if (!vma->mapped)
+		return;
 
-	spin_lock(&aspace->lock);
-	drm_mm_remove_node(&vma->node);
-	spin_unlock(&aspace->lock);
+	if (aspace->mmu)
+		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
 
-	vma->iova = 0;
 	vma->mapped = false;
+}
 
-	msm_gem_address_space_put(aspace);
+/* Remove reference counts for the mapping */
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
+{
+	if (!WARN_ON(!vma->iova))
+		vma->inuse--;
 }
 
 int
@@ -70,6 +76,9 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 	if (WARN_ON(!vma->iova))
 		return -EINVAL;
 
+	/* Increase the usage counter */
+	vma->inuse++;
+
 	if (vma->mapped)
 		return 0;
 
@@ -85,6 +94,23 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 	return ret;
 }
 
+/* Close an iova. Warn if it is still in use */
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
+{
+	if (WARN_ON(vma->inuse > 0 || vma->mapped))
+		return;
+
+	spin_lock(&aspace->lock);
+	if (vma->iova)
+		drm_mm_remove_node(&vma->node);
+	spin_unlock(&aspace->lock);
+
+	vma->iova = 0;
+
+	msm_gem_address_space_put(aspace);
+}
+
 /* Initialize a new vma and allocate an iova for it */
 int msm_gem_init_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, int npages)
@@ -109,6 +135,7 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace,
 	return 0;
 }
 
+
 struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name)
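
For context, a minimal caller sketch (not part of the patch) of the vma lifecycle these helpers establish: msm_gem_init_vma() reserves the iova, msm_gem_map_vma() maps the pages and takes a use reference, msm_gem_unmap_vma() drops that reference, msm_gem_purge_vma() tears down the IOMMU mapping once the vma is idle, and msm_gem_close_vma() releases the iova and the address-space reference. The wrapper function and its arguments are hypothetical, and the full msm_gem_map_vma() signature (taking the object's sg_table and page count) is assumed here, since this diff does not show it.

/* Hypothetical caller, for illustration only */
static int example_vma_lifecycle(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
{
	int ret;

	/* Allocate an iova and take a reference on the address space */
	ret = msm_gem_init_vma(aspace, vma, npages);
	if (ret)
		return ret;

	/* Map the backing pages; a successful map bumps vma->inuse */
	ret = msm_gem_map_vma(aspace, vma, sgt, npages);
	if (ret)
		return ret;

	/* ... the GPU uses the buffer ... */

	/* Drop the use count; the IOMMU mapping itself stays in place */
	msm_gem_unmap_vma(aspace, vma);

	/* Actually unmap, now that vma->inuse has dropped to zero */
	msm_gem_purge_vma(aspace, vma);

	/* Free the iova and drop the address space reference */
	msm_gem_close_vma(aspace, vma);

	return 0;
}

In the driver itself these steps are split across the buffer get/put, shrinker, and free paths rather than one function; the sketch only shows the intended ordering.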