author     Michel Lespinasse <walken@google.com>           2020-06-08 21:33:54 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-09 09:39:14 -0700
commit     c1e8d7c6a7a682e1405e3e242d32fc377fd196ff
tree       ef02402b77990834fbb5bdb1f146fc0393cc8987 /drivers
parent     3e4e28c5a8f01ee4174d639e36ed155ade489a6f
mmap locking API: convert mmap_sem comments
Convert comments that reference mmap_sem to reference mmap_lock instead.

[akpm@linux-foundation.org: fix up linux-next leftovers]
[akpm@linux-foundation.org: s/lockaphore/lock/, per Vlastimil]
[akpm@linux-foundation.org: more linux-next fixups, per Michel]

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-13-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
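Editorial note: this is the comment-only tail of the mmap locking API series; earlier patches in the series replaced direct rwsem calls on mm->mmap_sem with the wrapper API in include/linux/mmap_lock.h. A minimal sketch of what that earlier code conversion looked like (illustrative only, not part of this diff):

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/*
 * Illustrative sketch, not from this patch: a driver-style lookup
 * using the wrapper API that replaced direct mm->mmap_sem usage.
 */
static struct vm_area_struct *sketch_lookup_vma(struct mm_struct *mm,
						unsigned long addr)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	vma = find_vma(mm, addr);
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

	return vma;	/* unstable once the lock is dropped; sketch only */
}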
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/mspec.c                               |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h         |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c  |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c  |  2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c        |  6
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c                   |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c                    |  6
-rw-r--r--  drivers/infiniband/core/uverbs_main.c              |  2
-rw-r--r--  drivers/infiniband/hw/hfi1/mmu_rb.c                |  2
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-sg.c          |  2
-rw-r--r--  drivers/misc/cxl/cxllib.c                          |  5
-rw-r--r--  drivers/misc/sgi-gru/grufault.c                    |  8
-rw-r--r--  drivers/oprofile/buffer_sync.c                     |  2
-rw-r--r--  drivers/staging/android/ashmem.c                   |  4
-rw-r--r--  drivers/staging/comedi/comedi_fops.c               |  2
-rw-r--r--  drivers/tty/vt/consolemap.c                        |  2
-rw-r--r--  drivers/vfio/pci/vfio_pci.c                        | 14
-rw-r--r--  drivers/xen/gntdev.c                               |  2
18 files changed, 33 insertions(+), 34 deletions(-)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index d620e48bb8f7..0fae33319d2e 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -64,7 +64,7 @@ enum mspec_page_type {
* This structure is shared by all vma's that are split off from the
* original vma when split_vma()'s are done.
*
- * The refcnt is incremented atomically because mm->mmap_sem does not
+ * The refcnt is incremented atomically because mm->mmap_lock does not
* protect in fork case where multiple tasks share the vma_data.
*/
struct vma_data {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 3f2b695cf19e..53b4126373a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -186,7 +186,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
* disabled. The memory must be pinned and mapped to the hardware when
* this is called in hqd_load functions, so it should never fault in
* the first place. This resolves a circular lock dependency involving
- * four locks, including the DQM lock and mmap_sem.
+ * four locks, including the DQM lock and mmap_lock.
*/
#define read_user_wptr(mmptr, wptr, dst) \
({ \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 0b7e78748540..c6944739183a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -237,7 +237,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- /* read_user_ptr may take the mm->mmap_sem.
+ /* read_user_ptr may take the mm->mmap_lock.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index ccd635b812b5..2f4bdc80a6b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -224,7 +224,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- /* read_user_ptr may take the mm->mmap_sem.
+ /* read_user_ptr may take the mm->mmap_lock.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
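Editorial note: both amdkfd hunks above document the same discipline — srbm_mutex must be dropped before the user copy, because a faulting read_user_wptr() can take mmap_lock and close the srbm_mutex -> mm_sem -> reservation_ww_class_mutex -> srbm_mutex cycle. A condensed sketch of the hqd_load ordering (not verbatim; the real code wraps the mutex in release_queue()/acquire_queue()):

	/* Condensed sketch of the kgd_hqd_load ordering. */
	mutex_unlock(&adev->srbm_mutex);	/* drop before a copy that may fault */
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);	/* may take mmap_lock */
	mutex_lock(&adev->srbm_mutex);		/* reacquire for the MMIO writes */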
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 81f7656f41c3..c31a6744daee 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -203,7 +203,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
mmap_write_lock(mm->mm);
mutex_lock(&mm->i915->mm_lock);
if (mm->mn == NULL && !err) {
- /* Protected by mmap_sem (write-lock) */
+ /* Protected by mmap_lock (write-lock) */
err = __mmu_notifier_register(&mn->mn, mm->mm);
if (!err) {
/* Protected by mm_lock */
@@ -522,8 +522,8 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
/* Spawn a worker so that we can acquire the
* user pages without holding our mutex. Access
- * to the user pages requires mmap_sem, and we have
- * a strict lock ordering of mmap_sem, struct_mutex -
+ * to the user pages requires mmap_lock, and we have
+ * a strict lock ordering of mmap_lock, struct_mutex -
* we already hold struct_mutex here and so cannot
* call gup without encountering a lock inversion.
*
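Editorial note: the i915 comment above is the classic deferral fix for a lock inversion — with an established order of mmap_lock then struct_mutex, code already under struct_mutex must hand the gup call (get_user_pages_remote() in the real driver) to a worker rather than take mmap_lock inline. A schematic sketch; the demo_* names and struct fields are hypothetical:

/* Schematic worker: mmap_lock is only taken here, where no
 * struct_mutex is held. demo_* names are hypothetical. */
static void demo_gup_worker(struct work_struct *work)
{
	struct demo_userptr *up = container_of(work, struct demo_userptr, work);

	mmap_read_lock(up->mm);
	demo_pin_pages(up);	/* hypothetical: gup-style pinning under mmap_lock */
	mmap_read_unlock(up->mm);
}

static int demo_schedule_gup(struct demo_userptr *up)
{
	/* Called with struct_mutex held: must NOT take mmap_lock here. */
	INIT_WORK(&up->work, demo_gup_worker);
	queue_work(system_unbound_wq, &up->work);
	return -EAGAIN;	/* pages arrive asynchronously */
}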
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 75c60c2afb7e..665bb076e84d 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3676,7 +3676,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
* buffered data written by the GPU besides periodic OA metrics.
*
* Note we copy the properties from userspace outside of the i915 perf
- * mutex to avoid an awkward lockdep with mmap_sem.
+ * mutex to avoid an awkward lockdep with mmap_lock.
*
* Most of the implementation details are handled by
* i915_perf_open_ioctl_locked() after taking the &perf->lock
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 63f55685c3d8..a43aa7275f12 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -58,7 +58,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_clear;
/*
- * If possible, avoid waiting for GPU with mmap_sem
+ * If possible, avoid waiting for GPU with mmap_lock
* held. We only do this if the fault allows retry and this
* is the first attempt.
*/
@@ -131,14 +131,14 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
{
/*
* Work around locking order reversal in fault / nopfn
- * between mmap_sem and bo_reserve: Perform a trylock operation
+ * between mmap_lock and bo_reserve: Perform a trylock operation
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
if (unlikely(!dma_resv_trylock(bo->base.resv))) {
/*
* If the fault allows retry and this is the first
- * fault attempt, we try to release the mmap_sem
+ * fault attempt, we try to release the mmap_lock
* before waiting
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
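Editorial note: spelled out, the two ttm_bo_vm.c comments describe this shape — a condensed paraphrase of ttm_bo_vm_reserve() of this era, not the verbatim function:

/* Condensed paraphrase of the ttm_bo_vm_reserve() retry logic. */
static vm_fault_t sketch_bo_reserve(struct ttm_buffer_object *bo,
				    struct vm_fault *vmf)
{
	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
		if (fault_flag_allow_retry_first(vmf->flags)) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_get(bo);
				/* Drop mmap_lock, then wait without it held. */
				mmap_read_unlock(vmf->vma->vm_mm);
				if (!dma_resv_lock_interruptible(bo->base.resv,
								 NULL))
					dma_resv_unlock(bo->base.resv);
				ttm_bo_put(bo);
			}
			return VM_FAULT_RETRY;	/* core re-runs the fault */
		}
		/* Retry not allowed: block for the reservation here. */
		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
			return VM_FAULT_NOPAGE;
	}
	return 0;
}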
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3c7367312398..69e4755cc04b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -835,7 +835,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
return;
/*
- * The umap_lock is nested under mmap_sem since it used within
+ * The umap_lock is nested under mmap_lock since it used within
* the vma_ops callbacks, so we have to clean the list one mm
* at a time to get the lock ordering right. Typically there
* will only be one mm, so no big deal.
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 14d2a90964c3..24ca17b77b72 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -333,7 +333,7 @@ static void do_remove(struct mmu_rb_handler *handler,
/*
* Work queue function to remove all nodes that have been queued up to
- * be removed. The key feature is that mm->mmap_sem is not being held
+ * be removed. The key feature is that mm->mmap_lock is not being held
* and the remove callback can sleep while taking it, if needed.
*/
static void handle_remove(struct work_struct *work)
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 672963808ab9..46ff19df9f53 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -533,7 +533,7 @@ static int __videobuf_iolock(struct videobuf_queue *q,
} else {
/* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
buffers can only be called from videobuf_qbuf
- we take current->mm->mmap_sem there, to prevent
+ we take current->mm->mmap_lock there, to prevent
locking inversion, so don't take it here */
err = videobuf_dma_init_user_locked(&mem->dma,
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
index a2c0fbb5eb28..2a1783f32254 100644
--- a/drivers/misc/cxl/cxllib.c
+++ b/drivers/misc/cxl/cxllib.c
@@ -245,9 +245,8 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
dar += page_size) {
if (dar < vma_start || dar >= vma_end) {
/*
- * We don't hold the mm->mmap_sem semaphore
- * while iterating, since the semaphore is
- * required by one of the lower-level page
+ * We don't hold mm->mmap_lock while iterating, since
+ * the lock is required by one of the lower-level page
* fault processing functions and it could
* create a deadlock.
*
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 8f4e2895b565..b1521112dbbd 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -42,7 +42,7 @@ static inline int is_gru_paddr(unsigned long paddr)
}
/*
- * Find the vma of a GRU segment. Caller must hold mmap_sem.
+ * Find the vma of a GRU segment. Caller must hold mmap_lock.
*/
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
@@ -58,7 +58,7 @@ struct vm_area_struct *gru_find_vma(unsigned long vaddr)
* Find and lock the gts that contains the specified user vaddr.
*
* Returns:
- * - *gts with the mmap_sem locked for read and the GTS locked.
+ * - *gts with the mmap_lock locked for read and the GTS locked.
* - NULL if vaddr invalid OR is not a valid GSEG vaddr.
*/
@@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
* Only supports Intel large pages (2MB only) on x86_64.
* ZZZ - hugepage support is incomplete
*
- * NOTE: mmap_sem is already held on entry to this function. This
+ * NOTE: mmap_lock is already held on entry to this function. This
* guarantees existence of the page tables.
*/
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
@@ -569,7 +569,7 @@ static irqreturn_t gru_intr(int chiplet, int blade)
}
/*
- * This is running in interrupt context. Trylock the mmap_sem.
+ * This is running in interrupt context. Trylock the mmap_lock.
* If it fails, retry the fault in user context.
*/
gts->ustats.fmm_tlbmiss++;
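Editorial note: mmap_read_trylock() is the interrupt-safe entry point the GRU comment above alludes to — a handler in interrupt context cannot sleep on the lock, so it tries once and defers to process context on failure. Schematically (demo_* helpers are hypothetical, not the driver's names):

/* Schematic: device TLB miss handled in interrupt context. */
static void demo_intr_tlbmiss(struct mm_struct *mm, unsigned long vaddr)
{
	if (mmap_read_trylock(mm)) {	/* never sleeps, so IRQ-safe */
		demo_try_dropin(mm, vaddr);	/* hypothetical fast path */
		mmap_read_unlock(mm);
	} else {
		/* Contended: defer, and let the task retry the fault in
		 * user context, where mmap_read_lock() may sleep. */
		demo_defer_to_user_context(vaddr);
	}
}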
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index d3b017af7758..4d7695289eda 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -486,7 +486,7 @@ typedef enum {
/* Sync one of the CPU's buffers into the global event buffer.
* Here we need to go through each batch of samples punctuated
- * by context switch notes, taking the task's mmap_sem and doing
+ * by context switch notes, taking the task's mmap_lock and doing
* lookup in task->mm->mmap to convert EIP into dcookie/offset
* value.
*/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 8044510d8ec6..c05a214191da 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -555,7 +555,7 @@ static int set_name(struct ashmem_area *asma, void __user *name)
/*
* Holding the ashmem_mutex while doing a copy_from_user might cause
- * an data abort which would try to access mmap_sem. If another
+ * an data abort which would try to access mmap_lock. If another
* thread has invoked ashmem_mmap then it will be holding the
* semaphore and will be waiting for ashmem_mutex, there by leading to
* deadlock. We'll release the mutex and take the name to a local
@@ -586,7 +586,7 @@ static int get_name(struct ashmem_area *asma, void __user *name)
* Have a local variable to which we'll copy the content
* from asma with the lock held. Later we can copy this to the user
* space safely without holding any locks. So even if we proceed to
- * wait for mmap_sem, it won't lead to deadlock.
+ * wait for mmap_lock, it won't lead to deadlock.
*/
char local_name[ASHMEM_NAME_LEN];
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index a56c8f74a27b..e85a99b68f31 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2325,7 +2325,7 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
int retval = 0;
/*
- * 'trylock' avoids circular dependency with current->mm->mmap_sem
+ * 'trylock' avoids circular dependency with current->mm->mmap_lock
* and down-reading &dev->attach_lock should normally succeed without
* contention unless the device is in the process of being attached
* or detached.
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index b28aa0d289f8..c1be96bb3ecf 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -12,7 +12,7 @@
* Fix bug in inverse translation. Stanislav Voronyi <stas@cnti.uanet.kharkov.ua>, Dec 1998
*
* In order to prevent the following circular lock dependency:
- * &mm->mmap_sem --> cpu_hotplug.lock --> console_lock --> &mm->mmap_sem
+ * &mm->mmap_lock --> cpu_hotplug.lock --> console_lock --> &mm->mmap_lock
*
* We cannot allow page fault to happen while holding the console_lock.
* Therefore, all the userspace copy operations have to be done outside
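Editorial note: the rule the consolemap comment states generalizes — copy user data into a kernel buffer before taking console_lock, so no page fault (and hence no mmap_lock acquisition) can occur under it. A schematic sketch with hypothetical names:

/* Schematic: fault-prone copy first, console_lock second. */
static int sketch_set_unimap(const void __user *ubuf, size_t len)
{
	u16 tmp[64];

	if (len > sizeof(tmp))
		return -EINVAL;
	if (copy_from_user(tmp, ubuf, len))	/* may fault: no locks held */
		return -EFAULT;

	console_lock();		/* no faults possible from here on */
	/* ... apply 'tmp' to the console map (hypothetical step) ... */
	console_unlock();
	return 0;
}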
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index b5f6ef2d12f6..7c0779018b1b 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1185,7 +1185,7 @@ reset_info_exit:
/*
* We need to get memory_lock for each device, but devices
- * can share mmap_sem, therefore we need to zap and hold
+ * can share mmap_lock, therefore we need to zap and hold
* the vma_lock for each device, and only then get each
* memory_lock.
*/
@@ -1375,26 +1375,26 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
/*
* Lock ordering:
- * vma_lock is nested under mmap_sem for vm_ops callback paths.
+ * vma_lock is nested under mmap_lock for vm_ops callback paths.
* The memory_lock semaphore is used by both code paths calling
* into this function to zap vmas and the vm_ops.fault callback
* to protect the memory enable state of the device.
*
- * When zapping vmas we need to maintain the mmap_sem => vma_lock
+ * When zapping vmas we need to maintain the mmap_lock => vma_lock
* ordering, which requires using vma_lock to walk vma_list to
- * acquire an mm, then dropping vma_lock to get the mmap_sem and
+ * acquire an mm, then dropping vma_lock to get the mmap_lock and
* reacquiring vma_lock. This logic is derived from similar
* requirements in uverbs_user_mmap_disassociate().
*
- * mmap_sem must always be the top-level lock when it is taken.
+ * mmap_lock must always be the top-level lock when it is taken.
* Therefore we can only hold the memory_lock write lock when
- * vma_list is empty, as we'd need to take mmap_sem to clear
+ * vma_list is empty, as we'd need to take mmap_lock to clear
* entries. vma_list can only be guaranteed empty when holding
* vma_lock, thus memory_lock is nested under vma_lock.
*
* This enables the vm_ops.fault callback to acquire vma_lock,
* followed by memory_lock read lock, while already holding
- * mmap_sem without risk of deadlock.
+ * mmap_lock without risk of deadlock.
*/
while (1) {
struct mm_struct *mm = NULL;
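Editorial note: the ordering dance in that comment reduces to a retry loop. A condensed schematic of vfio_pci_zap_and_vma_lock (get_first_mm() and zap_vmas_for_mm() are hypothetical helper names):

	/* Schematic of the mmap_lock => vma_lock ordering loop. */
	while (1) {
		struct mm_struct *mm;

		mutex_lock(&vdev->vma_lock);
		if (list_empty(&vdev->vma_list)) {
			mutex_unlock(&vdev->vma_lock);
			break;		/* empty is only stable under vma_lock */
		}
		mm = get_first_mm(vdev);	/* mmget() an mm from vma_list */
		mutex_unlock(&vdev->vma_lock);	/* drop: mmap_lock must come first */

		mmap_read_lock(mm);		/* top-level lock ... */
		mutex_lock(&vdev->vma_lock);	/* ... then vma_lock, order kept */
		zap_vmas_for_mm(vdev, mm);
		mutex_unlock(&vdev->vma_lock);
		mmap_read_unlock(mm);
		mmput(mm);
	}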
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 3a939e358284..64a9025a87be 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1014,7 +1014,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
* to the PTE from going stale.
*
* Since this vma's mappings can't be touched without the
- * mmap_sem, and we are holding it now, there is no need for
+ * mmap_lock, and we are holding it now, there is no need for
* the notifier_range locking pattern.
*/
mmu_interval_read_begin(&map->notifier);
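Editorial note: for contrast, a caller that did not hold mmap_lock across the setup would need the full begin/retry sequence from include/linux/mmu_notifier.h. Schematically — map->pt_lock is a hypothetical driver-side lock:

	/* Schematic begin/retry loop; gntdev skips the retry because it
	 * holds mmap_lock across the update. */
	unsigned long seq;

again:
	seq = mmu_interval_read_begin(&map->notifier);
	/* ... re-look-up pages and rebuild the mapping ... */
	spin_lock(&map->pt_lock);
	if (mmu_interval_read_retry(&map->notifier, seq)) {
		spin_unlock(&map->pt_lock);	/* invalidation raced: redo */
		goto again;
	}
	/* Mapping is stable until the next invalidation callback. */
	spin_unlock(&map->pt_lock);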