author    | Michel Lespinasse <walken@google.com>          | 2020-06-08 21:33:54 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-06-09 09:39:14 -0700
commit    | c1e8d7c6a7a682e1405e3e242d32fc377fd196ff (patch)
tree      | ef02402b77990834fbb5bdb1f146fc0393cc8987 /arch
parent    | 3e4e28c5a8f01ee4174d639e36ed155ade489a6f (diff)
download  | linux-c1e8d7c6a7a682e1405e3e242d32fc377fd196ff.tar.bz2
mmap locking API: convert mmap_sem comments
Convert comments that reference mmap_sem to reference mmap_lock instead.
[akpm@linux-foundation.org: fix up linux-next leftovers]
[akpm@linux-foundation.org: s/lockaphore/lock/, per Vlastimil]
[akpm@linux-foundation.org: more linux-next fixups, per Michel]
Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-13-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
32 files changed, 68 insertions, 68 deletions
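For orientation, here is a minimal sketch (not part of this commit) of the mmap_lock wrapper API that the renamed comments refer to. Wrappers such as mmap_read_trylock(), mmap_read_unlock(), mmap_write_unlock() and mmap_assert_write_locked() appear in the hunks below; the helper name demo_addr_is_mapped() and the surrounding shape are illustrative assumptions only, since this patch changes comments, not code.

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Hypothetical helper, for illustration only: check an address under the
 * mmap_lock read side using the wrappers this series introduces. */
static bool demo_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	vma = find_vma(mm, addr);	/* comments now say "mmap_lock held" */
	mapped = vma && vma->vm_start <= addr;
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

	return mapped;
}

The code itself was converted to these wrappers earlier in the series; the diff below only renames the lock in comments.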
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 5b213bc0ae84..72f5405a7ec5 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -141,7 +141,7 @@ retry:
 }
 /*
- * Fault retry nuances, mmap_sem already relinquished by core mm
+ * Fault retry nuances, mmap_lock already relinquished by core mm
 */
 if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index e0330a25e1c6..6bfdca4769a7 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -240,7 +240,7 @@ static int install_vvar(struct mm_struct *mm, unsigned long addr)
 return PTR_ERR_OR_ZERO(vma);
 }
-/* assumes mmap_sem is write-locked */
+/* assumes mmap_lock is write-locked */
 void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
 {
 struct vm_area_struct *vma;
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 8e56c43981b4..c6550eddfce1 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -293,7 +293,7 @@ retry:
 fault = __do_page_fault(mm, addr, fsr, flags, tsk);
 /* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because
+ * signal first. We do not need to release the mmap_lock because
 * it would already be released in __lock_page_or_retry in
 * mm/filemap.c. */
 if (fault_signal_pending(fault, regs)) {
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 6c09f43d9711..3a4dec334cc5 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -86,7 +86,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 /*
 * If fault is in region 5 and we are in the kernel, we may already
- * have the mmap_sem (pfn_valid macro is called during mmap). There
+ * have the mmap_lock (pfn_valid macro is called during mmap). There
 * is no vma for region 5 addr's anyway, so skip getting the semaphore
 * and go directly to the exception handling code.
 */
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 74358902a5db..a2bfe587b491 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -124,7 +124,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 /* When running in the kernel we expect faults to occur only to
 * addresses in user space. All other faults represent errors in the
 * kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
+ * erroneous fault occurring in a code path which already holds mmap_lock
 * we will deadlock attempting to validate the fault against the
 * address space. Luckily the kernel only validly references user
 * space from well defined areas of code, which are listed in the
diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c
index 89831b6e1ede..8fb73f6401a0 100644
--- a/arch/nds32/mm/fault.c
+++ b/arch/nds32/mm/fault.c
@@ -210,7 +210,7 @@ good_area:
 /*
 * If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because it
+ * signal first. We do not need to release the mmap_lock because it
 * would already be released in __lock_page_or_retry in mm/filemap.c.
 */
 if (fault_signal_pending(fault, regs)) {
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 20ebf153c871..2fe6cae14d10 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -101,7 +101,7 @@ static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
 /*
 * Returns a positive, 5-bit key on success, or -1 on failure.
- * Relies on the mmap_sem to protect against concurrency in mm_pkey_alloc() and
+ * Relies on the mmap_lock to protect against concurrency in mm_pkey_alloc() and
 * mm_pkey_free().
 */
 static inline int mm_pkey_alloc(struct mm_struct *mm)
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 305997b015b6..f91224ea034a 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -47,7 +47,7 @@
 * Locking order
 *
 * 1. kvm->srcu - Protects KVM memslots
- * 2. kvm->mm->mmap_sem - find_vma, migrate_vma_pages and helpers, ksm_madvise
+ * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
 * as sync-points for page-in/out
 */
@@ -402,8 +402,8 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
 mig.dst = &dst_pfn;
 /*
- * We come here with mmap_sem write lock held just for
- * ksm_madvise(), otherwise we only need read mmap_sem.
+ * We come here with mmap_lock write lock held just for
+ * ksm_madvise(), otherwise we only need read mmap_lock.
 * Hence downgrade to read lock once ksm_madvise() is done.
 */
 ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
index 157f98f6aea9..b6c7427daa6f 100644
--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -129,7 +129,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 /*
 * It is safe to go down the mm's list of vmas when called
- * from dup_mmap, holding mmap_sem. It would also be safe from
+ * from dup_mmap, holding mmap_lock. It would also be safe from
 * unmap_region or exit_mmap, but not from vmtruncate on SMP -
 * but it seems dup_mmap is the only SMP case which gets here.
 */
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 00af58cc8714..2a99167afbaf 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -237,7 +237,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
 * to hugepage, we first clear the pmd, then invalidate all
 * the PTE entries. The assumption here is that any low level
 * page fault will see a none pmd and take the slow path that
- * will wait on mmap_sem. But we could very well be in a
+ * will wait on mmap_lock. But we could very well be in a
 * hash_page with local ptep pointer value. Such a hash page
 * can result in adding new HPTE entries for normal subpages.
 * That means we could be modifying the page content as we
@@ -251,7 +251,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
 * Now invalidate the hpte entries in the range
 * covered by pmd. This make sure we take a
 * fault and will find the pmd as none, which will
- * result in a major fault which takes mmap_sem and
+ * result in a major fault which takes mmap_lock and
 * hence wait for collapse to complete. Without this
 * the __collapse_huge_page_copy can result in copying
 * the old content.
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index e814d34bf7f4..60c6ea16a972 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -225,7 +225,7 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
 if (!spt) {
 /*
 * Allocate subpage prot table if not already done.
- * Do this with mmap_sem held
+ * Do this with mmap_lock held
 */
 spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
 if (!spt) {
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ff3653e67c7b..641fc5f3d7dd 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -138,7 +138,7 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
 * 2. T1 : set AMR to deny access to pkey=4, touches, page
 * 3. T1 : faults...
 * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
- * 5. T1 : enters fault handler, takes mmap_sem, etc...
+ * 5. T1 : enters fault handler, takes mmap_lock, etc...
 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
 * faulted on a pte with its pkey=4.
 */
@@ -525,9 +525,9 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 /*
- * We want to do this outside mmap_sem, because reading code around nip
+ * We want to do this outside mmap_lock, because reading code around nip
 * can result in fault, which will cause a deadlock when called with
- * mmap_sem held
+ * mmap_lock held
 */
 if (is_user)
 flags |= FAULT_FLAG_USER;
@@ -539,7 +539,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 /* When running in the kernel we expect faults to occur only to
 * addresses in user space. All other faults represent errors in the
 * kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
+ * erroneous fault occurring in a code path which already holds mmap_lock
 * we will deadlock attempting to validate the fault against the
 * address space. Luckily the kernel only validly references user
 * space from well defined areas of code, which are listed in the
@@ -615,7 +615,7 @@ good_area:
 return user_mode(regs) ? 0 : SIGBUS;
 /*
- * Handle the retry right now, the mmap_sem has been released in that
+ * Handle the retry right now, the mmap_lock has been released in that
 * case.
 */
 if (unlikely(fault & VM_FAULT_RETRY)) {
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index cea5b4e25a24..45a0556089e8 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -306,7 +306,7 @@ void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 pmd = pmd_offset(pud, addr);
 /*
 * khugepaged to collapse normal pages to hugepage, first set
- * pmd to none to force page fault/gup to take mmap_sem. After
+ * pmd to none to force page fault/gup to take mmap_lock. After
 * pmd is set to none, we do a pte_clear which does this assertion
 * so if we find pmd none, return.
 */
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 324c0fb091a1..62d90a5e23d1 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -325,7 +325,7 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
 return VM_FAULT_SIGBUS;
 /*
- * Because we release the mmap_sem, the context may be destroyed while
+ * Because we release the mmap_lock, the context may be destroyed while
 * we're in spu_wait. Grab an extra reference so it isn't destroyed
 * in the meantime.
 */
@@ -334,8 +334,8 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
 /*
 * We have to wait for context to be loaded before we have
 * pages to hand out to the user, but we don't want to wait
- * with the mmap_sem held.
- * It is possible to drop the mmap_sem here, but then we need
+ * with the mmap_lock held.
+ * It is possible to drop the mmap_lock here, but then we need
 * to return VM_FAULT_NOPAGE because the mappings may have
 * hanged.
 */
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 996db5ebbf39..ae7b7fe24658 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -114,7 +114,7 @@ good_area:
 /*
 * If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because it
+ * signal first. We do not need to release the mmap_lock because it
 * would already be released in __lock_page_or_retry in mm/filemap.c.
 */
 if (fault_signal_pending(fault, regs))
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 57e08b0a0a1a..96ae368aa0a2 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1122,7 +1122,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 }
 /*
- * Must be called with relevant read locks held (kvm->mm->mmap_sem, kvm->srcu)
+ * Must be called with relevant read locks held (kvm->mm->mmap_lock, kvm->srcu)
 */
 static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
 {
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index f0fa51af8f5b..6a24751557f0 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -507,7 +507,7 @@ retry:
 if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
 (flags & FAULT_FLAG_RETRY_NOWAIT)) {
 /* FAULT_FLAG_RETRY_NOWAIT has been set,
- * mmap_sem has not been released */
+ * mmap_lock has not been released */
 current->thread.gmap_pfault = 1;
 fault = VM_FAULT_PFAULT;
 goto out_up;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index a8e818d10c6b..190357ff86b3 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -300,7 +300,7 @@ struct gmap *gmap_get_enabled(void)
 EXPORT_SYMBOL_GPL(gmap_get_enabled);
 /*
- * gmap_alloc_table is assumed to be called with mmap_sem held
+ * gmap_alloc_table is assumed to be called with mmap_lock held
 */
 static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 unsigned long init, unsigned long gaddr)
@@ -466,7 +466,7 @@ EXPORT_SYMBOL_GPL(gmap_map_segment);
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
- * The mmap_sem of the mm that belongs to the address space must be held
+ * The mmap_lock of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
@@ -534,7 +534,7 @@ static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
- * The mmap_sem of the mm that belongs to the address space must be held
+ * The mmap_lock of the mm that belongs to the address space must be held
 * when this function gets called.
 */
 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
@@ -655,7 +655,7 @@ retry:
 goto out_up;
 }
 /*
- * In the case that fixup_user_fault unlocked the mmap_sem during
+ * In the case that fixup_user_fault unlocked the mmap_lock during
 * faultin redo __gmap_translate to not race with a map/unmap_segment.
 */
 if (unlocked)
@@ -669,7 +669,7 @@ out_up:
 EXPORT_SYMBOL_GPL(gmap_fault);
 /*
- * this function is assumed to be called with mmap_sem held
+ * this function is assumed to be called with mmap_lock held
 */
 void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
 {
@@ -882,7 +882,7 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
 if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
 return -EFAULT;
 if (unlocked)
- /* lost mmap_sem, caller has to retry __gmap_translate */
+ /* lost mmap_lock, caller has to retry __gmap_translate */
 return 0;
 /* Connect the page tables */
 return __gmap_link(gmap, gaddr, vmaddr);
@@ -953,7 +953,7 @@ static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
- * Expected to be called with sg->mm->mmap_sem in read and
+ * Expected to be called with sg->mm->mmap_lock in read and
 * guest_table_lock held.
 */
 static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
@@ -999,7 +999,7 @@ static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EAGAIN if a fixup is needed.
 *
- * Expected to be called with sg->mm->mmap_sem in read
+ * Expected to be called with sg->mm->mmap_lock in read
 */
 static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 pmd_t *pmdp, int prot, unsigned long bits)
@@ -1035,7 +1035,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
 */
 static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
 unsigned long len, int prot, unsigned long bits)
@@ -1124,7 +1124,7 @@ EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
 * if reading using the virtual address failed. -EINVAL if called on a gmap
 * shadow.
 *
- * Called with gmap->mm->mmap_sem in read.
+ * Called with gmap->mm->mmap_lock in read.
 */
 int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
 {
@@ -1729,7 +1729,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow);
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
 */
 int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
 int fake)
@@ -1813,7 +1813,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
 */
 int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
 int fake)
@@ -1897,7 +1897,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
 */
 int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 int fake)
@@ -1981,7 +1981,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
 */
 int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
 unsigned long *pgt, int *dat_protection,
@@ -2021,7 +2021,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
 * shadow table structure is incomplete, -ENOMEM if out of memory,
 * -EFAULT if an address in the parent gmap could not be resolved and
 *
- * Called with gmap->mm->mmap_sem in read
+ * Called with gmap->mm->mmap_lock in read
 */
 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
 int fake)
@@ -2100,7 +2100,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
 */
 int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 {
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index fff169d64711..11d2c8395e2a 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -114,7 +114,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
 spin_lock_bh(&mm->page_table_lock);
 /*
- * This routine gets called with mmap_sem lock held and there is
+ * This routine gets called with mmap_lock lock held and there is
 * no reason to optimize for the case of otherwise. However, if
 * that would ever change, the below check will let us know.
 */
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index de9e0a60e119..ddfa9685f1ef 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -182,7 +182,7 @@ static void sh4_flush_cache_all(void *unused)
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here.
 *
- * Caller takes mm->mmap_sem.
+ * Caller takes mm->mmap_lock.
 */
 static void sh4_flush_cache_mm(void *arg)
 {
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 3a125aad586d..fbe1f2fe9a8c 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -326,7 +326,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 return 1;
 }
- /* Release mmap_sem first if necessary */
+ /* Release mmap_lock first if necessary */
 if (!(fault & VM_FAULT_RETRY))
 mmap_read_unlock(current->mm);
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 73f95a5ba683..a3806614e4dc 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -70,7 +70,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
 }
 /*
- * We now make sure that mmap_sem is held in all paths that call
+ * We now make sure that mmap_lock is held in all paths that call
 * this. Additionally, to prevent kswapd from ripping ptes from
 * under us, raise interrupts around the time that we look at the
 * pte, kswapd will have to wait to get his smp ipi response from
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 60ea924e551d..d9961163da66 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -114,7 +114,7 @@ void uml_setup_stubs(struct mm_struct *mm)
 mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
 mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);
- /* dup_mmap already holds mmap_sem */
+ /* dup_mmap already holds mmap_lock */
 err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
 VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 6c0e76553657..61776790cd67 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -348,7 +348,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 if (ret) {
 printk(KERN_ERR "fix_range_common: failed, killing current "
 "process: %d\n", task_tgid_vnr(current));
- /* We are under mmap_sem, release it such that current can terminate */
+ /* We are under mmap_lock, release it such that current can terminate */
 mmap_write_unlock(current->mm);
 force_sig(SIGKILL);
 do_signal(&current->thread.regs);
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 99869b57b6cb..7654bddde133 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -246,7 +246,7 @@ retry:
 fault = __do_pf(mm, addr, fsr, flags, tsk);
 /* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because
+ * signal first. We do not need to release the mmap_lock because
 * it would already be released in __lock_page_or_retry in
 * mm/filemap.c. */
 if (fault_signal_pending(fault, regs))
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 081cdfdf11f4..4103665c6e03 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2178,7 +2178,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 * userspace with CR4.PCE clear while another task is still
 * doing on_each_cpu_mask() to propagate CR4.PCE.
 *
- * For now, this can't happen because all callers hold mmap_sem
+ * For now, this can't happen because all callers hold mmap_lock
 * for write. If this changes, we'll need a different solution.
 */
 mmap_assert_write_locked(mm);
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index bdeae9291e5c..0a301ad0b02f 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -45,7 +45,7 @@ typedef struct {
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 /*
 * One bit per protection key says whether userspace can
- * use it or not. protected by mmap_sem.
+ * use it or not. protected by mmap_lock.
 */
 u16 pkey_allocation_map;
 s16 execute_only_pkey;
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 5afb5e0fe903..e896ebef8c24 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -39,23 +39,23 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
 * a "*pmdp" dereference done by GCC. Problem is, in certain places
 * where pte_offset_map_lock() is called, concurrent page faults are
- * allowed, if the mmap_sem is hold for reading. An example is mincore
+ * allowed, if the mmap_lock is hold for reading. An example is mincore
 * vs page faults vs MADV_DONTNEED. On the page fault side
 * pmd_populate() rightfully does a set_64bit(), but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
 * because GCC will not read the 64-bit value of the pmd atomically.
 *
 * To fix this all places running pte_offset_map_lock() while holding the
- * mmap_sem in read mode, shall read the pmdp pointer using this
+ * mmap_lock in read mode, shall read the pmdp pointer using this
 * function to know if the pmd is null or not, and in turn to know if
 * they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd
 * operations.
 *
- * Without THP if the mmap_sem is held for reading, the pmd can only
+ * Without THP if the mmap_lock is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic() runs. So
 * we can always return atomic pmd values with this function.
 *
- * With THP if the mmap_sem is held for reading, the pmd can become
+ * With THP if the mmap_lock is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic(). We could read it truly
 * atomically here with an atomic64_read() for the THP enabled case (and
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 4bd28b388a1a..0daf2f1cf7a8 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1326,9 +1326,9 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
 * pseudo-locked region will still be here on return.
 *
 * The mutex has to be released temporarily to avoid a potential
- * deadlock with the mm->mmap_sem semaphore which is obtained in
- * the device_create() and debugfs_create_dir() callpath below
- * as well as before the mmap() callback is called.
+ * deadlock with the mm->mmap_lock which is obtained in the
+ * device_create() and debugfs_create_dir() callpath below as well as
+ * before the mmap() callback is called.
 */
 mutex_unlock(&rdtgroup_mutex);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index d7cb5ab0d1f0..23b4b61319d3 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -3199,10 +3199,10 @@ int __init rdtgroup_init(void)
 * during the debugfs directory creation also &sb->s_type->i_mutex_key
 * (the lockdep class of inode->i_rwsem). Other filesystem
 * interactions (eg. SyS_getdents) have the lock ordering:
- * &sb->s_type->i_mutex_key --> &mm->mmap_sem
- * During mmap(), called with &mm->mmap_sem, the rdtgroup_mutex
+ * &sb->s_type->i_mutex_key --> &mm->mmap_lock
+ * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
 * is taken, thus creating dependency:
- * &mm->mmap_sem --> rdtgroup_mutex for the latter that can cause
+ * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause
 * issues considering the other two lock dependencies.
 * By creating the debugfs directory here we avoid a dependency
 * that may cause deadlock (even though file operations cannot
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 84c3ba32f211..8748321c4486 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -8,7 +8,7 @@
 *
 * Lock order:
 * contex.ldt_usr_sem
- * mmap_sem
+ * mmap_lock
 * context.lock
 */
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c23bcd027ae1..0b03ae8c39cd 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -865,7 +865,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 * 2. T1 : set PKRU to deny access to pkey=4, touches page
 * 3. T1 : faults...
 * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
- * 5. T1 : enters fault handler, takes mmap_sem, etc...
+ * 5. T1 : enters fault handler, takes mmap_lock, etc...
 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
 * faulted on a pte with its pkey=4.
 */
@@ -1231,12 +1231,12 @@ void do_user_addr_fault(struct pt_regs *regs,
 * Kernel-mode access to the user address space should only occur
 * on well-defined single instructions listed in the exception
 * tables. But, an erroneous kernel fault occurring outside one of
- * those areas which also holds mmap_sem might deadlock attempting
+ * those areas which also holds mmap_lock might deadlock attempting
 * to validate the fault against the address space.
 *
 * Only do the expensive exception table search when we might be at
 * risk of a deadlock. This happens if we
- * 1. Failed to acquire mmap_sem, and
+ * 1. Failed to acquire mmap_lock, and
 * 2. The access did not originate in userspace.
 */
 if (unlikely(!mmap_read_trylock(mm))) {
@@ -1289,9 +1289,9 @@ good_area:
 * If for any reason at all we couldn't handle the fault,
 * make sure we exit gracefully rather than endlessly redo
 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
- * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
+ * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
 *
- * Note that handle_userfault() may also release and reacquire mmap_sem
+ * Note that handle_userfault() may also release and reacquire mmap_lock
 * (and not return with VM_FAULT_RETRY), when returning to userland to
 * repeat the page fault later with a VM_FAULT_NOPAGE retval
 * (potentially after handling any pending signal during the return to
@@ -1310,7 +1310,7 @@ good_area:
 }
 /*
- * If we need to retry the mmap_sem has already been released,
+ * If we need to retry the mmap_lock has already been released,
 * and if there is a fatal signal pending there is no guarantee
 * that we made any progress. Handle this case first.
 */