From d8ed45c5dcd455fc5848d47f86883a1b872ac0d0 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Jun 2020 21:33:25 -0700 Subject: mmap locking API: use coccinelle to convert mmap_sem rwsem call sites This change converts the existing mmap_sem rwsem calls to use the new mmap locking API instead. The change is generated using coccinelle with the following rule: // spatch --sp-file mmap_lock_api.cocci --in-place --include-headers --dir . @@ expression mm; @@ ( -init_rwsem +mmap_init_lock | -down_write +mmap_write_lock | -down_write_killable +mmap_write_lock_killable | -down_write_trylock +mmap_write_trylock | -up_write +mmap_write_unlock | -downgrade_write +mmap_write_downgrade | -down_read +mmap_read_lock | -down_read_killable +mmap_read_lock_killable | -down_read_trylock +mmap_read_trylock | -up_read +mmap_read_unlock ) -(&mm->mmap_sem) +(mm) Signed-off-by: Michel Lespinasse Signed-off-by: Andrew Morton Reviewed-by: Daniel Jordan Reviewed-by: Laurent Dufour Reviewed-by: Vlastimil Babka Cc: Davidlohr Bueso Cc: David Rientjes Cc: Hugh Dickins Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Liam Howlett Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Ying Han Link: http://lkml.kernel.org/r/20200520052908.204642-5-walken@google.com Signed-off-by: Linus Torvalds --- arch/alpha/kernel/traps.c | 4 +-- arch/alpha/mm/fault.c | 10 +++---- arch/arc/kernel/process.c | 4 +-- arch/arc/kernel/troubleshoot.c | 4 +-- arch/arc/mm/fault.c | 4 +-- arch/arm/kernel/process.c | 4 +-- arch/arm/kernel/swp_emulate.c | 4 +-- arch/arm/lib/uaccess_with_memcpy.c | 16 +++++------ arch/arm/mm/fault.c | 6 ++-- arch/arm64/kernel/traps.c | 4 +-- arch/arm64/kernel/vdso.c | 8 +++--- arch/arm64/mm/fault.c | 8 +++--- arch/csky/kernel/vdso.c | 4 +-- arch/csky/mm/fault.c | 8 +++--- arch/hexagon/kernel/vdso.c | 4 +-- arch/hexagon/mm/vm_fault.c | 8 +++--- arch/ia64/kernel/perfmon.c | 8 +++--- arch/ia64/mm/fault.c | 8 +++--- arch/ia64/mm/init.c | 12 ++++---- arch/m68k/kernel/sys_m68k.c | 14 +++++----- arch/m68k/mm/fault.c | 8 +++--- arch/microblaze/mm/fault.c | 12 ++++---- arch/mips/kernel/traps.c | 4 +-- arch/mips/kernel/vdso.c | 4 +-- arch/nds32/kernel/vdso.c | 6 ++-- arch/nds32/mm/fault.c | 12 ++++---- arch/nios2/mm/fault.c | 12 ++++---- arch/nios2/mm/init.c | 4 +-- arch/openrisc/mm/fault.c | 10 +++---- arch/parisc/kernel/traps.c | 6 ++-- arch/parisc/mm/fault.c | 8 +++--- arch/powerpc/kernel/vdso.c | 6 ++-- arch/powerpc/kvm/book3s_hv.c | 6 ++-- arch/powerpc/kvm/book3s_hv_uvmem.c | 12 ++++---- arch/powerpc/kvm/e500_mmu_host.c | 4 +-- arch/powerpc/mm/book3s64/iommu_api.c | 4 +-- arch/powerpc/mm/book3s64/subpage_prot.c | 12 ++++---- arch/powerpc/mm/copro_fault.c | 4 +-- arch/powerpc/mm/fault.c | 12 ++++---- arch/powerpc/oprofile/cell/spu_task_sync.c | 6 ++-- arch/powerpc/platforms/cell/spufs/file.c | 4 +-- arch/riscv/kernel/vdso.c | 4 +-- arch/riscv/mm/fault.c | 10 +++---- arch/s390/kernel/uv.c | 4 +-- arch/s390/kernel/vdso.c | 4 +-- arch/s390/kvm/gaccess.c | 4 +-- arch/s390/kvm/interrupt.c | 4 +-- arch/s390/kvm/kvm-s390.c | 28 +++++++++---------- arch/s390/kvm/priv.c | 32 +++++++++++----------- arch/s390/mm/fault.c | 14 +++++----- arch/s390/mm/gmap.c | 44 +++++++++++++++--------------- arch/s390/pci/pci_mmio.c | 4 +-- arch/sh/kernel/sys_sh.c | 6 ++-- arch/sh/kernel/vsyscall/vsyscall.c | 4 +-- arch/sh/mm/fault.c | 10 +++---- arch/sparc/mm/fault_32.c | 18 ++++++------ arch/sparc/mm/fault_64.c | 12 ++++---- arch/sparc/vdso/vma.c | 4 +-- arch/um/include/asm/mmu_context.h | 2 +- arch/um/kernel/tlb.c | 2 +- 
arch/um/kernel/trap.c | 6 ++-- arch/unicore32/mm/fault.c | 6 ++-- arch/x86/entry/vdso/vma.c | 14 +++++----- arch/x86/kernel/vm86_32.c | 4 +-- arch/x86/mm/fault.c | 8 +++--- arch/x86/um/vdso/vma.c | 4 +-- arch/xtensa/mm/fault.c | 10 +++---- 67 files changed, 280 insertions(+), 280 deletions(-) (limited to 'arch') diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 8383ccfaccdc..49754e07e04f 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -957,12 +957,12 @@ give_sigsegv: si_code = SEGV_ACCERR; else { struct mm_struct *mm = current->mm; - down_read(&mm->mmap_sem); + mmap_read_lock(mm); if (find_vma(mm, (unsigned long)va)) si_code = SEGV_ACCERR; else si_code = SEGV_MAPERR; - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); } send_sig_fault(SIGSEGV, si_code, va, 0, current); return; diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index c2d7b6d7bac7..36efa778ee1a 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -117,7 +117,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, if (user_mode(regs)) flags |= FAULT_FLAG_USER; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma(mm, address); if (!vma) goto bad_area; @@ -180,14 +180,14 @@ retry: } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return; /* Something tried to access memory that isn't in our memory map. Fix it, but check if it's kernel or user first. */ bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (user_mode(regs)) goto do_sigsegv; @@ -211,14 +211,14 @@ retry: /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. */ out_of_memory: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* Send a sigbus, regardless of whether we were in kernel or user mode. 
*/ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0); diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 315528f04bc1..8c8e5172fecd 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -90,10 +90,10 @@ fault: if (unlikely(ret != -EFAULT)) goto fail; - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr, FAULT_FLAG_WRITE, NULL); - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); if (likely(!ret)) goto again; diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 3044c8347b74..28e8bf04b253 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -89,7 +89,7 @@ static void show_faulting_vma(unsigned long address) /* can't use print_vma_addr() yet as it doesn't check for * non-inclusive vma */ - down_read(&active_mm->mmap_sem); + mmap_read_lock(active_mm); vma = find_vma(active_mm, address); /* check against the find_vma( ) behaviour which returns the next VMA @@ -111,7 +111,7 @@ static void show_faulting_vma(unsigned long address) } else pr_info(" @No matching VMA found\n"); - up_read(&active_mm->mmap_sem); + mmap_read_unlock(active_mm); } static void show_ecr_verbose(struct pt_regs *regs) diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 92b339c7adba..5b213bc0ae84 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -107,7 +107,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) flags |= FAULT_FLAG_WRITE; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma(mm, address); if (!vma) @@ -150,7 +150,7 @@ retry: } bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* * Major/minor page fault accounting diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 46e478fb5ea2..58eaa1f60e16 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -431,7 +431,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) npages = 1; /* for sigpage */ npages += vdso_total_pages; - if (down_write_killable(&mm->mmap_sem)) + if (mmap_write_lock_killable(mm)) return -EINTR; hint = sigpage_addr(mm, npages); addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0); @@ -458,7 +458,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) arm_install_vdso(mm, addr + PAGE_SIZE); up_fail: - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } #endif diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index e640871328c1..6166ba38bf99 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -97,12 +97,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr) { int si_code; - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); if (find_vma(current->mm, addr) == NULL) si_code = SEGV_MAPERR; else si_code = SEGV_ACCERR; - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); pr_debug("SWP{B} emulation: access caused memory abort!\n"); arm_notify_die("Illegal memory access", regs, diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index d72b14c96670..106f83a5ea6d 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -101,7 +101,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) atomic = faulthandler_disabled(); if (!atomic) - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); while (n) {
pte_t *pte; spinlock_t *ptl; @@ -109,11 +109,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) while (!pin_page_for_write(to, &pte, &ptl)) { if (!atomic) - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); if (__put_user(0, (char __user *)to)) goto out; if (!atomic) - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); } tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1; @@ -133,7 +133,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) spin_unlock(ptl); } if (!atomic) - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); out: return n; @@ -170,17 +170,17 @@ __clear_user_memset(void __user *addr, unsigned long n) return 0; } - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); while (n) { pte_t *pte; spinlock_t *ptl; int tocopy; while (!pin_page_for_write(addr, &pte, &ptl)) { - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); if (__put_user(0, (char __user *)addr)) goto out; - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); } tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1; @@ -198,7 +198,7 @@ __clear_user_memset(void __user *addr, unsigned long n) else spin_unlock(ptl); } - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); out: return n; diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 6229e9d2da6e..8e56c43981b4 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -271,11 +271,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) * validly references user space from well defined areas of the code, * we can bug out early if this is from code which shouldn't. */ - if (!down_read_trylock(&mm->mmap_sem)) { + if (!mmap_read_trylock(mm)) { if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc)) goto no_context; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); } else { /* * The above down_read_trylock() might have succeeded in @@ -325,7 +325,7 @@ retry: } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* * Handle the "normal" case first - VM_FAULT_MAJOR diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 3fd9e2731e0d..50cc30acf106 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -448,12 +448,12 @@ void arm64_notify_segfault(unsigned long addr) { int code; - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); if (find_vma(current->mm, addr) == NULL) code = SEGV_MAPERR; else code = SEGV_ACCERR; - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); force_signal_inject(SIGSEGV, code, addr); } diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index d51a898fd60f..4e016574bd91 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -340,7 +340,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) struct mm_struct *mm = current->mm; int ret; - if (down_write_killable(&mm->mmap_sem)) + if (mmap_write_lock_killable(mm)) return -EINTR; ret = aarch32_kuser_helpers_setup(mm); @@ -357,7 +357,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) #endif /* CONFIG_COMPAT_VDSO */ out: - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } #endif /* CONFIG_COMPAT */ @@ -398,7 +398,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, struct mm_struct *mm = current->mm; int ret; - if (down_write_killable(&mm->mmap_sem)) + if (mmap_write_lock_killable(mm)) return -EINTR; ret = __setup_additional_pages(VDSO_ABI_AA64, @@ -406,7 +406,7 @@ int
arch_setup_additional_pages(struct linux_binprm *bprm, bprm, uses_interp); - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index fe30e95e95b2..8afb238ff335 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -497,11 +497,11 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, * validly references user space from well defined areas of the code, * we can bug out early if this is from code which shouldn't. */ - if (!down_read_trylock(&mm->mmap_sem)) { + if (!mmap_read_trylock(mm)) { if (!user_mode(regs) && !search_exception_tables(regs->pc)) goto no_context; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); } else { /* * The above down_read_trylock() might have succeeded in which @@ -510,7 +510,7 @@ retry: might_sleep(); #ifdef CONFIG_DEBUG_VM if (!user_mode(regs) && !search_exception_tables(regs->pc)) { - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); goto no_context; } #endif @@ -532,7 +532,7 @@ retry: goto retry; } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* * Handle the "normal" (no error) case first. diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c index 60ff7adfad1d..abc3dbc658d4 100644 --- a/arch/csky/kernel/vdso.c +++ b/arch/csky/kernel/vdso.c @@ -50,7 +50,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) unsigned long addr; struct mm_struct *mm = current->mm; - down_write(&mm->mmap_sem); + mmap_write_lock(mm); addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(addr)) { @@ -70,7 +70,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) mm->context.vdso = (void *)addr; up_fail: - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index 4055d430c0c8..0b9cbf2cf6a9 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -120,7 +120,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, if (in_atomic() || !mm) goto bad_area_nosemaphore; - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma(mm, address); if (!vma) goto bad_area; @@ -170,7 +170,7 @@ good_area: address); } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return; /* @@ -178,7 +178,7 @@ good_area: * Fix it, but check if it's kernel or user first.. */ bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ @@ -217,7 +217,7 @@ out_of_memory: do_sigbus: tsk->thread.trap_no = (regs->sr >> 16) & 0xff; - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c index 25a1d9cfd4cc..b70970ac809f 100644 --- a/arch/hexagon/kernel/vdso.c +++ b/arch/hexagon/kernel/vdso.c @@ -52,7 +52,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) unsigned long vdso_base; struct mm_struct *mm = current->mm; - if (down_write_killable(&mm->mmap_sem)) + if (mmap_write_lock_killable(mm)) return -EINTR; /* Try to get it loaded right near ld.so/glibc. 
*/ @@ -76,7 +76,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) mm->context.vdso = (void *)vdso_base; up_fail: - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index 6ed099351c06..cd3808f96b93 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -54,7 +54,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs) if (user_mode(regs)) flags |= FAULT_FLAG_USER; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma(mm, address); if (!vma) goto bad_area; @@ -106,11 +106,11 @@ good_area: } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return; } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* Handle copyin/out exception cases */ if (!user_mode(regs)) @@ -137,7 +137,7 @@ good_area: return; bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (user_mode(regs)) { force_sig_fault(SIGSEGV, si_code, (void __user *)address); diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index df257002950e..971f166873aa 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2260,13 +2260,13 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t * now we atomically find some area in the address space and * remap the buffer in it. */ - down_write(&task->mm->mmap_sem); + mmap_write_lock(task->mm); /* find some free area in address space, must have mmap sem held */ vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS); if (IS_ERR_VALUE(vma->vm_start)) { DPRINT(("Cannot find unmapped area for size %ld\n", size)); - up_write(&task->mm->mmap_sem); + mmap_write_unlock(task->mm); goto error; } vma->vm_end = vma->vm_start + size; @@ -2277,7 +2277,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t /* can only be applied to current task, need to have the mm semaphore held when called */ if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) { DPRINT(("Can't remap buffer\n")); - up_write(&task->mm->mmap_sem); + mmap_write_unlock(task->mm); goto error; } @@ -2288,7 +2288,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t insert_vm_struct(mm, vma); vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma)); - up_write(&task->mm->mmap_sem); + mmap_write_unlock(task->mm); /* * keep track of user level virtual address diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 1bad5be5e61a..245545b43a4c 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -106,7 +106,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re if (mask & VM_WRITE) flags |= FAULT_FLAG_WRITE; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma_prev(mm, address, &prev_vma); if (!vma && !prev_vma ) @@ -182,7 +182,7 @@ retry: } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return; check_expansion: @@ -213,7 +213,7 @@ retry: goto good_area; bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); #ifdef CONFIG_VIRTUAL_MEM_MAP bad_area_no_up: #endif @@ -279,7 +279,7 @@ retry: return; out_of_memory: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index ca760f6cb18f..0b3fb4c7af29 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -118,13 +118,13 @@ ia64_init_addr_space (void) 
vma->vm_end = vma->vm_start + PAGE_SIZE; vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - down_write(&current->mm->mmap_sem); + mmap_write_lock(current->mm); if (insert_vm_struct(current->mm, vma)) { - up_write(&current->mm->mmap_sem); + mmap_write_unlock(current->mm); vm_area_free(vma); return; } - up_write(&current->mm->mmap_sem); + mmap_write_unlock(current->mm); } /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ @@ -136,13 +136,13 @@ ia64_init_addr_space (void) vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - down_write(&current->mm->mmap_sem); + mmap_write_lock(current->mm); if (insert_vm_struct(current->mm, vma)) { - up_write(&current->mm->mmap_sem); + mmap_write_unlock(current->mm); vm_area_free(vma); return; } - up_write(&current->mm->mmap_sem); + mmap_write_unlock(current->mm); } } } diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c index 18a4de7d5934..1c235d8f53f3 100644 --- a/arch/m68k/kernel/sys_m68k.c +++ b/arch/m68k/kernel/sys_m68k.c @@ -399,7 +399,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) * Verify that the specified address region actually belongs * to this process. */ - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); vma = find_vma(current->mm, addr); if (!vma || addr < vma->vm_start || addr + len > vma->vm_end) goto out_unlock; @@ -450,7 +450,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) } } out_unlock: - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); out: return ret; } @@ -472,7 +472,7 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5, spinlock_t *ptl; unsigned long mem_value; - down_read(&mm->mmap_sem); + mmap_read_lock(mm); pgd = pgd_offset(mm, (unsigned long)mem); if (!pgd_present(*pgd)) goto bad_access; @@ -501,11 +501,11 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5, __put_user(newval, mem); pte_unmap_unlock(pte, ptl); - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return mem_value; bad_access: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* This is not necessarily a bad access, we can get here if a memory we're trying to write to should be copied-on-write. Make the kernel do the necessary page stuff, then re-iterate. @@ -545,13 +545,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5, struct mm_struct *mm = current->mm; unsigned long mem_value; - down_read(&mm->mmap_sem); + mmap_read_lock(mm); mem_value = *mem; if (mem_value == oldval) *mem = newval; - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return mem_value; } diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 3bfb5c8ac3c7..650acab0d77d 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -86,7 +86,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, if (user_mode(regs)) flags |= FAULT_FLAG_USER; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma(mm, address); if (!vma) @@ -174,7 +174,7 @@ good_area: } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return 0; /* @@ -182,7 +182,7 @@ good_area: * us unable to handle the page fault gracefully.
*/ out_of_memory: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); @@ -211,6 +211,6 @@ acc_err: current->thread.faddr = address; send_sig: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return send_fault_sig(regs); } diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index 77dabc91d192..952ab614d50e 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -136,12 +136,12 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, * source. If this is invalid we can skip the address space check, * thus avoiding the deadlock. */ - if (unlikely(!down_read_trylock(&mm->mmap_sem))) { + if (unlikely(!mmap_read_trylock(mm))) { if (kernel_mode(regs) && !search_exception_tables(regs->pc)) goto bad_area_nosemaphore; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); } vma = find_vma(mm, address); @@ -247,7 +247,7 @@ good_area: } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* * keep track of tlb+htab misses that are good addrs but * cannot be handled...return; bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); bad_area_nosemaphore: pte_errors++; @@ -277,7 +277,7 @@ bad_area_nosemaphore: * us unable to handle the page fault gracefully. */ out_of_memory: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (!user_mode(regs)) bad_page_fault(regs, address, SIGKILL); else @@ -285,7 +285,7 @@ out_of_memory: return; do_sigbus: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (user_mode(regs)) { force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address); return; diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e8cc10a9edc4..7c32c956156a 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -793,13 +793,13 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) return 1; case SIGSEGV: - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); vma = find_vma(current->mm, (unsigned long)fault_addr); if (vma && (vma->vm_start <= (unsigned long)fault_addr)) si_code = SEGV_ACCERR; else si_code = SEGV_MAPERR; - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); force_sig_fault(SIGSEGV, si_code, fault_addr); return 1; diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 3adb7354bc01..242dc5e83847 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -94,7 +94,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) struct vm_area_struct *vma; int ret; - if (down_write_killable(&mm->mmap_sem)) + if (mmap_write_lock_killable(mm)) return -EINTR; if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) { @@ -187,6 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ret = 0; out: - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c index 90bcae6f8554..e16009a07971 100644 --- a/arch/nds32/kernel/vdso.c +++ b/arch/nds32/kernel/vdso.c @@ -130,7 +130,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_mapping_len += L1_cache_info[DCACHE].aliasing_num - 1; #endif - if (down_write_killable(&mm->mmap_sem)) + if (mmap_write_lock_killable(mm)) return -EINTR; addr = vdso_random_addr(vdso_mapping_len); @@ -185,12 +185,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) goto up_fail; } - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return 0; up_fail: mm->context.vdso = NULL; -
up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c index ccd70004f97c..b92785588c30 100644 --- a/arch/nds32/mm/fault.c +++ b/arch/nds32/mm/fault.c @@ -126,12 +126,12 @@ void do_page_fault(unsigned long entry, unsigned long addr, * validly references user space from well defined areas of the code, * we can bug out early if this is from code which shouldn't. */ - if (unlikely(!down_read_trylock(&mm->mmap_sem))) { + if (unlikely(!mmap_read_trylock(mm))) { if (!user_mode(regs) && !search_exception_tables(instruction_pointer(regs))) goto no_context; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); } else { /* * The above down_read_trylock() might have succeeded in which @@ -255,7 +255,7 @@ good_area: } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return; /* @@ -263,7 +263,7 @@ good_area: * Fix it, but check if it's kernel or user first.. */ bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); bad_area_nosemaphore: @@ -323,14 +323,14 @@ no_context: */ out_of_memory: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 964eac1a21d0..b8a0b51c6b0f 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -83,11 +83,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause, if (user_mode(regs)) flags |= FAULT_FLAG_USER; - if (!down_read_trylock(&mm->mmap_sem)) { + if (!mmap_read_trylock(mm)) { if (!user_mode(regs) && !search_exception_tables(regs->ea)) goto bad_area_nosemaphore; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); } vma = find_vma(mm, address); @@ -169,7 +169,7 @@ good_area: } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return; /* @@ -177,7 +177,7 @@ good_area: * Fix it, but check if it's kernel or user first.. */ bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ @@ -215,14 +215,14 @@ no_context: * us unable to handle the page fault gracefully. */ out_of_memory: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* Kernel mode? 
Handle exceptions or die */ if (!user_mode(regs)) diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 2ab0a16f36a8..61862dbb0e32 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -109,14 +109,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) struct mm_struct *mm = current->mm; int ret; - down_write(&mm->mmap_sem); + mmap_write_lock(mm); /* Map kuser helpers to user space address */ ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC, kuser_page); - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index 6e0a11ac4c00..0bbb1a76949a 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -104,7 +104,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address, goto no_context; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma(mm, address); if (!vma) @@ -192,7 +192,7 @@ good_area: } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return; /* @@ -201,7 +201,7 @@ good_area: */ bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); bad_area_nosemaphore: @@ -260,14 +260,14 @@ out_of_memory: __asm__ __volatile__("l.nop 42"); __asm__ __volatile__("l.nop 1"); - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* * Send a sigbus, regardless of whether we were in kernel diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 0a89899f154a..5400e23a77a1 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -717,7 +717,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) if (user_mode(regs)) { struct vm_area_struct *vma; - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); vma = find_vma(current->mm,regs->iaoq[0]); if (vma && (regs->iaoq[0] >= vma->vm_start) && (vma->vm_flags & VM_EXEC)) { @@ -725,10 +725,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs) fault_address = regs->iaoq[0]; fault_space = regs->iasq[0]; - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); break; /* call do_page_fault() */ } - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); } /* Fall Through */ case 27: diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 86e8c848f3d7..bc840fdb398f 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -282,7 +282,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, if (acc_type & VM_WRITE) flags |= FAULT_FLAG_WRITE; retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma_prev(mm, address, &prev_vma); if (!vma || address < vma->vm_start) goto check_expansion; @@ -337,7 +337,7 @@ good_area: goto retry; } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return; check_expansion: @@ -349,7 +349,7 @@ check_expansion: * Something tried to access memory that isn't in our memory map..
*/ bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (user_mode(regs)) { int signo, si_code; @@ -421,7 +421,7 @@ no_context: parisc_terminate("Bad Address (null pointer deref?)", regs, code, address); out_of_memory: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 74782129a84e..e0f4ba45b6cc 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -170,7 +170,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * and end up putting it elsewhere. * Add enough to the size so that the result can be aligned. */ - if (down_write_killable(&mm->mmap_sem)) + if (mmap_write_lock_killable(mm)) return -EINTR; vdso_base = get_unmapped_area(NULL, vdso_base, (vdso_pages << PAGE_SHIFT) + @@ -210,11 +210,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) goto fail_mmapsem; } - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return 0; fail_mmapsem: - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return rc; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index a07e12ed9f5a..7f5d58663f13 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4621,14 +4621,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) /* Look up the VMA for the start of this memory slot */ hva = memslot->userspace_addr; - down_read(&kvm->mm->mmap_sem); + mmap_read_lock(kvm->mm); vma = find_vma(kvm->mm, hva); if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO)) goto up_out; psize = vma_kernel_pagesize(vma); - up_read(&kvm->mm->mmap_sem); + mmap_read_unlock(kvm->mm); /* We can handle 4k, 64k or 16M pages in the VRMA */ if (psize >= 0x1000000) @@ -4661,7 +4661,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) return err; up_out: - up_read(&kvm->mm->mmap_sem); + mmap_read_unlock(kvm->mm); goto out_srcu; } diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 76d05c71fb1f..305997b015b6 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -408,7 +408,7 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start, */ ret = ksm_madvise(vma, vma->vm_start, vma->vm_end, MADV_UNMERGEABLE, &vma->vm_flags); - downgrade_write(&kvm->mm->mmap_sem); + mmap_write_downgrade(kvm->mm); *downgrade = true; if (ret) return ret; @@ -525,7 +525,7 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, ret = H_PARAMETER; srcu_idx = srcu_read_lock(&kvm->srcu); - down_write(&kvm->mm->mmap_sem); + mmap_write_lock(kvm->mm); start = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(start)) @@ -548,9 +548,9 @@ out_unlock: mutex_unlock(&kvm->arch.uvmem_lock); out: if (downgrade) - up_read(&kvm->mm->mmap_sem); + mmap_read_unlock(kvm->mm); else - up_write(&kvm->mm->mmap_sem); + mmap_write_unlock(kvm->mm); srcu_read_unlock(&kvm->srcu, srcu_idx); return ret; } @@ -703,7 +703,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, ret = H_PARAMETER; srcu_idx = srcu_read_lock(&kvm->srcu); - down_read(&kvm->mm->mmap_sem); + mmap_read_lock(kvm->mm); start = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(start)) goto out; @@ -716,7 +716,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa)) ret = H_SUCCESS; out: - up_read(&kvm->mm->mmap_sem); + mmap_read_unlock(kvm->mm); srcu_read_unlock(&kvm->srcu, 
srcu_idx); return ret; } diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index df9989cf7ba3..d6c1069e9954 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -355,7 +355,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, if (tlbsel == 1) { struct vm_area_struct *vma; - down_read(&kvm->mm->mmap_sem); + mmap_read_lock(kvm->mm); vma = find_vma(kvm->mm, hva); if (vma && hva >= vma->vm_start && @@ -441,7 +441,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); } - up_read(&kvm->mm->mmap_sem); + mmap_read_unlock(kvm->mm); } if (likely(!pfnmap)) { diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c index fa05bbd1f682..563faa10bb66 100644 --- a/arch/powerpc/mm/book3s64/iommu_api.c +++ b/arch/powerpc/mm/book3s64/iommu_api.c @@ -96,7 +96,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, goto unlock_exit; } - down_read(&mm->mmap_sem); + mmap_read_lock(mm); chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) / sizeof(struct vm_area_struct *); chunk = min(chunk, entries); @@ -114,7 +114,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, pinned += ret; break; } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (pinned != entries) { if (!ret) ret = -EFAULT; diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c index 6c9d5415aa72..e814d34bf7f4 100644 --- a/arch/powerpc/mm/book3s64/subpage_prot.c +++ b/arch/powerpc/mm/book3s64/subpage_prot.c @@ -94,7 +94,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len) size_t nw; unsigned long next, limit; - down_write(&mm->mmap_sem); + mmap_write_lock(mm); spt = mm_ctx_subpage_prot(&mm->context); if (!spt) @@ -129,7 +129,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len) } err_out: - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -219,7 +219,7 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr, if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32))) return -EFAULT; - down_write(&mm->mmap_sem); + mmap_write_lock(mm); spt = mm_ctx_subpage_prot(&mm->context); if (!spt) { @@ -269,11 +269,11 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr, if (addr + (nw << PAGE_SHIFT) > next) nw = (next - addr) >> PAGE_SHIFT; - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); if (__copy_from_user(spp, map, nw * sizeof(u32))) return -EFAULT; map += nw; - down_write(&mm->mmap_sem); + mmap_write_lock(mm); /* now flush any existing HPTEs for the range */ hpte_flush_range(mm, addr, nw); @@ -282,6 +282,6 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr, spt->maxaddr = limit; err = 0; out: - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return err; } diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index beb060b96632..b83abbead4a2 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -33,7 +33,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, if (mm->pgd == NULL) return -EFAULT; - down_read(&mm->mmap_sem); + mmap_read_lock(mm); ret = -EFAULT; vma = find_vma(mm, ea); if (!vma) @@ -82,7 +82,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, current->min_flt++; out_unlock: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return ret; } EXPORT_SYMBOL_GPL(copro_handle_mm_fault); diff --git 
a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 39d23a557bef..ff3653e67c7b 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -108,7 +108,7 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code) * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return __bad_area_nosemaphore(regs, address, si_code); } @@ -144,7 +144,7 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, */ pkey = vma_pkey(vma); - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* * If we are in kernel mode, bail out with a SEGV, this will @@ -551,12 +551,12 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, * source. If this is invalid we can skip the address space check, * thus avoiding the deadlock. */ - if (unlikely(!down_read_trylock(&mm->mmap_sem))) { + if (unlikely(!mmap_read_trylock(mm))) { if (!is_user && !search_exception_tables(regs->nip)) return bad_area_nosemaphore(regs, address); retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); } else { /* * The above down_read_trylock() might have succeeded in @@ -580,7 +580,7 @@ retry: if (!must_retry) return bad_area(regs, address); - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (fault_in_pages_readable((const char __user *)regs->nip, sizeof(unsigned int))) return bad_area_nosemaphore(regs, address); @@ -625,7 +625,7 @@ good_area: } } - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); if (unlikely(fault & VM_FAULT_ERROR)) return mm_fault_error(regs, address, fault); diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c index 0caec3d8d436..df59d0bb121f 100644 --- a/arch/powerpc/oprofile/cell/spu_task_sync.c +++ b/arch/powerpc/oprofile/cell/spu_task_sync.c @@ -332,7 +332,7 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp, fput(exe_file); } - down_read(&mm->mmap_sem); + mmap_read_lock(mm); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref) continue; @@ -349,13 +349,13 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp, *spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path); pr_debug("got dcookie for %pD\n", vma->vm_file); - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); out: return app_cookie; fail_no_image_cookie: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); printk(KERN_ERR "SPU_PROF: " "%s, line %d: Cannot find dcookie for SPU binary\n", diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index e44427c24585..324c0fb091a1 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -343,11 +343,11 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf, goto refault; if (ctx->state == SPU_STATE_SAVED) { - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); spu_context_nospu_trace(spufs_ps_fault__sleep, ctx); err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu); - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); } else { area = ctx->spu->problem_phys + ps_offs; ret = vmf_insert_pfn(vmf->vma, vmf->address, diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c index 484d95a70907..e827fae3bf90 100644 --- a/arch/riscv/kernel/vdso.c +++ b/arch/riscv/kernel/vdso.c @@ -61,7 +61,7 @@ int
arch_setup_additional_pages(struct linux_binprm *bprm, vdso_len = (vdso_pages + 1) << PAGE_SHIFT; - down_write(&mm->mmap_sem); + mmap_write_lock(mm); vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0); if (IS_ERR_VALUE(vdso_base)) { ret = vdso_base; @@ -83,7 +83,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, mm->context.vdso = NULL; end: - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index be84e32adc4c..cd7f4af95e56 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -69,7 +69,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs) perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma(mm, addr); if (unlikely(!vma)) goto bad_area; @@ -155,7 +155,7 @@ good_area: } } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return; /* @@ -163,7 +163,7 @@ good_area: * Fix it, but check if it's kernel or user first. */ bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { do_trap(regs, SIGSEGV, code, addr); @@ -191,14 +191,14 @@ no_context: * (which will retry the fault, or kill us if we got oom-killed). */ out_of_memory: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) goto no_context; diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index 4c0677fc8904..66e89b2866d7 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -205,7 +205,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb) again: rc = -EFAULT; - down_read(&gmap->mm->mmap_sem); + mmap_read_lock(gmap->mm); uaddr = __gmap_translate(gmap, gaddr); if (IS_ERR_VALUE(uaddr)) @@ -234,7 +234,7 @@ again: pte_unmap_unlock(ptep, ptelock); unlock_page(page); out: - up_read(&gmap->mm->mmap_sem); + mmap_read_unlock(gmap->mm); if (rc == -EAGAIN) { wait_on_page_writeback(page); diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 0e98a5d1bbfb..c4baefaa6e34 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -207,7 +207,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * it at vdso_base which is the "natural" base for it, but we might * fail and end up putting it elsewhere. 
*/ - if (down_write_killable(&mm->mmap_sem)) + if (mmap_write_lock_killable(mm)) return -EINTR; vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0); if (IS_ERR_VALUE(vdso_base)) { @@ -238,7 +238,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) rc = 0; out_up: - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return rc; } diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 4e6fb34c7d90..6d6b57059493 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -1173,7 +1173,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg, int dat_protection, fake; int rc; - down_read(&sg->mm->mmap_sem); + mmap_read_lock(sg->mm); /* * We don't want any guest-2 tables to change - so the parent * tables/pointers we read stay valid - unshadowing is however @@ -1202,6 +1202,6 @@ shadow_page: if (!rc) rc = gmap_shadow_page(sg, saddr, __pte(pte.val)); ipte_unlock(vcpu); - up_read(&sg->mm->mmap_sem); + mmap_read_unlock(sg->mm); return rc; } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index a4d4ca2769bd..1608fd99bbee 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2767,10 +2767,10 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr) { struct page *page = NULL; - down_read(&kvm->mm->mmap_sem); + mmap_read_lock(kvm->mm); get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE, &page, NULL, NULL); - up_read(&kvm->mm->mmap_sem); + mmap_read_unlock(kvm->mm); return page; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 5c546d27fdae..d0ff26d157bc 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -763,9 +763,9 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) r = -EINVAL; else { r = 0; - down_write(&kvm->mm->mmap_sem); + mmap_write_lock(kvm->mm); kvm->mm->context.allow_gmap_hpage_1m = 1; - up_write(&kvm->mm->mmap_sem); + mmap_write_unlock(kvm->mm); /* * We might have to create fake 4k page * tables. 
To avoid that the hardware works on @@ -1815,7 +1815,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) if (!keys) return -ENOMEM; - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); srcu_idx = srcu_read_lock(&kvm->srcu); for (i = 0; i < args->count; i++) { hva = gfn_to_hva(kvm, args->start_gfn + i); @@ -1829,7 +1829,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) break; } srcu_read_unlock(&kvm->srcu, srcu_idx); - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); if (!r) { r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, @@ -1873,7 +1873,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) goto out; i = 0; - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); srcu_idx = srcu_read_lock(&kvm->srcu); while (i < args->count) { unlocked = false; @@ -1900,7 +1900,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) i++; } srcu_read_unlock(&kvm->srcu, srcu_idx); - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); out: kvfree(keys); return r; @@ -2089,14 +2089,14 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm, if (!values) return -ENOMEM; - down_read(&kvm->mm->mmap_sem); + mmap_read_lock(kvm->mm); srcu_idx = srcu_read_lock(&kvm->srcu); if (peek) ret = kvm_s390_peek_cmma(kvm, args, values, bufsize); else ret = kvm_s390_get_cmma(kvm, args, values, bufsize); srcu_read_unlock(&kvm->srcu, srcu_idx); - up_read(&kvm->mm->mmap_sem); + mmap_read_unlock(kvm->mm); if (kvm->arch.migration_mode) args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); @@ -2146,7 +2146,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm, goto out; } - down_read(&kvm->mm->mmap_sem); + mmap_read_lock(kvm->mm); srcu_idx = srcu_read_lock(&kvm->srcu); for (i = 0; i < args->count; i++) { hva = gfn_to_hva(kvm, args->start_gfn + i); @@ -2161,12 +2161,12 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm, set_pgste_bits(kvm->mm, hva, mask, pgstev); } srcu_read_unlock(&kvm->srcu, srcu_idx); - up_read(&kvm->mm->mmap_sem); + mmap_read_unlock(kvm->mm); if (!kvm->mm->context.uses_cmm) { - down_write(&kvm->mm->mmap_sem); + mmap_write_lock(kvm->mm); kvm->mm->context.uses_cmm = 1; - up_write(&kvm->mm->mmap_sem); + mmap_write_unlock(kvm->mm); } out: vfree(bits); @@ -2239,9 +2239,9 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) if (r) break; - down_write(&current->mm->mmap_sem); + mmap_write_lock(current->mm); r = gmap_mark_unmergeable(); - up_write(&current->mm->mmap_sem); + mmap_write_unlock(current->mm); if (r) break; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index b579b77deea6..57e08b0a0a1a 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -270,18 +270,18 @@ static int handle_iske(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); retry: unlocked = false; - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); rc = get_guest_storage_key(current->mm, vmaddr, &key); if (rc) { rc = fixup_user_fault(current, current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); if (!rc) { - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); goto retry; } } - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); if (rc == -EFAULT) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); if (rc < 0) @@ -317,17 +317,17 @@ static int handle_rrbe(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); retry: unlocked = false; - down_read(&current->mm->mmap_sem); +
mmap_read_lock(current->mm); rc = reset_guest_reference_bit(current->mm, vmaddr); if (rc < 0) { rc = fixup_user_fault(current, current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); if (!rc) { - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); goto retry; } } - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); if (rc == -EFAULT) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); if (rc < 0) @@ -385,7 +385,7 @@ static int handle_sske(struct kvm_vcpu *vcpu) if (kvm_is_error_hva(vmaddr)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey, m3 & SSKE_NQ, m3 & SSKE_MR, m3 & SSKE_MC); @@ -395,7 +395,7 @@ static int handle_sske(struct kvm_vcpu *vcpu) FAULT_FLAG_WRITE, &unlocked); rc = !rc ? -EAGAIN : rc; } - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); if (rc == -EFAULT) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); if (rc < 0) @@ -1091,7 +1091,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) if (rc) return rc; - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); rc = cond_set_guest_storage_key(current->mm, vmaddr, key, NULL, nq, mr, mc); if (rc < 0) { @@ -1099,7 +1099,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) FAULT_FLAG_WRITE, &unlocked); rc = !rc ? -EAGAIN : rc; } - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); if (rc == -EFAULT) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); if (rc == -EAGAIN) @@ -1220,9 +1220,9 @@ static int handle_essa(struct kvm_vcpu *vcpu) * already correct, we do nothing and avoid the lock. */ if (vcpu->kvm->mm->context.uses_cmm == 0) { - down_write(&vcpu->kvm->mm->mmap_sem); + mmap_write_lock(vcpu->kvm->mm); vcpu->kvm->mm->context.uses_cmm = 1; - up_write(&vcpu->kvm->mm->mmap_sem); + mmap_write_unlock(vcpu->kvm->mm); } /* * If we are here, we are supposed to have CMMA enabled in @@ -1239,11 +1239,11 @@ static int handle_essa(struct kvm_vcpu *vcpu) } else { int srcu_idx; - down_read(&vcpu->kvm->mm->mmap_sem); + mmap_read_lock(vcpu->kvm->mm); srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); i = __do_essa(vcpu, orc); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); - up_read(&vcpu->kvm->mm->mmap_sem); + mmap_read_unlock(vcpu->kvm->mm); if (i < 0) return i; /* Account for the possible extra cbrl entry */ @@ -1251,10 +1251,10 @@ static int handle_essa(struct kvm_vcpu *vcpu) } vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); - down_read(&gmap->mm->mmap_sem); + mmap_read_lock(gmap->mm); for (i = 0; i < entries; ++i) __gmap_zap(gmap, cbrlo[i]); - up_read(&gmap->mm->mmap_sem); + mmap_read_unlock(gmap->mm); return 0; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 2082323e2e59..f0fa51af8f5b 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -433,7 +433,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) flags |= FAULT_FLAG_USER; if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) flags |= FAULT_FLAG_WRITE; - down_read(&mm->mmap_sem); + mmap_read_lock(mm); gmap = NULL; if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) { @@ -514,7 +514,7 @@ retry: } flags &= ~FAULT_FLAG_RETRY_NOWAIT; flags |= FAULT_FLAG_TRIED; - down_read(&mm->mmap_sem); + mmap_read_lock(mm); goto retry; } } @@ -532,7 +532,7 @@ retry: } fault = 0; out_up: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); out: return fault; } @@ -824,22 +824,22 @@ void
do_secure_storage_access(struct pt_regs *regs)
 	switch (get_fault_type(regs)) {
 	case USER_FAULT:
 		mm = current->mm;
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 		vma = find_vma(mm, addr);
 		if (!vma) {
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
 			break;
 		}
 		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
 		if (IS_ERR_OR_NULL(page)) {
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			break;
 		}
 		if (arch_make_page_accessible(page))
 			send_sig(SIGSEGV, current, 0);
 		put_page(page);
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		break;
 	case KERNEL_FAULT:
 		page = phys_to_page(addr);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 223e14d4fcff..a8e818d10c6b 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -405,10 +405,10 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 		return -EINVAL;
 
 	flush = 0;
-	down_write(&gmap->mm->mmap_sem);
+	mmap_write_lock(gmap->mm);
 	for (off = 0; off < len; off += PMD_SIZE)
 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
-	up_write(&gmap->mm->mmap_sem);
+	mmap_write_unlock(gmap->mm);
 	if (flush)
 		gmap_flush_tlb(gmap);
 	return 0;
@@ -438,7 +438,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		return -EINVAL;
 
 	flush = 0;
-	down_write(&gmap->mm->mmap_sem);
+	mmap_write_lock(gmap->mm);
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Remove old translation */
 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
@@ -448,7 +448,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 				      (void *) from + off))
 			break;
 	}
-	up_write(&gmap->mm->mmap_sem);
+	mmap_write_unlock(gmap->mm);
 	if (flush)
 		gmap_flush_tlb(gmap);
 	if (off >= len)
@@ -495,9 +495,9 @@ unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
 {
 	unsigned long rc;
 
-	down_read(&gmap->mm->mmap_sem);
+	mmap_read_lock(gmap->mm);
 	rc = __gmap_translate(gmap, gaddr);
-	up_read(&gmap->mm->mmap_sem);
+	mmap_read_unlock(gmap->mm);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_translate);
@@ -640,7 +640,7 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
 	int rc;
 	bool unlocked;
 
-	down_read(&gmap->mm->mmap_sem);
+	mmap_read_lock(gmap->mm);
 
 retry:
 	unlocked = false;
@@ -663,7 +663,7 @@ retry:
 		rc = __gmap_link(gmap, gaddr, vmaddr);
 out_up:
-	up_read(&gmap->mm->mmap_sem);
+	mmap_read_unlock(gmap->mm);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_fault);
@@ -696,7 +696,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
 	unsigned long gaddr, vmaddr, size;
 	struct vm_area_struct *vma;
 
-	down_read(&gmap->mm->mmap_sem);
+	mmap_read_lock(gmap->mm);
 	for (gaddr = from; gaddr < to;
 	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
 		/* Find the vm address for the guest address */
@@ -719,7 +719,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
 		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
 		zap_page_range(vma, vmaddr, size);
 	}
-	up_read(&gmap->mm->mmap_sem);
+	mmap_read_unlock(gmap->mm);
 }
 EXPORT_SYMBOL_GPL(gmap_discard);
@@ -1106,9 +1106,9 @@ int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
 		return -EINVAL;
 	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
 		return -EINVAL;
-	down_read(&gmap->mm->mmap_sem);
+	mmap_read_lock(gmap->mm);
 	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
-	up_read(&gmap->mm->mmap_sem);
+	mmap_read_unlock(gmap->mm);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
@@ -1696,11 +1696,11 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
 	}
 	spin_unlock(&parent->shadow_lock);
 	/* protect after insertion, so it will get properly invalidated */
-	down_read(&parent->mm->mmap_sem);
+	mmap_read_lock(parent->mm);
 	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
 				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
 				PROT_READ, GMAP_NOTIFY_SHADOW);
-	up_read(&parent->mm->mmap_sem);
+	mmap_read_unlock(parent->mm);
 	spin_lock(&parent->shadow_lock);
 	new->initialized = true;
 	if (rc) {
@@ -2543,12 +2543,12 @@ int s390_enable_sie(void)
 	/* Fail if the page tables are 2K */
 	if (!mm_alloc_pgste(mm))
 		return -EINVAL;
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	mm->context.has_pgste = 1;
 	/* split thp mappings and disable thp for future mappings */
 	thp_split_mm(mm);
 	walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
@@ -2617,7 +2617,7 @@ int s390_enable_skey(void)
 	struct mm_struct *mm = current->mm;
 	int rc = 0;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	if (mm_uses_skeys(mm))
 		goto out_up;
@@ -2630,7 +2630,7 @@ int s390_enable_skey(void)
 	walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
 
 out_up:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(s390_enable_skey);
@@ -2651,9 +2651,9 @@ static const struct mm_walk_ops reset_cmma_walk_ops = {
 
 void s390_reset_cmma(struct mm_struct *mm)
 {
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 }
 EXPORT_SYMBOL_GPL(s390_reset_cmma);
@@ -2685,9 +2685,9 @@ void s390_reset_acc(struct mm_struct *mm)
 	 */
 	if (!mmget_not_zero(mm))
 		return;
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	mmput(mm);
 }
 EXPORT_SYMBOL_GPL(s390_reset_acc);
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 020a2c514d96..38efa3e852c4 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -125,7 +125,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
 	struct vm_area_struct *vma;
 	long ret;
 
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	ret = -EINVAL;
 	vma = find_vma(current->mm, user_addr);
 	if (!vma)
@@ -135,7 +135,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
 		goto out;
 	ret = follow_pfn(vma, user_addr, pfn);
 out:
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 	return ret;
 }
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index f8afc014e084..a5a7b33ed81a 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -69,10 +69,10 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
 	if (addr + len < addr)
 		return -EFAULT;
 
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	vma = find_vma (current->mm, addr);
 	if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 		return -EFAULT;
 	}
@@ -91,6 +91,6 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
 	if (op & CACHEFLUSH_I)
 		flush_icache_range(addr, addr+len);
 
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 	return 0;
 }
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 98494480f048..1bd85a6949c4 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -61,7 +61,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long addr;
 	int ret;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
@@ -80,7 +80,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	current->mm->context.vdso = (void *)addr;
 
 up_fail:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 67d0e739897c..d0c0d5351280 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -279,7 +279,7 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
 	 * Something tried to access memory that isn't in our memory map..
 	 * Fix it, but check if it's kernel or user first..
 	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	__bad_area_nosemaphore(regs, error_code, address, si_code);
 }
@@ -303,7 +303,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Kernel mode? Handle exceptions or die: */
 	if (!user_mode(regs))
@@ -328,7 +328,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 
 	/* Release mmap_sem first if necessary */
 	if (!(fault & VM_FAULT_RETRY))
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 
 	if (!(fault & VM_FAULT_ERROR))
 		return 0;
@@ -442,7 +442,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	}
 
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	vma = find_vma(mm, address);
 	if (unlikely(!vma)) {
@@ -510,5 +510,5 @@ good_area:
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 }
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 9489513888eb..34588c4ab9d9 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -195,7 +195,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	if (!from_user && address >= PAGE_OFFSET)
 		goto bad_area;
@@ -271,7 +271,7 @@ good_area:
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 	/*
@@ -279,7 +279,7 @@ good_area:
 	 * Fix it, but check if it's kernel or user first..
 	 */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
@@ -328,7 +328,7 @@ no_context:
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (from_user) {
 		pagefault_out_of_memory();
 		return;
@@ -336,7 +336,7 @@ out_of_memory:
 	goto no_context;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
 	if (!from_user)
 		goto no_context;
@@ -390,7 +390,7 @@ static void force_user_fault(unsigned long address, int write)
 
 	code = SEGV_MAPERR;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
@@ -415,15 +415,15 @@ good_area:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
 	return;
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
 }
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index d8cd85a5f4ca..417d7f677eb3 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -318,7 +318,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (!mmap_read_trylock(mm)) {
 		if ((regs->tstate & TSTATE_PRIV) &&
 		    !search_exception_tables(regs->tpc)) {
 			insn = get_fault_insn(regs, insn);
@@ -326,7 +326,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 		}
 
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	}
 
 	if (fault_code & FAULT_CODE_BAD_RA)
@@ -458,7 +458,7 @@ good_area:
 			goto retry;
 		}
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	mm_rss = get_mm_rss(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -489,7 +489,7 @@ exit_exception:
 	 */
 bad_area:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 handle_kernel_fault:
 	do_kernel_fault(regs, si_code, fault_code, insn, address);
@@ -501,7 +501,7 @@ handle_kernel_fault:
 	 */
 out_of_memory:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!(regs->tstate & TSTATE_PRIV)) {
 		pagefault_out_of_memory();
 		goto exit_exception;
@@ -514,7 +514,7 @@ intr_or_no_mm:
 
 do_sigbus:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
index 9961b0f81693..cc19e09b0fa1 100644
--- a/arch/sparc/vdso/vma.c
+++ b/arch/sparc/vdso/vma.c
@@ -366,7 +366,7 @@ static int map_vdso(const struct vdso_image *image,
 	unsigned long text_start, addr = 0;
 	int ret = 0;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	/*
 	 * First, get an unmapped region: then randomize it, and make sure that
@@ -422,7 +422,7 @@ up_fail:
 	if (ret)
 		current->mm->context.vdso = NULL;
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index b4deb1bfbb68..62262c5c7785 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -49,7 +49,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	__switch_mm(&new->context.id);
 	down_write_nested(&new->mmap_sem, 1);
 	uml_setup_stubs(new);
-	up_write(&new->mmap_sem);
+	mmap_write_unlock(new);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index ed7f024c7f2b..6c0e76553657 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -349,7 +349,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 		printk(KERN_ERR "fix_range_common: failed, killing current "
 		       "process: %d\n", task_tgid_vnr(current));
 		/* We are under mmap_sem, release it such that current can terminate */
-		up_write(&current->mm->mmap_sem);
+		mmap_write_unlock(current->mm);
 		force_sig(SIGKILL);
 		do_signal(&current->thread.regs);
 	}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 9c4b7e73ab8d..2b3afa354a90 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -43,7 +43,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 	if (is_user)
 		flags |= FAULT_FLAG_USER;
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto out;
@@ -116,7 +116,7 @@ good_area:
 #endif
 	flush_tlb_page(vma, address);
 out:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 out_nosemaphore:
 	return err;
 
@@ -125,7 +125,7 @@ out_of_memory:
 	 * We ran out of memory, call the OOM killer, and return the userspace
 	 * (which will retry the fault, or kill us if we got oom-killed).
 	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!is_user)
 		goto out_nosemaphore;
 	pagefault_out_of_memory();
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 0549d5baa11b..99869b57b6cb 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -223,12 +223,12 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * validly references user space from well defined areas of the code,
 	 * we can bug out early if this is from code which shouldn't.
 	 */
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (!mmap_read_trylock(mm)) {
 		if (!user_mode(regs)
 		    && !search_exception_tables(regs->UCreg_pc))
 			goto no_context;
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	} else {
 		/*
 		 * The above down_read_trylock() might have succeeded in
@@ -263,7 +263,7 @@ retry:
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 43428cc514c8..ea7c1f0b79df 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -144,7 +144,7 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 	struct mm_struct *mm = task->mm;
 	struct vm_area_struct *vma;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -154,7 +154,7 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 			zap_page_range(vma, vma->vm_start, size);
 	}
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return 0;
 }
 #else
@@ -268,7 +268,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 	unsigned long text_start;
 	int ret = 0;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	addr = get_unmapped_area(NULL, addr,
@@ -311,7 +311,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 	}
 
 up_fail:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
@@ -373,7 +373,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	/*
 	 * Check if we have already mapped vdso blob - fail to prevent
 	 * abusing from userspace install_speciall_mapping, which may
@@ -384,11 +384,11 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (vma_is_special_mapping(vma, &vdso_mapping) ||
 				vma_is_special_mapping(vma, &vvar_mapping)) {
-			up_write(&mm->mmap_sem);
+			mmap_write_unlock(mm);
 			return -EEXIST;
 		}
 	}
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 
 	return map_vdso(image, addr);
 }
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 47a8676c7395..764573de3996 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -171,7 +171,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 	pte_t *pte;
 	int i;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	pgd = pgd_offset(mm, 0xA0000);
 	if (pgd_none_or_clear_bad(pgd))
 		goto out;
@@ -197,7 +197,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 	}
 	pte_unmap_unlock(pte, ptl);
 out:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
 }
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c5437f2964ee..8e7e4c2bd527 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -811,7 +811,7 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
 	 * Something tried to access memory that isn't in our memory map..
 	 * Fix it, but check if it's kernel or user first..
 	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
 }
@@ -1239,7 +1239,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 	 * 1. Failed to acquire mmap_sem, and
 	 * 2. The access did not originate in userspace.
	 */
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!mmap_read_trylock(mm))) {
 		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
 			/*
 			 * Fault from code in kernel from
@@ -1249,7 +1249,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 			return;
 		}
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	} else {
 		/*
 		 * The above down_read_trylock() might have succeeded in
@@ -1320,7 +1320,7 @@ good_area:
 		goto retry;
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, hw_error_code, address, fault);
 		return;
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index 9e7c4aba6c3a..76d9f6ce7a3d 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -58,7 +58,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (!vdso_enabled)
 		return 0;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
@@ -66,7 +66,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 		vdsop);
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 
 	return err;
 }
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index e7172bd53ced..1c8d22a0cf46 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -74,7 +74,7 @@ void do_page_fault(struct pt_regs *regs)
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 
 	if (!vma)
@@ -139,7 +139,7 @@ good_area:
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	if (flags & VM_FAULT_MAJOR)
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
@@ -152,7 +152,7 @@ good_area:
 	 * Fix it, but check if it's kernel or user first..
 	 */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (user_mode(regs)) {
 		current->thread.bad_vaddr = address;
 		current->thread.error_code = is_write;
@@ -167,7 +167,7 @@ bad_area:
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		bad_page_fault(regs, address, SIGKILL);
 	else
@@ -175,7 +175,7 @@ out_of_memory:
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
-- 
cgit v1.2.3