author | Laurent Dufour <ldufour@linux.vnet.ibm.com> | 2017-02-14 17:45:11 +0100
committer | Michael Ellerman <mpe@ellerman.id.au> | 2017-03-21 22:09:11 +1100
commit | 14c02e419a395c4bd7950175fc47eda1ecaa8c69 (patch)
tree | c35121af999465f3d28fbf445df00b0dade8599f /arch/powerpc/mm/fault.c
parent | c2294e0ffe741c8b34c630a71c7dc44d30503538 (diff)
download | linux-14c02e419a395c4bd7950175fc47eda1ecaa8c69.tar.bz2
powerpc/mm: Handle VM_FAULT_RETRY earlier
In do_page_fault(), if handle_mm_fault() returns VM_FAULT_RETRY, retry
the page fault handling before anything else.
This simplifies the handling of the mmap_sem lock in this part of
the code.
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
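For readers skimming the patch, the sketch below is a condensed, userspace-only model of the control flow the change produces in do_page_fault(): the VM_FAULT_RETRY result from handle_mm_fault() is acted on immediately, FAULT_FLAG_ALLOW_RETRY is cleared and FAULT_FLAG_TRIED is set, so the fault is retried at most once. The locally defined flag constants and the fault_mock() helper are illustrative stand-ins rather than the kernel's definitions, and the fatal_signal_pending() check from the real patch is omitted for brevity.

```c
#include <stdio.h>

/* Illustrative stand-ins for the kernel's flag and fault bits. */
#define FAULT_FLAG_ALLOW_RETRY 0x04
#define FAULT_FLAG_TRIED       0x20
#define VM_FAULT_RETRY         0x0400

/* Mock of handle_mm_fault(): ask for a retry on the first attempt only. */
static unsigned int fault_mock(unsigned int flags)
{
	return (flags & FAULT_FLAG_TRIED) ? 0 : VM_FAULT_RETRY;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
	unsigned int fault;

retry:
	fault = fault_mock(flags);

	/* The retry is handled before any error or accounting logic. */
	if (fault & VM_FAULT_RETRY) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/* Retry only once: drop ALLOW_RETRY, mark TRIED. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
		/* A second retry request would fall through to error handling. */
	}

	printf("fault handled after retry, flags=%#x\n", flags);
	return 0;
}
```

With retries resolved up front like this, the major/minor fault accounting further down no longer needs to check FAULT_FLAG_ALLOW_RETRY, which is exactly the simplification the second hunk of the patch makes.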
Diffstat (limited to 'arch/powerpc/mm/fault.c')
-rw-r--r-- | arch/powerpc/mm/fault.c | 67
1 file changed, 38 insertions(+), 29 deletions(-)
```diff
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 2d77c1c8014a..5809a3c86161 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -458,6 +458,26 @@ good_area:
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags);
+
+       /*
+        * Handle the retry right now, the mmap_sem has been released in that
+        * case.
+        */
+       if (unlikely(fault & VM_FAULT_RETRY)) {
+               /* We retry only once */
+               if (flags & FAULT_FLAG_ALLOW_RETRY) {
+                       /*
+                        * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+                        * of starvation.
+                        */
+                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       flags |= FAULT_FLAG_TRIED;
+                       if (!fatal_signal_pending(current))
+                               goto retry;
+               }
+               /* We will enter mm_fault_error() below */
+       }
+
        if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
                if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
@@ -469,38 +489,27 @@ good_area:
        }
 
        /*
-        * Major/minor page fault accounting is only done on the
-        * initial attempt. If we go through a retry, it is extremely
-        * likely that the page will be found in page cache at that point.
+        * Major/minor page fault accounting.
         */
-       if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR) {
-                       current->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-                                     regs, address);
+       if (fault & VM_FAULT_MAJOR) {
+               current->maj_flt++;
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+                             regs, address);
 #ifdef CONFIG_PPC_SMLPAR
-                       if (firmware_has_feature(FW_FEATURE_CMO)) {
-                               u32 page_ins;
-
-                               preempt_disable();
-                               page_ins = be32_to_cpu(get_lppaca()->page_ins);
-                               page_ins += 1 << PAGE_FACTOR;
-                               get_lppaca()->page_ins = cpu_to_be32(page_ins);
-                               preempt_enable();
-                       }
-#endif /* CONFIG_PPC_SMLPAR */
-               } else {
-                       current->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-                                     regs, address);
-               }
-               if (fault & VM_FAULT_RETRY) {
-                       /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
-                        * of starvation. */
-                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
-                       flags |= FAULT_FLAG_TRIED;
-                       goto retry;
+               if (firmware_has_feature(FW_FEATURE_CMO)) {
+                       u32 page_ins;
+
+                       preempt_disable();
+                       page_ins = be32_to_cpu(get_lppaca()->page_ins);
+                       page_ins += 1 << PAGE_FACTOR;
+                       get_lppaca()->page_ins = cpu_to_be32(page_ins);
+                       preempt_enable();
                }
+#endif /* CONFIG_PPC_SMLPAR */
+       } else {
+               current->min_flt++;
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+                             regs, address);
        }
 
        up_read(&mm->mmap_sem);
```