From e9d55e157034a9efd99405c99c1565d64619d82b Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Tue, 26 Jul 2016 15:24:09 -0700
Subject: mm: change the interface for __tlb_remove_page()

This updates the generic and arch-specific implementations to return
true if we need to do a TLB flush.  That means that when
__tlb_remove_page() indicates a flush is needed, the page we tried to
remove needs to be tracked and added again after the flush.  We need to
track it because we have already updated the pte to none and we can't
just loop back.

This change is done to enable us to do a tlb_flush when we try to flush
a range that consists of different page sizes.  For architectures like
ppc64, we can do a range-based TLB flush and we need to track the page
size for that.  When we try to remove a huge page, we will force a TLB
flush and start a new mmu gather.

[aneesh.kumar@linux.vnet.ibm.com: mm-change-the-interface-for-__tlb_remove_page-v3]
Link: http://lkml.kernel.org/r/1465049193-22197-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1464860389-29019-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V
Cc: Benjamin Herrenschmidt
Cc: Michael Ellerman
Cc: Hugh Dickins
Cc: "Kirill A. Shutemov"
Cc: Andrea Arcangeli
Cc: Joonsoo Kim
Cc: Mel Gorman
Cc: David Rientjes
Cc: Vlastimil Babka
Cc: Minchan Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/sh/include/asm/tlb.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'arch/sh')

diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 62f80d2a9df9..21ae8f5546b2 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -101,7 +101,7 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	free_page_and_swap_cache(page);
-	return 1; /* avoid calling tlb_flush_mmu */
+	return false; /* avoid calling tlb_flush_mmu */
 }
 
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
@@ -109,6 +109,12 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	__tlb_remove_page(tlb, page);
 }
 
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+					 struct page *page)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
 #define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
 #define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
 #define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
--
cgit v1.2.3
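For context, the caller-side pattern this new contract creates looks
roughly like the sketch below, condensed from the zap_pte_range() logic
in mm/memory.c as it stands after this series.  Surrounding code is
omitted and the loop body is abbreviated, so this is illustrative
rather than the verbatim upstream function:

	struct page *pending_page = NULL;
	int force_flush = 0;

again:
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		/* ... the pte is cleared before the page is batched ... */
		if (unlikely(__tlb_remove_page(tlb, page))) {
			/* Batch is full: remember the page and flush. */
			force_flush = 1;
			pending_page = page;
			addr += PAGE_SIZE;
			break;
		}
	}
	/* ... */
	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu_free(tlb);
		if (pending_page) {
			/* Re-add the page that did not fit before the flush. */
			__tlb_remove_pte_page(tlb, pending_page);
			pending_page = NULL;
		}
		if (addr != end)
			goto again;
	}

The key point is that the pte for pending_page is already none by the
time __tlb_remove_page() returns true, so the caller cannot simply
re-run the loop over that address; it must hand the page back to the
gather after the flush.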
From e77b0852b551ffd8b29fa0225e1ef62c195e3160 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Tue, 26 Jul 2016 15:24:12 -0700
Subject: mm/mmu_gather: track page size with mmu gather and force flush if page size change

This allows an arch which needs to do special handling with respect to
different page sizes when flushing the TLB to implement the same in the
mmu gather.

Link: http://lkml.kernel.org/r/1465049193-22197-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V
Cc: Benjamin Herrenschmidt
Cc: Michael Ellerman
Cc: Hugh Dickins
Cc: "Kirill A. Shutemov"
Cc: Andrea Arcangeli
Cc: Joonsoo Kim
Cc: Mel Gorman
Cc: David Rientjes
Cc: Vlastimil Babka
Cc: Minchan Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/include/asm/tlb.h  | 12 ++++++++++++
 arch/ia64/include/asm/tlb.h | 12 ++++++++++++
 arch/s390/include/asm/tlb.h | 13 +++++++++++++
 arch/sh/include/asm/tlb.h   | 12 ++++++++++++
 arch/um/include/asm/tlb.h   | 12 ++++++++++++
 include/asm-generic/tlb.h   | 27 +++++++++++++++++++++------
 mm/huge_memory.c            |  2 +-
 mm/hugetlb.c                |  2 +-
 mm/memory.c                 | 10 +++++++++-
 9 files changed, 93 insertions(+), 9 deletions(-)

(limited to 'arch/sh')

diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index a9d2aee3826f..1e25cd80589e 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -225,12 +225,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	}
 }
 
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
 static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
 					 struct page *page)
 {
 	return __tlb_remove_page(tlb, page);
 }
 
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
+}
+
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	unsigned long addr)
 {
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index e7da41aa9110..77e541cf0e5d 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -242,12 +242,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	}
 }
 
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
 static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
 					 struct page *page)
 {
 	return __tlb_remove_page(tlb, page);
 }
 
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
+}
+
 /*
  * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
  * PTE, not just those pointing to (normal) physical memory.
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 30759b560849..15711de10403 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -98,11 +98,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	free_page_and_swap_cache(page);
 }
 
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
 static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
 					 struct page *page)
 {
 	return __tlb_remove_page(tlb, page);
 }
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
+}
+
 /*
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 21ae8f5546b2..025cdb1032f6 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -109,12 +109,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	__tlb_remove_page(tlb, page);
 }
 
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
 static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
 					 struct page *page)
 {
 	return __tlb_remove_page(tlb, page);
 }
 
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
+}
+
 #define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
 #define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
 #define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 3dc4cbb3c2c0..821ff0acfe17 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -110,12 +110,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	__tlb_remove_page(tlb, page);
 }
 
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
 static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
 					 struct page *page)
 {
 	return __tlb_remove_page(tlb, page);
 }
 
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
+}
+
 /**
  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  *
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 7b899a46a4cb..c6d667187608 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,6 +112,7 @@ struct mmu_gather {
 	 * that that we can adjust the range after the flush
 	 */
 	unsigned long		addr;
+	int			page_size;
 };
 
 #define HAVE_GENERIC_MMU_GATHER
@@ -120,7 +121,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 							unsigned long end);
-bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+				   int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address)
@@ -145,23 +147,36 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	}
 }
 
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	if (__tlb_remove_page_size(tlb, page, page_size)) {
+		tlb_flush_mmu(tlb);
+		tlb->page_size = page_size;
+		__tlb_adjust_range(tlb, tlb->addr);
+		__tlb_remove_page_size(tlb, page, page_size);
+	}
+}
+
+static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
 /* tlb_remove_page
  *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
  *	required.
 */
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (__tlb_remove_page(tlb, page)) {
-		tlb_flush_mmu(tlb);
-		__tlb_adjust_range(tlb, tlb->addr);
-		__tlb_remove_page(tlb, page);
-	}
+	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
 }
 
 static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
 					 struct page *page)
 {
 	/* active->nr should be zero when we call this */
 	VM_BUG_ON_PAGE(tlb->active->nr, page);
+	tlb->page_size = PAGE_SIZE;
 	__tlb_adjust_range(tlb, tlb->addr);
 	return __tlb_remove_page(tlb, page);
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 343a2b7e57aa..23d1bf42fef1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1689,7 +1689,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
 		atomic_long_dec(&tlb->mm->nr_ptes);
 		spin_unlock(ptl);
-		tlb_remove_page(tlb, page);
+		tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
 	}
 	return 1;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 524c078ce67b..a9a8c313d133 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3250,7 +3250,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 		page_remove_rmap(page, true);
 		spin_unlock(ptl);
-		tlb_remove_page(tlb, page);
+		tlb_remove_page_size(tlb, page, huge_page_size(h));
 		/*
 		 * Bail out after unmapping reference page if supplied
 		 */
diff --git a/mm/memory.c b/mm/memory.c
index 12f31501c323..a329149e1c54 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -233,6 +233,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
 #endif
+	tlb->page_size = 0;
 
 	__tlb_reset_range(tlb);
 }
@@ -294,12 +295,19 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
  *	When out of page slots we must call tlb_flush_mmu().
  *returns true if the caller should flush.
  */
-bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
 {
 	struct mmu_gather_batch *batch;
 
 	VM_BUG_ON(!tlb->end);
 
+	if (!tlb->page_size)
+		tlb->page_size = page_size;
+	else {
+		if (page_size != tlb->page_size)
+			return true;
+	}
+
 	batch = tlb->active;
 	if (batch->nr == batch->max) {
 		if (!tlb_next_batch(tlb))
--
cgit v1.2.3
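The size tracking added above is easiest to see as a usage trace: the
first page batched latches tlb->page_size, and a later page of a
different size makes __tlb_remove_page_size() return true, which forces
a flush and starts a fresh gather.  A minimal sketch following the
generic code in include/asm-generic/tlb.h and mm/memory.c (mm, page,
huge_page, start and end are assumed to be set up by the caller;
HPAGE_PMD_SIZE stands in for any huge page size):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* tlb.page_size == 0 */

	/* First removal latches the gather's page size. */
	tlb_remove_page_size(&tlb, page, PAGE_SIZE);

	/*
	 * A huge page now arrives: __tlb_remove_page_size() sees the
	 * size mismatch and returns true, so tlb_remove_page_size()
	 * flushes, re-latches tlb.page_size to HPAGE_PMD_SIZE, and
	 * batches the huge page in the fresh gather.
	 */
	tlb_remove_page_size(&tlb, huge_page, HPAGE_PMD_SIZE);

	tlb_finish_mmu(&tlb, start, end);

This is exactly the pattern the mm/huge_memory.c and mm/hugetlb.c hunks
above switch to, with the page size supplied at each call site.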
From dcddffd41d3f1d3bdcc1dce3f1cd142779b6d4c1 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Tue, 26 Jul 2016 15:25:18 -0700
Subject: mm: do not pass mm_struct into handle_mm_fault

We always have vma->vm_mm around.

Link: http://lkml.kernel.org/r/1466021202-61880-8-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/alpha/mm/fault.c         |  2 +-
 arch/arc/mm/fault.c           |  2 +-
 arch/arm/mm/fault.c           |  2 +-
 arch/arm64/mm/fault.c         |  2 +-
 arch/avr32/mm/fault.c         |  2 +-
 arch/cris/mm/fault.c          |  2 +-
 arch/frv/mm/fault.c           |  2 +-
 arch/hexagon/mm/vm_fault.c    |  2 +-
 arch/ia64/mm/fault.c          |  2 +-
 arch/m32r/mm/fault.c          |  2 +-
 arch/m68k/mm/fault.c          |  2 +-
 arch/metag/mm/fault.c         |  2 +-
 arch/microblaze/mm/fault.c    |  2 +-
 arch/mips/mm/fault.c          |  2 +-
 arch/mn10300/mm/fault.c       |  2 +-
 arch/nios2/mm/fault.c         |  2 +-
 arch/openrisc/mm/fault.c      |  2 +-
 arch/parisc/mm/fault.c        |  2 +-
 arch/powerpc/mm/copro_fault.c |  2 +-
 arch/powerpc/mm/fault.c       |  2 +-
 arch/s390/mm/fault.c          |  2 +-
 arch/score/mm/fault.c         |  2 +-
 arch/sh/mm/fault.c            |  2 +-
 arch/sparc/mm/fault_32.c      |  4 ++--
 arch/sparc/mm/fault_64.c      |  2 +-
 arch/tile/mm/fault.c          |  2 +-
 arch/um/kernel/trap.c         |  2 +-
 arch/unicore32/mm/fault.c     |  2 +-
 arch/x86/mm/fault.c           |  2 +-
 arch/xtensa/mm/fault.c        |  2 +-
 drivers/iommu/amd_iommu_v2.c  |  3 +--
 drivers/iommu/intel-svm.c     |  2 +-
 include/linux/mm.h            |  9 ++++-----
 mm/gup.c                      |  5 ++---
 mm/ksm.c                      |  5 ++---
 mm/memory.c                   | 13 +++++++------
 36 files changed, 48 insertions(+), 51 deletions(-)

(limited to 'arch/sh')

diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4a905bd667e2..83e9eee57a55 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -147,7 +147,7 @@ retry:
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index af63f4a13e60..e94e5aa33985 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -137,7 +137,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
 	if (unlikely(fatal_signal_pending(current))) {
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index ad5841856007..3a2e678b8d30 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -243,7 +243,7 @@ good_area:
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(vma, addr & PAGE_MASK, flags);
 
 check_stack:
 	/* Don't allow expansion below FIRST_USER_ADDRESS */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index b1166d1e5955..031820d989a8 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -233,7 +233,7 @@ good_area:
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
+	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index c03533937a9f..a4b7edac8f10 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -134,7 +134,7 @@ good_area:
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 3066d40a6db1..112ef26c7f2e 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -168,7 +168,7 @@ retry:
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 61d99767fe16..614a46c413d2 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -164,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, flags);
+	fault = handle_mm_fault(vma, ear0, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 8704c9320032..bd7c251e2bce 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -101,7 +101,7 @@ good_area:
 		break;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 70b40d1205a6..fa6ad95e992e 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -159,7 +159,7 @@ retry:
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 8f9875b7933d..a3785d3644c2 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -196,7 +196,7 @@ good_area:
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, flags);
+	fault = handle_mm_fault(vma, addr, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 6a94cdd0c830..bd66a0b20c6b 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -136,7 +136,7 @@ good_area:
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	pr_debug("handle_mm_fault returns %d\n", fault);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index f57edca63609..372783a67dda 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -133,7 +133,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return 0;
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 177dfc003643..abb678ccde6f 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -216,7 +216,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 4b88fa031891..9560ad731120 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -153,7 +153,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 4a1d181ed32f..f23781d6bbb3 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -254,7 +254,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index b51878b0c6b8..affc4eb3f89e 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -131,7 +131,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 230ac20ae794..e94cd225e816 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -163,7 +163,7 @@ good_area:
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 16dbe81c97c9..163af2c31d76 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -239,7 +239,7 @@ good_area:
 	 * fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 6527882ce05e..bb0354222b11 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -75,7 +75,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	}
 
 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
+	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a67c6d781c52..a4db22f65021 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -429,7 +429,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
 		if (fault & VM_FAULT_SIGSEGV)
 			goto bad_area;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 19288c1b36d3..6c47488745ae 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -456,7 +456,7 @@ retry:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	/* No reason to continue if interrupted by SIGKILL. */
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
 		fault = VM_FAULT_SIGNAL;
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 37a6c2e0e969..995b71e4db4b 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -111,7 +111,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 79d8276377d1..9bf876780cef 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -487,7 +487,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
 		if (mm_fault_error(regs, error_code, address, fault))
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b6c559cbd64d..4714061d6cd3 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -411,7 +411,7 @@ good_area:
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, flags)) {
+	switch (handle_mm_fault(vma, address, flags)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index cb841a33da59..6c43b924a7a2 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -436,7 +436,7 @@ good_area:
 		goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		goto exit_exception;
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 26734214818c..beba986589e5 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -434,7 +434,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return 0;
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 98783dd0fa2e..ad8f206ab5e8 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -73,7 +73,7 @@ good_area:
 	do {
 		int fault;
 
-		fault = handle_mm_fault(mm, vma, address, flags);
+		fault = handle_mm_fault(vma, address, flags);
 
 		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 			goto out_nosemaphore;
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 2ec3d3adcefc..6c7f70bcaae3 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -194,7 +194,7 @@ good_area:
 	 * If for any reason at all we couldn't handle the fault, make
 	 * sure we exit gracefully rather than endlessly redo the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	fault = handle_mm_fault(vma, addr & PAGE_MASK, flags);
 	return fault;
 
 check_stack:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index d22161ab941d..dc8023060456 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1353,7 +1353,7 @@ good_area:
 	 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	major |= fault & VM_FAULT_MAJOR;
 
 	/*
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 7f4a1fdb1502..2725e08ef353 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -110,7 +110,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 56999d2fac07..fbdaf81ae925 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -538,8 +538,7 @@ static void do_fault(struct work_struct *work)
 	if (access_error(vma, fault))
 		goto out;
 
-	ret = handle_mm_fault(mm, vma, address, flags);
-
+	ret = handle_mm_fault(vma, address, flags);
 out:
 	up_read(&mm->mmap_sem);
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index d9939fa9b588..8ebb3530afa7 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -583,7 +583,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (access_error(vma, req))
 			goto invalid;
 
-		ret = handle_mm_fault(svm->mm, vma, address,
+		ret = handle_mm_fault(vma, address,
 				      req->wr_req ? FAULT_FLAG_WRITE : 0);
 		if (ret & VM_FAULT_ERROR)
 			goto invalid;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6c9a394b2979..646bc36b4d1b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1215,15 +1215,14 @@ int generic_error_remove_page(struct address_space *mapping,
 			      struct page *page);
 int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
-extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, unsigned int flags);
+extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags);
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
 #else
-static inline int handle_mm_fault(struct mm_struct *mm,
-			struct vm_area_struct *vma, unsigned long address,
-			unsigned int flags)
+static inline int handle_mm_fault(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags)
 {
 	/* should never happen if there's no MMU */
 	BUG();
diff --git a/mm/gup.c b/mm/gup.c
index dee142e100f4..9671e29f8ffd 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -352,7 +352,6 @@ unmap:
 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 		unsigned long address, unsigned int *flags, int *nonblocking)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	unsigned int fault_flags = 0;
 	int ret;
 
@@ -377,7 +376,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 		fault_flags |= FAULT_FLAG_TRIED;
 	}
 
-	ret = handle_mm_fault(mm, vma, address, fault_flags);
+	ret = handle_mm_fault(vma, address, fault_flags);
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
 			return -ENOMEM;
@@ -692,7 +691,7 @@ retry:
 		if (!vma_permits_fault(vma, fault_flags))
 			return -EFAULT;
 
-		ret = handle_mm_fault(mm, vma, address, fault_flags);
+		ret = handle_mm_fault(vma, address, fault_flags);
 		major |= ret & VM_FAULT_MAJOR;
 		if (ret & VM_FAULT_ERROR) {
 			if (ret & VM_FAULT_OOM)
diff --git a/mm/ksm.c b/mm/ksm.c
index 35b8aef867a9..73d43bafd9fb 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,9 +376,8 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 		if (IS_ERR_OR_NULL(page))
 			break;
 		if (PageKsm(page))
-			ret = handle_mm_fault(vma->vm_mm, vma, addr,
-							FAULT_FLAG_WRITE |
-							FAULT_FLAG_REMOTE);
+			ret = handle_mm_fault(vma, addr,
+					FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
 		else
 			ret = VM_FAULT_WRITE;
 		put_page(page);
diff --git a/mm/memory.c b/mm/memory.c
index 5e6eadd127e7..6bf2b8564376 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3420,9 +3420,10 @@ unlock:
  * The mmap_sem may have been released depending on flags and our
  * return value. See filemap_fault() and __lock_page_or_retry().
  */
-static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			     unsigned long address, unsigned int flags)
+static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
@@ -3509,15 +3510,15 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  * The mmap_sem may have been released depending on flags and our
  * return value. See filemap_fault() and __lock_page_or_retry().
  */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		    unsigned long address, unsigned int flags)
+int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags)
 {
 	int ret;
 
 	__set_current_state(TASK_RUNNING);
 
 	count_vm_event(PGFAULT);
-	mem_cgroup_count_vm_event(mm, PGFAULT);
+	mem_cgroup_count_vm_event(vma->vm_mm, PGFAULT);
 
 	/* do counter updates before entering really critical section. */
 	check_sync_rss_stat(current);
@@ -3529,7 +3530,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (flags & FAULT_FLAG_USER)
 		mem_cgroup_oom_enable();
 
-	ret = __handle_mm_fault(mm, vma, address, flags);
+	ret = __handle_mm_fault(vma, address, flags);
 
 	if (flags & FAULT_FLAG_USER) {
 		mem_cgroup_oom_disable();
--
cgit v1.2.3
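The conversion is mechanical because the mm is always recoverable from
the vma, as the __handle_mm_fault() hunk above shows with its new
"mm = vma->vm_mm" line.  A caller that still needs the mm for other
work keeps its local variable and simply stops passing it down; a
generic sketch of the post-change caller shape (illustrative, not a
specific call site from this patch; mm and address are assumed to be
in scope, and mmap_sem is the mm lock of this era):

	struct vm_area_struct *vma;
	int fault;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (vma && vma->vm_start <= address)
		/* mm is no longer passed; the callee uses vma->vm_mm. */
		fault = handle_mm_fault(vma, address, FAULT_FLAG_WRITE);
	up_read(&mm->mmap_sem);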