author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2018-09-18 14:51:50 +0200
committer	Ingo Molnar <mingo@kernel.org>	2019-04-03 10:32:55 +0200
commit		952a31c9e6fa963eabf3692f31a769e59f4c8303 (patch)
tree		43bfc72419969559a86bcc4f3ab93ad0c3c08e70
parent		6137fed0823247e32306bde2b48cac627c24f894 (diff)
download	linux-952a31c9e6fa963eabf3692f31a769e59f4c8303.tar.bz2
asm-generic/tlb: Introduce CONFIG_HAVE_MMU_GATHER_NO_GATHER=y
Add the Kconfig option HAVE_MMU_GATHER_NO_GATHER to the generic
mmu_gather code. If the option is set, the mmu_gather no longer
tracks individual pages for delayed page freeing. A platform that
enables the option needs to provide its own implementation of the
__tlb_remove_page_size() function to free pages.
No change in behavior intended.
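To make the arch-side contract concrete, here is a minimal sketch (illustration only, not part of this patch) of what an architecture selecting HAVE_MMU_GATHER_NO_GATHER might provide. It assumes the architecture has already flushed the TLB entry when the PTE was cleared, so each page can be freed immediately instead of being gathered:

/*
 * Hypothetical example, not taken from this patch: an architecture that
 * selects HAVE_MMU_GATHER_NO_GATHER supplies its own
 * __tlb_remove_page_size() and frees pages directly instead of queueing
 * them on a gather list.
 */
#include <linux/swap.h>		/* free_page_and_swap_cache() */
#include <asm/tlb.h>

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
			    int page_size)
{
	/*
	 * No gather list to fill: the TLB entry is assumed to have been
	 * flushed when the PTE was cleared, so the page (and its swap
	 * cache entry) can be released right away.
	 */
	free_page_and_swap_cache(page);

	/* Nothing is batched, so never ask the caller to force a flush. */
	return false;
}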
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: aneesh.kumar@linux.vnet.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: linux@armlinux.org.uk
Cc: npiggin@gmail.com
Link: http://lkml.kernel.org/r/20180918125151.31744-2-schwidefsky@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/Kconfig	3
-rw-r--r--	include/asm-generic/tlb.h	9
-rw-r--r--	mm/mmu_gather.c	107
3 files changed, 70 insertions, 49 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 04b3e8b94cfe..a826843470ed 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -389,6 +389,9 @@ config HAVE_RCU_TABLE_NO_INVALIDATE
 config HAVE_MMU_GATHER_PAGE_SIZE
 	bool
 
+config HAVE_MMU_GATHER_NO_GATHER
+	bool
+
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
 	bool
 
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 81799e6a4304..af20aa8255cd 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -191,6 +191,7 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
 #endif
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -215,6 +216,10 @@ struct mmu_gather_batch {
  */
 #define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
 
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+				   int page_size);
+#endif
+
 /*
  * struct mmu_gather is an opaque type used by the mm code for passing around
  * any data needed by arch specific code for tlb_remove_page.
@@ -261,6 +266,7 @@ struct mmu_gather {
 
 	unsigned int		batch_count;
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
@@ -268,6 +274,7 @@ struct mmu_gather {
 #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
 	unsigned int page_size;
 #endif
+#endif
 };
 
 void arch_tlb_gather_mmu(struct mmu_gather *tlb,
@@ -276,8 +283,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 			 unsigned long start, unsigned long end, bool force);
 void tlb_flush_mmu_free(struct mmu_gather *tlb);
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
-				   int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address,
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 2a5322d52b0a..ab220edcd7ef 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -13,6 +13,8 @@
 
 #ifdef HAVE_GENERIC_MMU_GATHER
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+
 static bool tlb_next_batch(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;
@@ -41,6 +43,56 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
 	return true;
 }
 
+static void tlb_batch_pages_flush(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
+
+	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
+		free_pages_and_swap_cache(batch->pages, batch->nr);
+		batch->nr = 0;
+	}
+	tlb->active = &tlb->local;
+}
+
+static void tlb_batch_list_free(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch, *next;
+
+	for (batch = tlb->local.next; batch; batch = next) {
+		next = batch->next;
+		free_pages((unsigned long)batch, 0);
+	}
+	tlb->local.next = NULL;
+}
+
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
+{
+	struct mmu_gather_batch *batch;
+
+	VM_BUG_ON(!tlb->end);
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	VM_WARN_ON(tlb->page_size != page_size);
+#endif
+
+	batch = tlb->active;
+	/*
+	 * Add the page and check if we are full. If so
+	 * force a flush.
+	 */
+	batch->pages[batch->nr++] = page;
+	if (batch->nr == batch->max) {
+		if (!tlb_next_batch(tlb))
+			return true;
+		batch = tlb->active;
+	}
+	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
+
+	return false;
+}
+
+#endif /* HAVE_MMU_GATHER_NO_GATHER */
+
 void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 			 unsigned long start, unsigned long end)
 {
@@ -48,12 +100,15 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 
 	/* Is it from 0 to ~0? */
 	tlb->fullmm     = !(start | (end+1));
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 	tlb->need_flush_all = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr   = 0;
 	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
 	tlb->active     = &tlb->local;
 	tlb->batch_count = 0;
+#endif
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
@@ -67,16 +122,12 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 
 void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
-	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
-		free_pages_and_swap_cache(batch->pages, batch->nr);
-		batch->nr = 0;
-	}
-	tlb->active = &tlb->local;
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb_batch_pages_flush(tlb);
+#endif
 }
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
@@ -92,8 +143,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 		unsigned long start, unsigned long end, bool force)
 {
-	struct mmu_gather_batch *batch, *next;
-
 	if (force) {
 		__tlb_reset_range(tlb);
 		__tlb_adjust_range(tlb, start, end - start);
@@ -103,45 +152,9 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-
-	for (batch = tlb->local.next; batch; batch = next) {
-		next = batch->next;
-		free_pages((unsigned long)batch, 0);
-	}
-	tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *	handling the additional races in SMP caused by other CPUs caching valid
- *	mappings in their TLBs. Returns the number of free page slots left.
- *	When out of page slots we must call tlb_flush_mmu().
- *returns true if the caller should flush.
- */
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
-{
-	struct mmu_gather_batch *batch;
-
-	VM_BUG_ON(!tlb->end);
-
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
-	VM_WARN_ON(tlb->page_size != page_size);
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb_batch_list_free(tlb);
 #endif
-
-	batch = tlb->active;
-	/*
-	 * Add the page and check if we are full. If so
-	 * force a flush.
-	 */
-	batch->pages[batch->nr++] = page;
-	if (batch->nr == batch->max) {
-		if (!tlb_next_batch(tlb))
-			return true;
-		batch = tlb->active;
-	}
-	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
-
-	return false;
 }
 
 #endif /* HAVE_GENERIC_MMU_GATHER */
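Since the new HAVE_MMU_GATHER_NO_GATHER symbol is a silent bool with no prompt, an architecture opts in by selecting it from its own Kconfig entry. A minimal, hypothetical fragment (the architecture name is a placeholder, not part of this patch):

config EXAMPLE_ARCH
	def_bool y
	# The architecture provides its own __tlb_remove_page_size(),
	# so opt out of the generic page gathering.
	select HAVE_MMU_GATHER_NO_GATHER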