author     Linus Torvalds <torvalds@linux-foundation.org>   2015-11-03 13:21:09 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-11-03 13:21:09 -0800
commit     2c2b8285dcd4d0674b6e77269cf32721fffea59e (patch)
tree       47569549a3f1587c82401bd56a3422f553131389 /mm
parent     0921f1efb605d8fda43d794734222d1ad39c6840 (diff)
parent     5a364c2a1762e8a78721fafc93144509c0b6cb84 (diff)
download   linux-2c2b8285dcd4d0674b6e77269cf32721fffea59e.tar.bz2
Merge tag 'arc-4.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC updates from Vineet Gupta:

 - Support for new MM features in ARCv2 cores (THP, PAE40); some generic
   THP bits are touched - all ACKed by Kirill

 - Platform framework updates to prepare for EZChip's arrival (still in
   the works)

 - ARC public mailing list finally set up (linux-snps-arc@lists.infradead.org)
* tag 'arc-4.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (42 commits)
ARC: mm: PAE40 support
ARC: mm: PAE40: tlbex.S: Explicitify the size of pte_t
ARC: mm: PAE40: switch to using phys_addr_t for physical addresses
ARC: mm: HIGHMEM: populate high memory from DT
ARC: mm: HIGHMEM: kmap API implementation
ARC: mm: preps ahead of HIGHMEM support #2
ARC: mm: preps ahead of HIGHMEM support
ARC: mm: use generic macros _BITUL()/_AC()
ARC: mm: Improve Duplicate PD Fault handler
MAINTAINERS: Add public mailing list for ARC
ARC: Ensure DT mem base is same as what kernel is built with
ARC: boot: Non Master cpus only need to call EARLY_CPU_SETUP once
ARCv2: smp: [plat-*]: No need to explicitly call mcip_init_smp()
ARC: smp: Introduce smp hook @init_irq_cpu called for all cores
ARC: smp: Rename platform hook @init_smp -> @init_cpu_smp
ARCv2: smp: [plat-*]: No need to explicitly call mcip_init_early_smp()
ARC: smp: Introduce smp hook @init_early_smp for Master core
ARC: remove @init_time, @init_irq platform callbacks
ARC: smp: irqchip: handle IPI as percpu irq like timer
ARC: boot: Support Halt-on-reset and Run-on-reset SMP booting modes
...
Diffstat (limited to 'mm')

 mm/huge_memory.c     |   2
 mm/pgtable-generic.c | 100
 2 files changed, 49 insertions(+), 53 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bbac913f96bc..3fd0311c3ba7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1880,7 +1880,7 @@ static int __split_huge_page_map(struct page *page,
 		 * here). But it is generally safer to never allow
 		 * small and huge TLB entries for the same virtual
 		 * address to be loaded simultaneously. So instead of
-		 * doing "pmd_populate(); flush_tlb_range();" we first
+		 * doing "pmd_populate(); flush_pmd_tlb_range();" we first
 		 * mark the current pmd notpresent (atomically because
 		 * here the pmd_trans_huge and pmd_trans_splitting
 		 * must remain set at all times on the pmd until the
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 6b674e00153c..7d3db0247983 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -57,35 +57,59 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pte_t *ptep)
+{
+	int young;
+	young = ptep_test_and_clear_young(vma, address, ptep);
+	if (young)
+		flush_tlb_page(vma, address);
+	return young;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
+		       pte_t *ptep)
+{
+	struct mm_struct *mm = (vma)->vm_mm;
+	pte_t pte;
+	pte = ptep_get_and_clear(mm, address, ptep);
+	if (pte_accessible(mm, pte))
+		flush_tlb_page(vma, address);
+	return pte;
+}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Even otherwise, it can help optimize the normal TLB flush
+ * in the THP regime. Stock flush_tlb_range() typically has an optimization
+ * to nuke the entire TLB if the flush span is greater than a threshold,
+ * which will likely be true for a single huge page. Thus a single THP flush
+ * will invalidate the entire TLB, which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#endif
+
 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
 int pmdp_set_access_flags(struct vm_area_struct *vma,
 			  unsigned long address, pmd_t *pmdp,
 			  pmd_t entry, int dirty)
 {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	int changed = !pmd_same(*pmdp, entry);
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	if (changed) {
 		set_pmd_at(vma->vm_mm, address, pmdp, entry);
-		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-	BUG();
-	return 0;
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-int ptep_clear_flush_young(struct vm_area_struct *vma,
-			   unsigned long address, pte_t *ptep)
-{
-	int young;
-	young = ptep_test_and_clear_young(vma, address, ptep);
-	if (young)
-		flush_tlb_page(vma, address);
-	return young;
 }
 #endif
 
@@ -94,33 +118,15 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
 			   unsigned long address, pmd_t *pmdp)
 {
 	int young;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-#else
-	BUG();
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 	young = pmdp_test_and_clear_young(vma, address, pmdp);
 	if (young)
-		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return young;
 }
 #endif
 
-#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
-		       pte_t *ptep)
-{
-	struct mm_struct *mm = (vma)->vm_mm;
-	pte_t pte;
-	pte = ptep_get_and_clear(mm, address, ptep);
-	if (pte_accessible(mm, pte))
-		flush_tlb_page(vma, address);
-	return pte;
-}
-#endif
-
 #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
 			    pmd_t *pmdp)
 {
@@ -128,14 +134,12 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(!pmd_trans_huge(*pmdp));
 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 			  pmd_t *pmdp)
 {
@@ -143,13 +147,11 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
 	/* tlb flush only to serialize against gup-fast */
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pgtable)
 {
@@ -162,11 +164,9 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
 	pmd_huge_pte(mm, pmdp) = pgtable;
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* no "address" argument so destroys page coloring of some arch */
 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 {
@@ -185,23 +185,19 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 	}
 	return pgtable;
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_INVALIDATE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
 	pmd_t entry = *pmdp;
 	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef pmdp_collapse_flush
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
 			  pmd_t *pmdp)
 {
@@ -214,8 +210,8 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(pmd_trans_huge(*pmdp));
 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
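
The generic flush_pmd_tlb_range() introduced above is only a weak default: an
architecture that can evict a single huge-page TLB entry defines
__HAVE_ARCH_FLUSH_PMD_TLB_RANGE and supplies its own version, which is what the
arch/arc port named in the comment does. A minimal sketch of such an override
follows; the placement and arch_evict_huge_tlb_entry() primitive are
hypothetical, invented for illustration, and are not the actual ARC code:

    /* arch/<foo>/include/asm/hugepage.h (illustrative placement) */
    #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

    /* arch/<foo>/mm/tlb.c: evict exactly one huge-page TLB entry
     * instead of letting the stock flush_tlb_range() heuristic nuke
     * the whole TLB because the span (HPAGE_PMD_SIZE) exceeds its
     * flush-everything threshold.
     */
    void flush_pmd_tlb_range(struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
    {
            /* hypothetical primitive: invalidate one huge entry */
            arch_evict_huge_tlb_entry(vma->vm_mm, start & HPAGE_PMD_MASK);
    }

Because the fallback is a plain #define to flush_tlb_range(), architectures
that never opt in keep exactly the old behavior.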
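
The one-line mm/huge_memory.c hunk only renames the flush quoted in a comment,
but the ordering rule that comment describes is worth spelling out: a small and
a huge TLB entry must never be loaded for the same virtual address at once.
Using only the helpers visible in this diff, the split-time sequence amounts to
the sketch below; haddr, pgtable, mm and the surrounding locking are assumed
from the caller's context and the variable names are placeholders:

    /* Do NOT do "pmd_populate(); flush_pmd_tlb_range();" -- first make
     * the pmd not-present and evict the huge TLB entry, only then
     * install the regular page table, so the two never coexist.
     */
    pmdp_invalidate(vma, haddr, pmd);  /* set_pmd_at(notpresent) + flush_pmd_tlb_range() */
    pmd_populate(mm, pmd, pgtable);    /* safe: the huge TLB entry is already gone */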