author     Linus Torvalds <torvalds@linux-foundation.org>   2014-12-09 13:12:47 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-12-09 13:12:47 -0800
commit     b64bb1d758163814687eb3b84d74e56f04d0c9d1 (patch)
tree       59f1db8b718e98d13c6cf9d3486221cfff6e7eef /mm/memory.c
parent     50569687e9c688a8688982805be6d8e3c8879042 (diff)
parent     eb8a653137b7e74f7cdc01f814eb9d094a65aed9 (diff)
download   linux-b64bb1d758163814687eb3b84d74e56f04d0c9d1.tar.bz2
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
"Here's the usual mixed bag of arm64 updates, also including some
related EFI changes (Acked by Matt) and the MMU gather range cleanup
(Acked by you).
Changes include:
- support for alternative instruction patching from Andre
- seccomp from Akashi
- some AArch32 instruction emulation, required by the Android folks
- optimisations for exception entry/exit code, cmpxchg, pcpu atomics
- mmu_gather range calculations moved into core code
- EFI updates from Ard, including long-awaited SMBIOS support
- /proc/cpuinfo fixes to align with the format used by arch/arm/
- a few non-critical fixes across the architecture"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (70 commits)
arm64: remove the unnecessary arm64_swiotlb_init()
arm64: add module support for alternatives fixups
arm64: perf: Prevent wraparound during overflow
arm64/include/asm: Fixed a warning about 'struct pt_regs'
arm64: Provide a namespace to NCAPS
arm64: bpf: lift restriction on last instruction
arm64: Implement support for read-mostly sections
arm64: compat: align cacheflush syscall with arch/arm
arm64: add seccomp support
arm64: add SIGSYS siginfo for compat task
arm64: add seccomp syscall for compat task
asm-generic: add generic seccomp.h for secure computing mode 1
arm64: ptrace: allow tracer to skip a system call
arm64: ptrace: add NT_ARM_SYSTEM_CALL regset
arm64: Move some head.text functions to executable section
arm64: jump labels: NOP out NOP -> NOP replacement
arm64: add support to dump the kernel page tables
arm64: Add FIX_HOLE to permanent fixed addresses
arm64: alternatives: fix pr_fmt string for consistency
arm64: vmlinux.lds.S: don't discard .exit.* sections at link-time
...
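
Of the items above, the mmu_gather range cleanup is the one visible in the mm/memory.c diff below. For orientation, here is a minimal sketch of the generic range-tracking helpers that diff leans on, modelled on __tlb_adjust_range() and __tlb_reset_range() from include/asm-generic/tlb.h as merged here; this is a paraphrase for illustration, not the authoritative source (see the tree link above for that).

/*
 * Sketch of the generic mmu_gather range tracking added by this series
 * (paraphrased from include/asm-generic/tlb.h; names match the merge).
 */
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                                      unsigned long address)
{
        /* Grow the pending flush range to cover this page. */
        tlb->start = min(tlb->start, address);
        tlb->end   = max(tlb->end, address + PAGE_SIZE);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
        /*
         * start > end is the canonical "empty" state: the first
         * __tlb_adjust_range() shrinks start and grows end, and
         * tlb->end == 0 doubles as the "nothing to flush" test.
         */
        tlb->start = TASK_SIZE;
        tlb->end   = 0;
}
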
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  30
1 file changed, 8 insertions, 22 deletions
diff --git a/mm/memory.c b/mm/memory.c
index d5f2ae9c4a23..0b3f6c71620d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -220,9 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
         /* Is it from 0 to ~0? */
         tlb->fullmm = !(start | (end+1));
         tlb->need_flush_all = 0;
-        tlb->start = start;
-        tlb->end = end;
-        tlb->need_flush = 0;
         tlb->local.next = NULL;
         tlb->local.nr = 0;
         tlb->local.max = ARRAY_SIZE(tlb->__pages);
@@ -232,15 +229,20 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
         tlb->batch = NULL;
 #endif
+
+        __tlb_reset_range(tlb);
 }
 
 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-        tlb->need_flush = 0;
+        if (!tlb->end)
+                return;
+
         tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
         tlb_table_flush(tlb);
 #endif
+        __tlb_reset_range(tlb);
 }
 
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
@@ -256,8 +258,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-        if (!tlb->need_flush)
-                return;
         tlb_flush_mmu_tlbonly(tlb);
         tlb_flush_mmu_free(tlb);
 }
@@ -292,7 +292,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
         struct mmu_gather_batch *batch;
 
-        VM_BUG_ON(!tlb->need_flush);
+        VM_BUG_ON(!tlb->end);
 
         batch = tlb->active;
         batch->pages[batch->nr++] = page;
@@ -359,8 +359,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
         struct mmu_table_batch **batch = &tlb->batch;
 
-        tlb->need_flush = 1;
-
         /*
          * When there's less then two users of this mm there cannot be a
          * concurrent page-table walk.
@@ -1186,20 +1184,8 @@ again:
         arch_leave_lazy_mmu_mode();
 
         /* Do the actual TLB flush before dropping ptl */
-        if (force_flush) {
-                unsigned long old_end;
-
-                /*
-                 * Flush the TLB just for the previous segment,
-                 * then update the range to be the remaining
-                 * TLB range.
-                 */
-                old_end = tlb->end;
-                tlb->end = addr;
+        if (force_flush)
                 tlb_flush_mmu_tlbonly(tlb);
-                tlb->start = addr;
-                tlb->end = old_end;
-        }
         pte_unmap_unlock(start_pte, ptl);
 
         /*
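
The net effect: the need_flush flag is gone, an empty range (tlb->end == 0) now means "nothing to flush", and flushing resets the range, so zap_pte_range()'s open-coded save/restore of tlb->start/tlb->end becomes unnecessary. A self-contained toy model of that behaviour (hypothetical userspace C, not kernel code; all names here are invented for illustration):

/* Toy model: flush resets the range, so segments batch naturally. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define TASK_SIZE (1UL << 47)

struct gather { unsigned long start, end; };

static void reset_range(struct gather *g)
{
        g->start = TASK_SIZE;   /* empty: start > end */
        g->end = 0;
}

static void track(struct gather *g, unsigned long addr)
{
        /* Mirrors __tlb_adjust_range(): grow the pending range. */
        if (addr < g->start)
                g->start = addr;
        if (addr + PAGE_SIZE > g->end)
                g->end = addr + PAGE_SIZE;
}

static void flush(struct gather *g)
{
        if (!g->end)            /* mirrors the new tlb_flush_mmu_tlbonly() test */
                return;
        printf("flush [%#lx, %#lx)\n", g->start, g->end);
        reset_range(g);         /* mirrors __tlb_reset_range() after tlb_flush() */
}

int main(void)
{
        struct gather g;

        reset_range(&g);
        track(&g, 0x1000);
        track(&g, 0x3000);
        flush(&g);              /* first segment: [0x1000, 0x4000) */
        track(&g, 0x5000);      /* no manual start/end juggling needed */
        flush(&g);              /* second segment: [0x5000, 0x6000) */
        flush(&g);              /* empty range: no-op, like need_flush == 0 */
        return 0;
}

Because the range collapses to empty on every flush, a mid-walk flush (the force_flush case above) just leaves the gather ready to accumulate the remaining pages, which is exactly why the twelve lines of segment bookkeeping could be deleted.
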