Diffstat (limited to 'arch/powerpc/mm/book3s64')
-rw-r--r--  arch/powerpc/mm/book3s64/hash_native.c    |  8
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c     | 12
-rw-r--r--  arch/powerpc/mm/book3s64/internal.h       |  2
-rw-r--r--  arch/powerpc/mm/book3s64/mmu_context.c    |  4
-rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c  | 10
-rw-r--r--  arch/powerpc/mm/book3s64/radix_tlb.c      | 35
-rw-r--r--  arch/powerpc/mm/book3s64/slb.c            |  4
7 files changed, 47 insertions, 28 deletions
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index cf20e5229ce1..0203cdf48c54 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -82,7 +82,7 @@ static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
 	for (set = 0; set < num_sets; set++)
 		tlbiel_hash_set_isa206(set, is);
 
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 }
 
 static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
@@ -110,7 +110,7 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
 	 */
 	tlbiel_hash_set_isa300(0, is, 0, 2, 1);
 
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 
 	asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
 }
@@ -303,7 +303,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
 		__tlbiel(vpn, psize, apsize, ssize);
-		asm volatile("ptesync": : :"memory");
+		ppc_after_tlbiel_barrier();
 	} else {
 		__tlbie(vpn, psize, apsize, ssize);
 		fixup_tlbie_vpn(vpn, psize, apsize, ssize);
@@ -879,7 +879,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 				__tlbiel(vpn, psize, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
-		asm volatile("ptesync":::"memory");
+		ppc_after_tlbiel_barrier();
 	} else {
 		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index b830adee51f5..24702c0a92e0 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -260,8 +260,12 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
 	    vstart, vend, pstart, prot, psize, ssize);
 
-	for (vaddr = vstart, paddr = pstart; vaddr < vend;
-	     vaddr += step, paddr += step) {
+	/* Carefully map only the possible range */
+	vaddr = ALIGN(vstart, step);
+	paddr = ALIGN(pstart, step);
+	vend = ALIGN_DOWN(vend, step);
+
+	for (; vaddr < vend; vaddr += step, paddr += step) {
 		unsigned long hash, hpteg;
 		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
 		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
@@ -343,7 +347,9 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
 	if (!mmu_hash_ops.hpte_removebolted)
 		return -ENODEV;
 
-	for (vaddr = vstart; vaddr < vend; vaddr += step) {
+	/* Unmap the full range specified */
+	vaddr = ALIGN_DOWN(vstart, step);
+	for (; vaddr < vend; vaddr += step) {
 		rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
 		if (rc == -ENOENT) {
 			ret = -ENOENT;
diff --git a/arch/powerpc/mm/book3s64/internal.h b/arch/powerpc/mm/book3s64/internal.h
index 7eda0d30d765..c12d78ee42f5 100644
--- a/arch/powerpc/mm/book3s64/internal.h
+++ b/arch/powerpc/mm/book3s64/internal.h
@@ -13,4 +13,6 @@ static inline bool stress_slb(void)
 	return static_branch_unlikely(&stress_slb_key);
 }
 
+void slb_setup_new_exec(void);
+
 #endif /* ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H */
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index 0ba30b8b935b..1c54821de7bf 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -21,6 +21,8 @@
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 
+#include "internal.h"
+
 static DEFINE_IDA(mmu_context_ida);
 
 static int alloc_context_id(int min_id, int max_id)
@@ -48,8 +50,6 @@ int hash__alloc_context_id(void)
 }
 EXPORT_SYMBOL_GPL(hash__alloc_context_id);
 
-void slb_setup_new_exec(void);
-
 static int realloc_context_ids(mm_context_t *ctx)
 {
 	int i, id;
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index cc72666e891a..3adcf730f478 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -34,7 +34,7 @@
 
 unsigned int mmu_pid_bits;
 unsigned int mmu_base_pid;
-unsigned int radix_mem_block_size __ro_after_init;
+unsigned long radix_mem_block_size __ro_after_init;
 
 static __ref void *early_alloc_pgtable(unsigned long size, int nid,
 			unsigned long region_start, unsigned long region_end)
@@ -276,6 +276,7 @@ static int __meminit create_physical_mapping(unsigned long start,
 	int psize;
 
 	start = ALIGN(start, PAGE_SIZE);
+	end = ALIGN_DOWN(end, PAGE_SIZE);
 	for (addr = start; addr < end; addr += mapping_size) {
 		unsigned long gap, previous_size;
 		int rc;
@@ -497,7 +498,7 @@ static int __init probe_memory_block_size(unsigned long node, const char *uname,
 					  depth, void *data)
 {
 	unsigned long *mem_block_size = (unsigned long *)data;
-	const __be64 *prop;
+	const __be32 *prop;
 	int len;
 
 	if (depth != 1)
@@ -507,13 +508,14 @@ static int __init probe_memory_block_size(unsigned long node, const char *uname,
 		return 0;
 
 	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
-	if (!prop || len < sizeof(__be64))
+
+	if (!prop || len < dt_root_size_cells * sizeof(__be32))
 		/*
 		 * Nothing in the device tree
 		 */
 		*mem_block_size = MIN_MEMORY_BLOCK_SIZE;
 	else
-		*mem_block_size = be64_to_cpup(prop);
+		*mem_block_size = of_read_number(prop, dt_root_size_cells);
 
 	return 1;
 }
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 0d233763441f..b487b489d4b6 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -65,7 +65,7 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
 	for (set = 1; set < num_sets; set++)
 		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
 
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 }
 
 void radix__tlbiel_all(unsigned int action)
@@ -296,7 +296,7 @@ static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 
 	/* For PWC, only one flush is needed */
 	if (ric == RIC_FLUSH_PWC) {
-		asm volatile("ptesync": : :"memory");
+		ppc_after_tlbiel_barrier();
 		return;
 	}
 
@@ -304,7 +304,7 @@ static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 	for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
 		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);
 
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 	asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory");
 }
 
@@ -431,7 +431,7 @@ static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
 
 	asm volatile("ptesync": : :"memory");
 	__tlbiel_va(va, pid, ap, ric);
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 }
 
 static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
@@ -442,7 +442,7 @@ static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
 	if (also_pwc)
 		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
 	__tlbiel_va_range(start, end, pid, page_size, psize);
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 }
 
 static inline void __tlbie_va_range(unsigned long start, unsigned long end,
@@ -645,19 +645,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
 	struct mm_struct *mm = arg;
 	unsigned long pid = mm->context.id;
 
+	/*
+	 * A kthread could have done a mmget_not_zero() after the flushing CPU
+	 * checked mm_is_singlethreaded, and be in the process of
+	 * kthread_use_mm when interrupted here. In that case, current->mm will
+	 * be set to mm, because kthread_use_mm() setting ->mm and switching to
+	 * the mm is done with interrupts off.
+	 */
 	if (current->mm == mm)
-		return; /* Local CPU */
+		goto out_flush;
 
 	if (current->active_mm == mm) {
-		/*
-		 * Must be a kernel thread because sender is single-threaded.
-		 */
-		BUG_ON(current->mm);
+		WARN_ON_ONCE(current->mm != NULL);
+		/* Is a kernel thread and is using mm as the lazy tlb */
 		mmgrab(&init_mm);
-		switch_mm(mm, &init_mm, current);
 		current->active_mm = &init_mm;
+		switch_mm_irqs_off(mm, &init_mm, current);
 		mmdrop(mm);
 	}
+
+	atomic_dec(&mm->context.active_cpus);
+	cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
+
+out_flush:
 	_tlbiel_pid(pid, RIC_FLUSH_ALL);
 }
 
@@ -672,7 +682,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
 	 */
 	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
 				(void *)mm, 1);
-	mm_reset_thread_local(mm);
 }
 
 void radix__flush_tlb_mm(struct mm_struct *mm)
@@ -940,7 +949,7 @@ is_local:
 		if (hflush)
 			__tlbiel_va_range(hstart, hend, pid,
 					PMD_SIZE, MMU_PAGE_2M);
-		asm volatile("ptesync": : :"memory");
+		ppc_after_tlbiel_barrier();
 	} else if (cputlb_use_tlbie()) {
 		asm volatile("ptesync": : :"memory");
 		__tlbie_va_range(start, end, pid, page_size,
 				mmu_virtual_psize);
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index 156c38f89511..c30fcbfa0e32 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -765,8 +765,8 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
 
 	if (id == LINEAR_MAP_REGION_ID) {
 
-		/* We only support up to MAX_PHYSMEM_BITS */
-		if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS))
+		/* We only support up to H_MAX_PHYSMEM_BITS */
+		if ((ea & EA_MASK) > (1UL << H_MAX_PHYSMEM_BITS))
 			return -EFAULT;
 
 		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;
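
The htab_bolt_mapping()/htab_remove_mapping() hunks above turn on one piece of arithmetic: the bolt path rounds the range inward (vstart/pstart up, vend down) so it never creates a bolted translation outside the range it was given, while the removal path rounds vstart down so every step the range touches is torn down. Below is a minimal stand-alone sketch of that rounding, not kernel code: the step and range values are made up, printf stands in for the real HPTE insert/remove, and ALIGN/ALIGN_DOWN are written out as the power-of-two rounding the kernel macros perform for these step sizes.

#include <stdio.h>

/* Power-of-two rounding; equivalent to the kernel's ALIGN/ALIGN_DOWN
 * for the power-of-two step sizes used in these paths. */
#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))
#define ALIGN(x, a)		ALIGN_DOWN((x) + (a) - 1, (a))

int main(void)
{
	unsigned long step   = 0x1000000UL;	/* hypothetical 16MB bolted step */
	unsigned long vstart = 0x10123000UL;	/* hypothetical, not step-aligned */
	unsigned long vend   = 0x14456000UL;

	/* Bolt path: shrink inward so only fully covered steps are mapped. */
	for (unsigned long vaddr = ALIGN(vstart, step);
	     vaddr < ALIGN_DOWN(vend, step); vaddr += step)
		printf("bolt   0x%lx..0x%lx\n", vaddr, vaddr + step);

	/* Removal path: round the start down so every touched step is hit. */
	for (unsigned long vaddr = ALIGN_DOWN(vstart, step);
	     vaddr < vend; vaddr += step)
		printf("remove 0x%lx..0x%lx\n", vaddr, vaddr + step);

	return 0;
}

The asymmetry matches the two comments in the hunks: mapping a partially covered step would bolt memory the caller never asked for, whereas removing a partially covered step only drops translations inside the range the caller said to unmap.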