author      Linus Torvalds <torvalds@linux-foundation.org>   2019-03-05 11:28:25 -0800
committer   Linus Torvalds <torvalds@linux-foundation.org>   2019-03-05 11:28:25 -0800
commit      d9862cfbe2099deb83f0e9c1932c91f2d9c50464 (patch)
tree        7092ef41113269f30b5429868a9d161e171c746d /arch/mips/mm
parent      8feed3efa8022107bcb3432ac3ec9917e078ae70 (diff)
parent      aeb669d41ffabb91b1542f1f802cb12a989fced0 (diff)
Merge tag 'mips_5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux
Pull MIPS updates from Paul Burton:
- Support for the MIPSr6 MemoryMapID register & Global INValidate TLB
  (GINVT) instructions, allowing for more efficient TLB maintenance
  when running on a CPU such as the I6500 that supports these (a
  simplified sketch of the new allocator follows this list).
- Enable huge page support for MIPS64r6.
- Optimize post-DMA cache sync by removing that code entirely for
kernel configurations in which we know it won't be needed.
- The number of pages allocated for interrupt stacks is now calculated
correctly, where before we would wastefully allocate too much memory
in some configurations.
- The ath79 platform migrates to devicetree.
- The bcm47xx platform sees fixes for the Buffalo WHR-G54S board.
- The ingenic/jz4740 platform gains support for appended devicetrees.
- The cavium_octeon, lantiq, loongson32 & sgi-ip27 platforms all see
  cleanups, as do various pieces of core architecture code.
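The MemoryMapID support in the first bullet above is built around a generation-based ID allocator, added as the new file arch/mips/mm/context.c further down in this diff. The fragment below is a deliberately simplified, single-threaded userspace sketch of just that rollover idea, assuming a tiny 4-bit ID space; the real code additionally keeps per-CPU reserved MMIDs across rollovers, publishes the active MMID with a lock-free cmpxchg, and uses GINVT to invalidate stale TLB entries. All names here are illustrative, not the kernel's.

/*
 * Simplified sketch of the generation-rollover ID allocation idea used
 * by the new MMID code in arch/mips/mm/context.c. Single-threaded,
 * userspace, illustrative sizes only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ID_BITS      4                      /* pretend the ID field is 4 bits wide */
#define NUM_IDS      (1u << ID_BITS)
#define ID_MASK      (NUM_IDS - 1)
#define VERSION_INC  NUM_IDS                /* the generation lives in the upper bits */

static uint64_t version = VERSION_INC;      /* current generation */
static uint8_t  id_in_use[NUM_IDS];         /* stands in for the mmid_map bitmap */
static int      tlb_flush_pending;          /* set on rollover, cleared after a local flush */

/* Allocate an ID for a context whose cached value is 'old' (0 if none yet). */
static uint64_t get_new_id(uint64_t old)
{
	/* Same generation and already allocated: keep using it. */
	if (old && (old & ~(uint64_t)ID_MASK) == version)
		return old;

	/* Try to re-take the same low bits in the new generation. */
	if (old && !id_in_use[old & ID_MASK]) {
		id_in_use[old & ID_MASK] = 1;
		return version | (old & ID_MASK);
	}

	/* Otherwise look for any free ID in this generation. */
	for (unsigned int i = 1; i < NUM_IDS; i++) {  /* ID 0 reserved, like MMID_KERNEL_WIRED */
		if (!id_in_use[i]) {
			id_in_use[i] = 1;
			return version | i;
		}
	}

	/* Out of IDs: bump the generation, require TLB flushes, start over. */
	version += VERSION_INC;
	memset(id_in_use, 0, sizeof(id_in_use));
	id_in_use[0] = 1;                       /* keep ID 0 reserved */
	tlb_flush_pending = 1;                  /* every CPU must flush before reusing IDs */
	id_in_use[1] = 1;
	return version | 1;
}

int main(void)
{
	id_in_use[0] = 1;                       /* reserve ID 0 for kmap/wired entries */
	uint64_t a = get_new_id(0), b = get_new_id(0);
	printf("a=%#llx b=%#llx\n", (unsigned long long)a, (unsigned long long)b);
	printf("a again=%#llx (reused within the same generation)\n",
	       (unsigned long long)get_new_id(a));
	return 0;
}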
* tag 'mips_5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (66 commits)
MIPS: lantiq: Remove separate GPHY Firmware loader
MIPS: ingenic: Add support for appended devicetree
MIPS: SGI-IP27: rework HUB interrupts
MIPS: SGI-IP27: do boot CPU init later
MIPS: SGI-IP27: do xtalk scanning later
MIPS: SGI-IP27: use pr_info/pr_emerg and pr_cont to fix output
MIPS: SGI-IP27: clean up bridge access and header files
MIPS: SGI-IP27: get rid of volatile and hubreg_t
MIPS: irq: Allocate accurate order pages for irq stack
MIPS: dma-noncoherent: Remove bogus condition in dma_sync_phys()
MIPS: eBPF: Remove REG_32BIT_ZERO_EX
MIPS: eBPF: Always return sign extended 32b values
MIPS: CM: Fix indentation
MIPS: BCM47XX: Fix/improve Buffalo WHR-G54S support
MIPS: OCTEON: program rx/tx-delay always from DT
MIPS: OCTEON: delete board-specific link status
MIPS: OCTEON: don't lie about interface type of CN3005 board
MIPS: OCTEON: warn if deprecated link status is being used
MIPS: OCTEON: add fixed-link nodes to in-kernel device tree
MIPS: Delete unused flush_cache_sigtramp()
...
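The interrupt-stack bullet above corresponds to the "MIPS: irq: Allocate accurate order pages for irq stack" commit in the list; the file it touches lives outside arch/mips/mm, so it does not appear in the diff below. As a rough, hedged illustration of the idea only, the snippet below computes the smallest power-of-two page order that covers a stack of a given size, which is what avoids asking for an extra page when the stack size already fits an exact number of pages; the sizes and function name are made up for the example.

#include <stdio.h>

/* Smallest order such that (page_size << order) >= size. */
static unsigned int order_for(unsigned long size, unsigned long page_size)
{
	unsigned int order = 0;
	unsigned long span = page_size;

	while (span < size) {
		span <<= 1;
		order++;
	}
	return order;
}

int main(void)
{
	/* A 16 KiB stack needs order 0 with 16 KiB pages, order 2 with 4 KiB pages. */
	printf("16 KiB stack, 16 KiB pages -> order %u\n", order_for(16384, 16384));
	printf("16 KiB stack,  4 KiB pages -> order %u\n", order_for(16384, 4096));
	return 0;
}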
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile           |  16
-rw-r--r--  arch/mips/mm/c-octeon.c         |  18
-rw-r--r--  arch/mips/mm/c-r3k.c            |  25
-rw-r--r--  arch/mips/mm/c-r4k.c            | 124
-rw-r--r--  arch/mips/mm/c-tx39.c           |  21
-rw-r--r--  arch/mips/mm/cache.c            |   1
-rw-r--r--  arch/mips/mm/context.c          | 291
-rw-r--r--  arch/mips/mm/dma-noncoherent.c  |   9
-rw-r--r--  arch/mips/mm/init.c             |   7
-rw-r--r--  arch/mips/mm/sc-debugfs.c       |  15
-rw-r--r--  arch/mips/mm/tlb-r3k.c          |  14
-rw-r--r--  arch/mips/mm/tlb-r4k.c          |  71
-rw-r--r--  arch/mips/mm/tlb-r8k.c          |  10
13 files changed, 365 insertions, 257 deletions
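The arch/mips/mm/dma-noncoherent.c entry above covers two hunks visible in the diff below: arch_sync_dma_for_cpu() is now built only when CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU is set, and a dead branch was dropped from dma_sync_phys(). The userspace sketch below mirrors that per-page walk to show why the removed "offset >= PAGE_SIZE" case could never fire: the offset starts below PAGE_SIZE and is reset to zero after the first page. sync_one_page() and the 4 KiB page size are placeholders, not kernel interfaces.

#include <stdio.h>

#define PAGE_SIZE 4096ul

/* Stand-in for the real per-page cache maintenance call. */
static void sync_one_page(unsigned long page, unsigned long offset, unsigned long len)
{
	printf("sync page %lu, offset %lu, len %lu\n", page, offset, len);
}

static void sync_phys(unsigned long paddr, unsigned long size)
{
	unsigned long page = paddr / PAGE_SIZE;
	unsigned long offset = paddr % PAGE_SIZE;   /* always < PAGE_SIZE */
	unsigned long left = size;

	do {
		unsigned long len = left;

		if (offset + len > PAGE_SIZE)       /* clamp to the end of this page */
			len = PAGE_SIZE - offset;

		sync_one_page(page, offset, len);

		offset = 0;                         /* later pages start at offset 0 */
		page++;
		left -= len;
	} while (left);
}

int main(void)
{
	sync_phys(0x1ff0, 0x30);                    /* a range crossing a page boundary */
	return 0;
}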
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 3e5bb203c95a..f34d7ff5eb60 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -3,9 +3,19 @@ # Makefile for the Linux/MIPS-specific parts of the memory manager. # -obj-y += cache.o extable.o fault.o \ - gup.o init.o mmap.o page.o page-funcs.o \ - pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o +obj-y += cache.o +obj-y += context.o +obj-y += extable.o +obj-y += fault.o +obj-y += gup.o +obj-y += init.o +obj-y += mmap.o +obj-y += page.o +obj-y += page-funcs.o +obj-y += pgtable.o +obj-y += tlbex.o +obj-y += tlbex-fault.o +obj-y += tlb-funcs.o ifdef CONFIG_CPU_MICROMIPS obj-y += uasm-micromips.o diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c index 0e45b061e514..8064821e9805 100644 --- a/arch/mips/mm/c-octeon.c +++ b/arch/mips/mm/c-octeon.c @@ -128,23 +128,6 @@ static void octeon_flush_icache_range(unsigned long start, unsigned long end) /** - * Flush the icache for a trampoline. These are used for interrupt - * and exception hooking. - * - * @addr: Address to flush - */ -static void octeon_flush_cache_sigtramp(unsigned long addr) -{ - struct vm_area_struct *vma; - - down_read(¤t->mm->mmap_sem); - vma = find_vma(current->mm, addr); - octeon_flush_icache_all_cores(vma); - up_read(¤t->mm->mmap_sem); -} - - -/** * Flush a range out of a vma * * @vma: VMA to flush @@ -289,7 +272,6 @@ void octeon_cache_init(void) flush_cache_mm = octeon_flush_cache_mm; flush_cache_page = octeon_flush_cache_page; flush_cache_range = octeon_flush_cache_range; - flush_cache_sigtramp = octeon_flush_cache_sigtramp; flush_icache_all = octeon_flush_icache_all; flush_data_cache_page = octeon_flush_data_cache_page; flush_icache_range = octeon_flush_icache_range; diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c index 01848cdf2074..0ca401ddf3b7 100644 --- a/arch/mips/mm/c-r3k.c +++ b/arch/mips/mm/c-r3k.c @@ -274,30 +274,6 @@ static void r3k_flush_data_cache_page(unsigned long addr) { } -static void r3k_flush_cache_sigtramp(unsigned long addr) -{ - unsigned long flags; - - pr_debug("csigtramp[%08lx]\n", addr); - - flags = read_c0_status(); - - write_c0_status(flags&~ST0_IEC); - - /* Fill the TLB to avoid an exception with caches isolated. 
*/ - asm( "lw\t$0, 0x000(%0)\n\t" - "lw\t$0, 0x004(%0)\n\t" - : : "r" (addr) ); - - write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC); - - asm( "sb\t$0, 0x000(%0)\n\t" - "sb\t$0, 0x004(%0)\n\t" - : : "r" (addr) ); - - write_c0_status(flags); -} - static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size) { BUG(); @@ -331,7 +307,6 @@ void r3k_cache_init(void) __flush_kernel_vmap_range = r3k_flush_kernel_vmap_range; - flush_cache_sigtramp = r3k_flush_cache_sigtramp; local_flush_data_cache_page = local_r3k_flush_data_cache_page; flush_data_cache_page = r3k_flush_data_cache_page; diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index d0b64df51eb2..5166e38cd1c6 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -540,6 +540,9 @@ static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type) unsigned int i; const cpumask_t *mask = cpu_present_mask; + if (cpu_has_mmid) + return cpu_context(0, mm) != 0; + /* cpu_sibling_map[] undeclared when !CONFIG_SMP */ #ifdef CONFIG_SMP /* @@ -697,10 +700,7 @@ static inline void local_r4k_flush_cache_page(void *args) } if (exec) { if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) { - int cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); } else vaddr ? r4k_blast_icache_page(addr) : r4k_blast_icache_user_page(addr); @@ -937,119 +937,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) } #endif /* CONFIG_DMA_NONCOHERENT */ -struct flush_cache_sigtramp_args { - struct mm_struct *mm; - struct page *page; - unsigned long addr; -}; - -/* - * While we're protected against bad userland addresses we don't care - * very much about what happens in that case. Usually a segmentation - * fault will dump the process later on anyway ... - */ -static void local_r4k_flush_cache_sigtramp(void *args) -{ - struct flush_cache_sigtramp_args *fcs_args = args; - unsigned long addr = fcs_args->addr; - struct page *page = fcs_args->page; - struct mm_struct *mm = fcs_args->mm; - int map_coherent = 0; - void *vaddr; - - unsigned long ic_lsize = cpu_icache_line_size(); - unsigned long dc_lsize = cpu_dcache_line_size(); - unsigned long sc_lsize = cpu_scache_line_size(); - - /* - * If owns no valid ASID yet, cannot possibly have gotten - * this page into the cache. - */ - if (!has_valid_asid(mm, R4K_HIT)) - return; - - if (mm == current->active_mm) { - vaddr = NULL; - } else { - /* - * Use kmap_coherent or kmap_atomic to do flushes for - * another ASID than the current one. - */ - map_coherent = (cpu_has_dc_aliases && - page_mapcount(page) && - !Page_dcache_dirty(page)); - if (map_coherent) - vaddr = kmap_coherent(page, addr); - else - vaddr = kmap_atomic(page); - addr = (unsigned long)vaddr + (addr & ~PAGE_MASK); - } - - R4600_HIT_CACHEOP_WAR_IMPL; - if (!cpu_has_ic_fills_f_dc) { - if (dc_lsize) - vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1)) - : protected_writeback_dcache_line( - addr & ~(dc_lsize - 1)); - if (!cpu_icache_snoops_remote_store && scache_size) - vaddr ? flush_scache_line(addr & ~(sc_lsize - 1)) - : protected_writeback_scache_line( - addr & ~(sc_lsize - 1)); - } - if (ic_lsize) - vaddr ? 
flush_icache_line(addr & ~(ic_lsize - 1)) - : protected_flush_icache_line(addr & ~(ic_lsize - 1)); - - if (vaddr) { - if (map_coherent) - kunmap_coherent(); - else - kunmap_atomic(vaddr); - } - - if (MIPS4K_ICACHE_REFILL_WAR) { - __asm__ __volatile__ ( - ".set push\n\t" - ".set noat\n\t" - ".set "MIPS_ISA_LEVEL"\n\t" -#ifdef CONFIG_32BIT - "la $at,1f\n\t" -#endif -#ifdef CONFIG_64BIT - "dla $at,1f\n\t" -#endif - "cache %0,($at)\n\t" - "nop; nop; nop\n" - "1:\n\t" - ".set pop" - : - : "i" (Hit_Invalidate_I)); - } - if (MIPS_CACHE_SYNC_WAR) - __asm__ __volatile__ ("sync"); -} - -static void r4k_flush_cache_sigtramp(unsigned long addr) -{ - struct flush_cache_sigtramp_args args; - int npages; - - down_read(¤t->mm->mmap_sem); - - npages = get_user_pages_fast(addr, 1, 0, &args.page); - if (npages < 1) - goto out; - - args.mm = current->mm; - args.addr = addr; - - r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args); - - put_page(args.page); -out: - up_read(¤t->mm->mmap_sem); -} - static void r4k_flush_icache_all(void) { if (cpu_has_vtag_icache) @@ -1978,7 +1865,6 @@ void r4k_cache_init(void) __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range; - flush_cache_sigtramp = r4k_flush_cache_sigtramp; flush_icache_all = r4k_flush_icache_all; local_flush_data_cache_page = local_r4k_flush_data_cache_page; flush_data_cache_page = r4k_flush_data_cache_page; @@ -2033,7 +1919,6 @@ void r4k_cache_init(void) /* I$ fills from D$ just by emptying the write buffers */ flush_cache_page = (void *)b5k_instruction_hazard; flush_cache_range = (void *)b5k_instruction_hazard; - flush_cache_sigtramp = (void *)b5k_instruction_hazard; local_flush_data_cache_page = (void *)b5k_instruction_hazard; flush_data_cache_page = (void *)b5k_instruction_hazard; flush_icache_range = (void *)b5k_instruction_hazard; @@ -2052,7 +1937,6 @@ void r4k_cache_init(void) flush_cache_mm = (void *)cache_noop; flush_cache_page = (void *)cache_noop; flush_cache_range = (void *)cache_noop; - flush_cache_sigtramp = (void *)cache_noop; flush_icache_all = (void *)cache_noop; flush_data_cache_page = (void *)cache_noop; local_flush_data_cache_page = (void *)cache_noop; diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c index 5f6c099a9457..b7c8a9d79c35 100644 --- a/arch/mips/mm/c-tx39.c +++ b/arch/mips/mm/c-tx39.c @@ -290,25 +290,6 @@ static void tx39_dma_cache_inv(unsigned long addr, unsigned long size) } } -static void tx39_flush_cache_sigtramp(unsigned long addr) -{ - unsigned long ic_lsize = current_cpu_data.icache.linesz; - unsigned long dc_lsize = current_cpu_data.dcache.linesz; - unsigned long config; - unsigned long flags; - - protected_writeback_dcache_line(addr & ~(dc_lsize - 1)); - - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - protected_flush_icache_line(addr & ~(ic_lsize - 1)); - write_c0_conf(config); - local_irq_restore(flags); -} - static __init void tx39_probe_cache(void) { unsigned long config; @@ -368,7 +349,6 @@ void tx39_cache_init(void) flush_icache_range = (void *) tx39h_flush_icache_all; local_flush_icache_range = (void *) tx39h_flush_icache_all; - flush_cache_sigtramp = (void *) tx39h_flush_icache_all; local_flush_data_cache_page = (void *) tx39h_flush_icache_all; flush_data_cache_page = (void *) tx39h_flush_icache_all; @@ -397,7 +377,6 @@ void tx39_cache_init(void) __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range; - flush_cache_sigtramp = tx39_flush_cache_sigtramp; local_flush_data_cache_page 
= local_tx39_flush_data_cache_page; flush_data_cache_page = tx39_flush_data_cache_page; diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 55099fbff4e6..3da216988672 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -47,7 +47,6 @@ void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size); EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range); /* MIPS specific cache operations */ -void (*flush_cache_sigtramp)(unsigned long addr); void (*local_flush_data_cache_page)(void * addr); void (*flush_data_cache_page)(unsigned long addr); void (*flush_icache_all)(void); diff --git a/arch/mips/mm/context.c b/arch/mips/mm/context.c new file mode 100644 index 000000000000..b25564090939 --- /dev/null +++ b/arch/mips/mm/context.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/atomic.h> +#include <linux/mmu_context.h> +#include <linux/percpu.h> +#include <linux/spinlock.h> + +static DEFINE_RAW_SPINLOCK(cpu_mmid_lock); + +static atomic64_t mmid_version; +static unsigned int num_mmids; +static unsigned long *mmid_map; + +static DEFINE_PER_CPU(u64, reserved_mmids); +static cpumask_t tlb_flush_pending; + +static bool asid_versions_eq(int cpu, u64 a, u64 b) +{ + return ((a ^ b) & asid_version_mask(cpu)) == 0; +} + +void get_new_mmu_context(struct mm_struct *mm) +{ + unsigned int cpu; + u64 asid; + + /* + * This function is specific to ASIDs, and should not be called when + * MMIDs are in use. + */ + if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid)) + return; + + cpu = smp_processor_id(); + asid = asid_cache(cpu); + + if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { + if (cpu_has_vtag_icache) + flush_icache_all(); + local_flush_tlb_all(); /* start new asid cycle */ + } + + set_cpu_context(cpu, mm, asid); + asid_cache(cpu) = asid; +} +EXPORT_SYMBOL_GPL(get_new_mmu_context); + +void check_mmu_context(struct mm_struct *mm) +{ + unsigned int cpu = smp_processor_id(); + + /* + * This function is specific to ASIDs, and should not be called when + * MMIDs are in use. + */ + if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid)) + return; + + /* Check if our ASID is of an older version and thus invalid */ + if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu))) + get_new_mmu_context(mm); +} +EXPORT_SYMBOL_GPL(check_mmu_context); + +static void flush_context(void) +{ + u64 mmid; + int cpu; + + /* Update the list of reserved MMIDs and the MMID bitmap */ + bitmap_clear(mmid_map, 0, num_mmids); + + /* Reserve an MMID for kmap/wired entries */ + __set_bit(MMID_KERNEL_WIRED, mmid_map); + + for_each_possible_cpu(cpu) { + mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0); + + /* + * If this CPU has already been through a + * rollover, but hasn't run another task in + * the meantime, we must preserve its reserved + * MMID, as this is the only trace we have of + * the process it is still running. + */ + if (mmid == 0) + mmid = per_cpu(reserved_mmids, cpu); + + __set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map); + per_cpu(reserved_mmids, cpu) = mmid; + } + + /* + * Queue a TLB invalidation for each CPU to perform on next + * context-switch + */ + cpumask_setall(&tlb_flush_pending); +} + +static bool check_update_reserved_mmid(u64 mmid, u64 newmmid) +{ + bool hit; + int cpu; + + /* + * Iterate over the set of reserved MMIDs looking for a match. + * If we find one, then we can update our mm to use newmmid + * (i.e. 
the same MMID in the current generation) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old MMID are updated to reflect the mm. Failure to do + * so could result in us missing the reserved MMID in a future + * generation. + */ + hit = false; + for_each_possible_cpu(cpu) { + if (per_cpu(reserved_mmids, cpu) == mmid) { + hit = true; + per_cpu(reserved_mmids, cpu) = newmmid; + } + } + + return hit; +} + +static u64 get_new_mmid(struct mm_struct *mm) +{ + static u32 cur_idx = MMID_KERNEL_WIRED + 1; + u64 mmid, version, mmid_mask; + + mmid = cpu_context(0, mm); + version = atomic64_read(&mmid_version); + mmid_mask = cpu_asid_mask(&boot_cpu_data); + + if (!asid_versions_eq(0, mmid, 0)) { + u64 newmmid = version | (mmid & mmid_mask); + + /* + * If our current MMID was active during a rollover, we + * can continue to use it and this was just a false alarm. + */ + if (check_update_reserved_mmid(mmid, newmmid)) { + mmid = newmmid; + goto set_context; + } + + /* + * We had a valid MMID in a previous life, so try to re-use + * it if possible. + */ + if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) { + mmid = newmmid; + goto set_context; + } + } + + /* Allocate a free MMID */ + mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx); + if (mmid != num_mmids) + goto reserve_mmid; + + /* We're out of MMIDs, so increment the global version */ + version = atomic64_add_return_relaxed(asid_first_version(0), + &mmid_version); + + /* Note currently active MMIDs & mark TLBs as requiring flushes */ + flush_context(); + + /* We have more MMIDs than CPUs, so this will always succeed */ + mmid = find_first_zero_bit(mmid_map, num_mmids); + +reserve_mmid: + __set_bit(mmid, mmid_map); + cur_idx = mmid; + mmid |= version; +set_context: + set_cpu_context(0, mm, mmid); + return mmid; +} + +void check_switch_mmu_context(struct mm_struct *mm) +{ + unsigned int cpu = smp_processor_id(); + u64 ctx, old_active_mmid; + unsigned long flags; + + if (!cpu_has_mmid) { + check_mmu_context(mm); + write_c0_entryhi(cpu_asid(cpu, mm)); + goto setup_pgd; + } + + /* + * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's + * unnecessary. + * + * The memory ordering here is subtle. If our active_mmids is non-zero + * and the MMID matches the current version, then we update the CPU's + * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover + * means that either: + * + * - We get a zero back from the cmpxchg and end up waiting on + * cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises + * with the rollover and so we are forced to see the updated + * generation. + * + * - We get a valid MMID back from the cmpxchg, which means the + * relaxed xchg in flush_context will treat us as reserved + * because atomic RmWs are totally ordered for a given location. + */ + ctx = cpu_context(cpu, mm); + old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache); + if (!old_active_mmid || + !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) || + !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) { + raw_spin_lock_irqsave(&cpu_mmid_lock, flags); + + ctx = cpu_context(cpu, mm); + if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version))) + ctx = get_new_mmid(mm); + + WRITE_ONCE(cpu_data[cpu].asid_cache, ctx); + raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags); + } + + /* + * Invalidate the local TLB if needed. 
Note that we must only clear our + * bit in tlb_flush_pending after this is complete, so that the + * cpu_has_shared_ftlb_entries case below isn't misled. + */ + if (cpumask_test_cpu(cpu, &tlb_flush_pending)) { + if (cpu_has_vtag_icache) + flush_icache_all(); + local_flush_tlb_all(); + cpumask_clear_cpu(cpu, &tlb_flush_pending); + } + + write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data)); + + /* + * If this CPU shares FTLB entries with its siblings and one or more of + * those siblings hasn't yet invalidated its TLB following a version + * increase then we need to invalidate any TLB entries for our MMID + * that we might otherwise pick up from a sibling. + * + * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in + * CONFIG_SMP=n kernels. + */ +#ifdef CONFIG_SMP + if (cpu_has_shared_ftlb_entries && + cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) { + /* Ensure we operate on the new MMID */ + mtc0_tlbw_hazard(); + + /* + * Invalidate all TLB entries associated with the new + * MMID, and wait for the invalidation to complete. + */ + ginvt_mmid(); + sync_ginv(); + } +#endif + +setup_pgd: + TLBMISS_HANDLER_SETUP_PGD(mm->pgd); +} +EXPORT_SYMBOL_GPL(check_switch_mmu_context); + +static int mmid_init(void) +{ + if (!cpu_has_mmid) + return 0; + + /* + * Expect allocation after rollover to fail if we don't have at least + * one more MMID than CPUs. + */ + num_mmids = asid_first_version(0); + WARN_ON(num_mmids <= num_possible_cpus()); + + atomic64_set(&mmid_version, asid_first_version(0)); + mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map), + GFP_KERNEL); + if (!mmid_map) + panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids); + + /* Reserve an MMID for kmap/wired entries */ + __set_bit(MMID_KERNEL_WIRED, mmid_map); + + pr_info("MMID allocator initialised with %u entries\n", num_mmids); + return 0; +} +early_initcall(mmid_init); diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index cb38461391cb..b57465733e87 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -120,13 +120,8 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size, if (PageHighMem(page)) { void *addr; - if (offset + len > PAGE_SIZE) { - if (offset >= PAGE_SIZE) { - page += offset >> PAGE_SHIFT; - offset &= ~PAGE_MASK; - } + if (offset + len > PAGE_SIZE) len = PAGE_SIZE - offset; - } addr = kmap_atomic(page); dma_sync_virt(addr + offset, len, dir); @@ -145,12 +140,14 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, dma_sync_phys(paddr, size, dir); } +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir) { if (cpu_needs_post_dma_flush(dev)) dma_sync_phys(paddr, size, dir); } +#endif void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index b521d8e2d359..c3b45e248806 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -84,6 +84,7 @@ void setup_zero_pages(void) static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) { enum fixed_addresses idx; + unsigned int uninitialized_var(old_mmid); unsigned long vaddr, flags, entrylo; unsigned long old_ctx; pte_t pte; @@ -110,6 +111,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) write_c0_entryhi(vaddr & (PAGE_MASK << 1)); write_c0_entrylo0(entrylo); write_c0_entrylo1(entrylo); + 
if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(MMID_KERNEL_WIRED); + } #ifdef CONFIG_XPA if (cpu_has_xpa) { entrylo = (pte.pte_low & _PFNX_MASK); @@ -124,6 +129,8 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) tlb_write_indexed(); tlbw_use_hazard(); write_c0_entryhi(old_ctx); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); local_irq_restore(flags); return (void*) vaddr; diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c index 2a116084216f..9507421de335 100644 --- a/arch/mips/mm/sc-debugfs.c +++ b/arch/mips/mm/sc-debugfs.c @@ -55,20 +55,11 @@ static const struct file_operations sc_prefetch_fops = { static int __init sc_debugfs_init(void) { - struct dentry *dir, *file; - - if (!mips_debugfs_dir) - return -ENODEV; + struct dentry *dir; dir = debugfs_create_dir("l2cache", mips_debugfs_dir); - if (IS_ERR(dir)) - return PTR_ERR(dir); - - file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, - NULL, &sc_prefetch_fops); - if (!file) - return -ENOMEM; - + debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, NULL, + &sc_prefetch_fops); return 0; } late_initcall(sc_debugfs_init); diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index 6f589e0112ce..50f207591b6d 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -67,18 +67,6 @@ void local_flush_tlb_all(void) local_irq_restore(flags); } -void local_flush_tlb_mm(struct mm_struct *mm) -{ - int cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) { -#ifdef DEBUG_TLB - printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm)); -#endif - drop_mmu_context(mm, cpu); - } -} - void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -117,7 +105,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, } write_c0_entryhi(oldpid); } else { - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); } local_irq_restore(flags); } diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 0596505770db..c13e46ced425 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -104,23 +104,6 @@ void local_flush_tlb_all(void) } EXPORT_SYMBOL(local_flush_tlb_all); -/* All entries common to a mm share an asid. To effectively flush - these entries, we just bump the asid. */ -void local_flush_tlb_mm(struct mm_struct *mm) -{ - int cpu; - - preempt_disable(); - - cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) { - drop_mmu_context(mm, cpu); - } - - preempt_enable(); -} - void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -137,14 +120,23 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, if (size <= (current_cpu_data.tlbsizeftlbsets ? 
current_cpu_data.tlbsize / 8 : current_cpu_data.tlbsize / 2)) { - int oldpid = read_c0_entryhi(); + unsigned long old_entryhi, uninitialized_var(old_mmid); int newpid = cpu_asid(cpu, mm); + old_entryhi = read_c0_entryhi(); + if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(newpid); + } + htw_stop(); while (start < end) { int idx; - write_c0_entryhi(start | newpid); + if (cpu_has_mmid) + write_c0_entryhi(start); + else + write_c0_entryhi(start | newpid); start += (PAGE_SIZE << 1); mtc0_tlbw_hazard(); tlb_probe(); @@ -160,10 +152,12 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, tlb_write_indexed(); } tlbw_use_hazard(); - write_c0_entryhi(oldpid); + write_c0_entryhi(old_entryhi); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); htw_start(); } else { - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); } flush_micro_tlb(); local_irq_restore(flags); @@ -220,15 +214,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) int cpu = smp_processor_id(); if (cpu_context(cpu, vma->vm_mm) != 0) { - unsigned long flags; - int oldpid, newpid, idx; + unsigned long uninitialized_var(old_mmid); + unsigned long flags, old_entryhi; + int idx; - newpid = cpu_asid(cpu, vma->vm_mm); page &= (PAGE_MASK << 1); local_irq_save(flags); - oldpid = read_c0_entryhi(); + old_entryhi = read_c0_entryhi(); htw_stop(); - write_c0_entryhi(page | newpid); + if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_entryhi(page); + write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm)); + } else { + write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm)); + } mtc0_tlbw_hazard(); tlb_probe(); tlb_probe_hazard(); @@ -244,7 +244,9 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) tlbw_use_hazard(); finish: - write_c0_entryhi(oldpid); + write_c0_entryhi(old_entryhi); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); htw_start(); flush_micro_tlb_vm(vma); local_irq_restore(flags); @@ -307,9 +309,13 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) local_irq_save(flags); htw_stop(); - pid = read_c0_entryhi() & cpu_asid_mask(¤t_cpu_data); address &= (PAGE_MASK << 1); - write_c0_entryhi(address | pid); + if (cpu_has_mmid) { + write_c0_entryhi(address); + } else { + pid = read_c0_entryhi() & cpu_asid_mask(¤t_cpu_data); + write_c0_entryhi(address | pid); + } pgdp = pgd_offset(vma->vm_mm, address); mtc0_tlbw_hazard(); tlb_probe(); @@ -375,12 +381,17 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, #ifdef CONFIG_XPA panic("Broken for XPA kernels"); #else + unsigned int uninitialized_var(old_mmid); unsigned long flags; unsigned long wired; unsigned long old_pagemask; unsigned long old_ctx; local_irq_save(flags); + if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(MMID_KERNEL_WIRED); + } /* Save old context and create impossible VPN2 value */ old_ctx = read_c0_entryhi(); htw_stop(); @@ -398,6 +409,8 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, tlbw_use_hazard(); write_c0_entryhi(old_ctx); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); tlbw_use_hazard(); /* What is the hazard here? 
*/ htw_start(); write_c0_pagemask(old_pagemask); diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c index e86e2e55ad3e..c1e9e144007e 100644 --- a/arch/mips/mm/tlb-r8k.c +++ b/arch/mips/mm/tlb-r8k.c @@ -50,14 +50,6 @@ void local_flush_tlb_all(void) local_irq_restore(flags); } -void local_flush_tlb_mm(struct mm_struct *mm) -{ - int cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) - drop_mmu_context(mm, cpu); -} - void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -75,7 +67,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_irq_save(flags); if (size > TFP_TLB_SIZE / 2) { - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); goto out_restore; } |
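Beyond the allocator itself, the new context.c above documents a lock-free fast path in check_switch_mmu_context(): when the task's MMID belongs to the current generation, it is published with a relaxed cmpxchg on the per-CPU asid_cache, and only a generation mismatch or a racing rollover falls back to the spinlock-protected allocator. The userspace sketch below, using C11 atomics and a single simulated CPU, illustrates that shape only; the memory-ordering argument from the kernel comment (the rollover's xchg observing the published value) is not modelled, and all names are illustrative.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ID_MASK 0xffull

static _Atomic uint64_t current_version = 0x100;  /* generation in the upper bits */
static _Atomic uint64_t cpu_active_id;            /* stand-in for the per-CPU asid_cache */

/* Placeholder for the locked get_new_mmid() slow path. */
static uint64_t slow_path_alloc(uint64_t ctx)
{
	uint64_t low = ctx & ID_MASK;

	return atomic_load(&current_version) | (low ? low : 1);
}

static uint64_t switch_context(uint64_t ctx)
{
	uint64_t active = atomic_load_explicit(&cpu_active_id, memory_order_relaxed);
	uint64_t version = atomic_load(&current_version);

	/* Fast path: same generation and no rollover raced with us. */
	if (active && (ctx & ~ID_MASK) == version &&
	    atomic_compare_exchange_strong_explicit(&cpu_active_id, &active, ctx,
						    memory_order_relaxed,
						    memory_order_relaxed))
		return ctx;

	/* Slow path: (re)allocate under the lock, then publish. */
	ctx = slow_path_alloc(ctx);
	atomic_store(&cpu_active_id, ctx);
	return ctx;
}

int main(void)
{
	uint64_t ctx = switch_context(0);          /* first switch takes the slow path */

	printf("allocated %#llx\n", (unsigned long long)ctx);
	printf("fast path %#llx\n", (unsigned long long)switch_context(ctx));
	return 0;
}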