From f3ade253615ae6d83aeb72d1c8a96f62a4b4b29b Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 15 Nov 2018 15:53:52 +0800 Subject: MIPS: Loongson: Add Loongson-3A R2.1 basic support Loongson-3A R2.1 is the bugfix revision of Loongson-3A R2. All Loongson-3 CPU family: Code-name Brand-name PRId Loongson-3A R1 Loongson-3A1000 0x6305 Loongson-3A R2 Loongson-3A2000 0x6308 Loongson-3A R2.1 Loongson-3A2000 0x630c Loongson-3A R3 Loongson-3A3000 0x6309 Loongson-3A R3.1 Loongson-3A3000 0x630d Loongson-3B R1 Loongson-3B1000 0x6306 Loongson-3B R2 Loongson-3B1500 0x6307 Signed-off-by: Huacai Chen Signed-off-by: Paul Burton Patchwork: https://patchwork.linux-mips.org/patch/21128/ Cc: Ralf Baechle Cc: James Hogan Cc: Steven J . Hill Cc: linux-mips@linux-mips.org Cc: Fuxin Zhang Cc: Zhangjin Wu --- arch/mips/mm/c-r4k.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/mips/mm') diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 05bd77727fb9..7e430b4d8778 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1352,7 +1352,7 @@ static void probe_pcache(void) c->dcache.ways * c->dcache.linesz; c->dcache.waybit = 0; - if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2) + if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0) c->options |= MIPS_CPU_PREFETCH; break; -- cgit v1.2.3 From bb53fdf395eed103f85061bfff3b116cee123895 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 15 Nov 2018 15:53:53 +0800 Subject: MIPS: c-r4k: Add r4k_blast_scache_node for Loongson-3 For multi-node Loongson-3 (NUMA configuration), r4k_blast_scache() can only flush Node-0's scache. So we add r4k_blast_scache_node() by using (CAC_BASE | (node_id << NODE_ADDRSPACE_SHIFT)) instead of CKSEG0 as the start address. Signed-off-by: Huacai Chen [paul.burton@mips.com: Include asm/mmzone.h from asm/r4kcache.h for nid_to_addrbase(). Add asm/mach-generic/mmzone.h to allow inclusion for all platforms.] Signed-off-by: Paul Burton Patchwork: https://patchwork.linux-mips.org/patch/21129/ Cc: Ralf Baechle Cc: James Hogan Cc: Steven J . 
Hill Cc: linux-mips@linux-mips.org Cc: Fuxin Zhang Cc: Zhangjin Wu Cc: # 3.15+ --- arch/mips/include/asm/mach-generic/mmzone.h | 2 ++ arch/mips/include/asm/mach-loongson64/mmzone.h | 1 + arch/mips/include/asm/mmzone.h | 8 +++++ arch/mips/include/asm/r4kcache.h | 22 +++++++++++++ arch/mips/mm/c-r4k.c | 44 ++++++++++++++++++++++---- 5 files changed, 70 insertions(+), 7 deletions(-) create mode 100644 arch/mips/include/asm/mach-generic/mmzone.h (limited to 'arch/mips/mm') diff --git a/arch/mips/include/asm/mach-generic/mmzone.h b/arch/mips/include/asm/mach-generic/mmzone.h new file mode 100644 index 000000000000..826392f2ee32 --- /dev/null +++ b/arch/mips/include/asm/mach-generic/mmzone.h @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Intentionally empty */ diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h index c9f7e231e66b..59c8b11c090e 100644 --- a/arch/mips/include/asm/mach-loongson64/mmzone.h +++ b/arch/mips/include/asm/mach-loongson64/mmzone.h @@ -21,6 +21,7 @@ #define NODE3_ADDRSPACE_OFFSET 0x300000000000UL #define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT) +#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT) #define LEVELS_PER_SLICE 128 diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h index f085fba41da5..2a0fe1d0eac6 100644 --- a/arch/mips/include/asm/mmzone.h +++ b/arch/mips/include/asm/mmzone.h @@ -9,6 +9,14 @@ #include #include +#ifndef pa_to_nid +#define pa_to_nid(addr) 0 +#endif + +#ifndef nid_to_addrbase +#define nid_to_addrbase(nid) 0 +#endif + #ifdef CONFIG_DISCONTIGMEM #define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT) diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index d19b2d65336b..7f4a32d3345a 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -20,6 +20,7 @@ #include #include #include +#include #include /* for uaccess_kernel() */ extern void (*r4k_blast_dcache)(void); @@ -674,4 +675,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , ) __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , ) +/* Currently, this is very specific to Loongson-3 */ +#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \ +static inline void blast_##pfx##cache##lsize##_node(long node) \ +{ \ + unsigned long start = CAC_BASE | nid_to_addrbase(node); \ + unsigned long end = start + current_cpu_data.desc.waysize; \ + unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \ + unsigned long ws_end = current_cpu_data.desc.ways << \ + current_cpu_data.desc.waybit; \ + unsigned long ws, addr; \ + \ + for (ws = 0; ws < ws_end; ws += ws_inc) \ + for (addr = start; addr < end; addr += lsize * 32) \ + cache##lsize##_unroll32(addr|ws, indexop); \ +} + +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16) +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32) +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) + #endif /* _ASM_R4KCACHE_H */ diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 7e430b4d8778..96d666a0f4a0 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void) r4k_blast_scache = blast_scache128; } +static void 
(*r4k_blast_scache_node)(long node); + +static void r4k_blast_scache_node_setup(void) +{ + unsigned long sc_lsize = cpu_scache_line_size(); + + if (current_cpu_type() != CPU_LOONGSON3) + r4k_blast_scache_node = (void *)cache_noop; + else if (sc_lsize == 16) + r4k_blast_scache_node = blast_scache16_node; + else if (sc_lsize == 32) + r4k_blast_scache_node = blast_scache32_node; + else if (sc_lsize == 64) + r4k_blast_scache_node = blast_scache64_node; + else if (sc_lsize == 128) + r4k_blast_scache_node = blast_scache128_node; +} + static inline void local_r4k___flush_cache_all(void * args) { switch (current_cpu_type()) { case CPU_LOONGSON2: - case CPU_LOONGSON3: case CPU_R4000SC: case CPU_R4000MC: case CPU_R4400SC: @@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args) r4k_blast_scache(); break; + case CPU_LOONGSON3: + /* Use get_ebase_cpunum() for both NUMA=y/n */ + r4k_blast_scache_node(get_ebase_cpunum() >> 2); + break; + case CPU_BMIPS5000: r4k_blast_scache(); __sync(); @@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) preempt_disable(); if (cpu_has_inclusive_pcaches) { - if (size >= scache_size) - r4k_blast_scache(); - else + if (size >= scache_size) { + if (current_cpu_type() != CPU_LOONGSON3) + r4k_blast_scache(); + else + r4k_blast_scache_node(pa_to_nid(addr)); + } else { blast_scache_range(addr, addr + size); + } preempt_enable(); __sync(); return; @@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) preempt_disable(); if (cpu_has_inclusive_pcaches) { - if (size >= scache_size) - r4k_blast_scache(); - else { + if (size >= scache_size) { + if (current_cpu_type() != CPU_LOONGSON3) + r4k_blast_scache(); + else + r4k_blast_scache_node(pa_to_nid(addr)); + } else { /* * There is no clearly documented alignment requirement * for the cache instruction on MIPS processors and @@ -1918,6 +1947,7 @@ void r4k_cache_init(void) r4k_blast_scache_page_setup(); r4k_blast_scache_page_indexed_setup(); r4k_blast_scache_setup(); + r4k_blast_scache_node_setup(); #ifdef CONFIG_EVA r4k_blast_dcache_user_page_setup(); r4k_blast_icache_user_page_setup(); -- cgit v1.2.3 From 69095e3900b22bc289ade04ac548ae6b9e8f45ec Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Mon, 3 Dec 2018 22:23:43 +0100 Subject: mips: annotate implicit fall throughs There is a plan to build the kernel with -Wimplicit-fallthrough and these places in the code produced warnings. Fix them up. This patch produces no change in behaviour, but should be reviewed in case these are actually bugs not intentional fallthoughs. 
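For context on the warning being addressed: -Wimplicit-fallthrough fires whenever execution runs off the end of one case label into the next, and GCC (at its default warning level) suppresses it when a marker such as a /* fall through */ comment appears immediately before the following label; that is the annotation style this patch adds. A minimal standalone sketch, with hypothetical file and function names rather than code taken from the patch:

#include <stdio.h>

/* Hypothetical example; build with: gcc -Wextra -Wimplicit-fallthrough -c fallthrough_sketch.c */
static void program_watch_regs(int count)
{
	/* Without the "fall through" comment below, -Wimplicit-fallthrough
	 * would warn that case 2 runs straight into case 1. */
	switch (count) {
	case 2:
		printf("program watch register 1\n");
		/* fall through */
	case 1:
		printf("program watch register 0\n");
		break;
	default:
		break;
	}
}

int main(void)
{
	program_watch_regs(2);	/* deliberately does the case 2 work, then falls into case 1 */
	return 0;
}

The cascading cases in the kernel's watch-register code annotated below follow the same pattern: each higher count programs one more register and then intentionally falls into the case beneath it.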
Signed-off-by: Mathieu Malaterre Signed-off-by: Paul Burton Cc: Kees Cook Cc: Ralf Baechle Cc: James Hogan Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/branch.c | 7 +++++++ arch/mips/kernel/cpu-probe.c | 7 +++++++ arch/mips/kernel/watch.c | 13 +++++++++++++ arch/mips/mm/c-r4k.c | 2 ++ arch/mips/mm/tlbex.c | 1 + 5 files changed, 30 insertions(+) (limited to 'arch/mips/mm') diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 2077a4dce763..180ad081afcf 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -451,6 +451,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bltzl_op: if (NO_R6EMU) goto sigill_r2r6; + /* fall through */ case bltz_op: if ((long)regs->regs[insn.i_format.rs] < 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -464,6 +465,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgezl_op: if (NO_R6EMU) goto sigill_r2r6; + /* fall through */ case bgez_op: if ((long)regs->regs[insn.i_format.rs] >= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -559,6 +561,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case jalx_op: case jal_op: regs->regs[31] = regs->cp0_epc + 8; + /* fall through */ case j_op: epc += 4; epc >>= 28; @@ -575,6 +578,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case beql_op: if (NO_R6EMU) goto sigill_r2r6; + /* fall through */ case beq_op: if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) { @@ -589,6 +593,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bnel_op: if (NO_R6EMU) goto sigill_r2r6; + /* fall through */ case bne_op: if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) { @@ -603,6 +608,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case blezl_op: /* not really i_format */ if (!insn.i_format.rt && NO_R6EMU) goto sigill_r2r6; + /* fall through */ case blez_op: /* * Compact branches for R6 for the @@ -638,6 +644,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgtzl_op: if (!insn.i_format.rt && NO_R6EMU) goto sigill_r2r6; + /* fall through */ case bgtz_op: /* * Compact branches for R6 for the diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 65dc2e699a2e..95b18a194f53 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -517,12 +517,16 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) switch (isa) { case MIPS_CPU_ISA_M64R2: c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2; + /* fall through */ case MIPS_CPU_ISA_M64R1: c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1; + /* fall through */ case MIPS_CPU_ISA_V: c->isa_level |= MIPS_CPU_ISA_V; + /* fall through */ case MIPS_CPU_ISA_IV: c->isa_level |= MIPS_CPU_ISA_IV; + /* fall through */ case MIPS_CPU_ISA_III: c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; break; @@ -530,14 +534,17 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) /* R6 incompatible with everything else */ case MIPS_CPU_ISA_M64R6: c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6; + /* fall through */ case MIPS_CPU_ISA_M32R6: c->isa_level |= MIPS_CPU_ISA_M32R6; /* Break here so we don't add incompatible ISAs */ break; case MIPS_CPU_ISA_M32R2: c->isa_level |= MIPS_CPU_ISA_M32R2; + /* fall through */ case MIPS_CPU_ISA_M32R1: c->isa_level |= MIPS_CPU_ISA_M32R1; + /* fall through */ case MIPS_CPU_ISA_II: c->isa_level |= MIPS_CPU_ISA_II; break; diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c 
index 0e61a5b7647f..ba73b4077668 100644 --- a/arch/mips/kernel/watch.c +++ b/arch/mips/kernel/watch.c @@ -27,12 +27,15 @@ void mips_install_watch_registers(struct task_struct *t) case 4: write_c0_watchlo3(watches->watchlo[3]); write_c0_watchhi3(watchhi | watches->watchhi[3]); + /* fall through */ case 3: write_c0_watchlo2(watches->watchlo[2]); write_c0_watchhi2(watchhi | watches->watchhi[2]); + /* fall through */ case 2: write_c0_watchlo1(watches->watchlo[1]); write_c0_watchhi1(watchhi | watches->watchhi[1]); + /* fall through */ case 1: write_c0_watchlo0(watches->watchlo[0]); write_c0_watchhi0(watchhi | watches->watchhi[0]); @@ -55,10 +58,13 @@ void mips_read_watch_registers(void) BUG(); case 4: watches->watchhi[3] = (read_c0_watchhi3() & watchhi_mask); + /* fall through */ case 3: watches->watchhi[2] = (read_c0_watchhi2() & watchhi_mask); + /* fall through */ case 2: watches->watchhi[1] = (read_c0_watchhi1() & watchhi_mask); + /* fall through */ case 1: watches->watchhi[0] = (read_c0_watchhi0() & watchhi_mask); } @@ -85,18 +91,25 @@ void mips_clear_watch_registers(void) BUG(); case 8: write_c0_watchlo7(0); + /* fall through */ case 7: write_c0_watchlo6(0); + /* fall through */ case 6: write_c0_watchlo5(0); + /* fall through */ case 5: write_c0_watchlo4(0); + /* fall through */ case 4: write_c0_watchlo3(0); + /* fall through */ case 3: write_c0_watchlo2(0); + /* fall through */ case 2: write_c0_watchlo1(0); + /* fall through */ case 1: write_c0_watchlo0(0); } diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 96d666a0f4a0..d0b64df51eb2 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1280,6 +1280,7 @@ static void probe_pcache(void) case CPU_VR4133: write_c0_config(config & ~VR41_CONF_P4K); + /* fall through */ case CPU_VR4131: /* Workaround for cache instruction bug of VR4131 */ if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U || @@ -1527,6 +1528,7 @@ static void probe_pcache(void) c->dcache.flags |= MIPS_CACHE_PINDEX; break; } + /* fall through */ default: if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE) c->dcache.flags |= MIPS_CACHE_ALIASES; diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 067714291643..37b1cb246332 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -576,6 +576,7 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l, case CPU_R5500: if (m4kc_tlbp_war()) uasm_i_nop(p); + /* fall through */ case CPU_ALCHEMY: tlbw(p); break; -- cgit v1.2.3 From ff4dd232ec45a0e45ea69f28f069f2ab22b4908a Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Tue, 4 Dec 2018 23:44:12 +0000 Subject: MIPS: Expand MIPS32 ASIDs to 64 bits ASIDs have always been stored as unsigned longs, ie. 32 bits on MIPS32 kernels. This is problematic because it is feasible for the ASID version to overflow & wrap around to zero. We currently attempt to handle this overflow by simply setting the ASID version to 1, using asid_first_version(), but we make no attempt to account for the fact that there may be mm_structs with stale ASIDs that have versions which we now reuse due to the overflow & wrap around. Encountering this requires that: 1) A struct mm_struct X is active on CPU A using ASID (V,n). 2) That mm is not used on CPU A for the length of time that it takes for CPU A's asid_cache to overflow & wrap around to the same version V that the mm had in step 1. During this time tasks using the mm could either be sleeping or only scheduled on other CPUs. 3) Some other mm Y becomes active on CPU A and is allocated the same ASID (V,n). 
4) mm X now becomes active on CPU A again, and now incorrectly has the same ASID as mm Y. Where struct mm_struct ASIDs are represented above in the format (version, EntryHi.ASID), and on a typical MIPS32 system version will be 24 bits wide & EntryHi.ASID will be 8 bits wide. The length of time required in step 2 is highly dependent upon the CPU & workload, but for a hypothetical 2GHz CPU running a workload which generates a new ASID every 10000 cycles this period is around 248 days. Due to this long period of time & the fact that tasks need to be scheduled in just the right (or wrong, depending upon your inclination) way, this is obviously a difficult bug to encounter but it's entirely possible as evidenced by reports. In order to fix this, simply extend ASIDs to 64 bits even on MIPS32 builds. This will extend the period of time required for the hypothetical system above to encounter the problem from 248 days to around 3 trillion years, which feels safely outside of the realms of possibility. The cost of this is slightly more generated code in some commonly executed paths, but this is pretty minimal:
                       | Code Size Gain | Percentage
-----------------------|----------------|-------------
  decstation_defconfig |      +270      |   +0.00%
      32r2el_defconfig |      +652      |   +0.01%
      32r6el_defconfig |     +1000      |   +0.01%
I have been unable to measure any change in performance of the LMbench lat_ctx or lat_proc tests resulting from the 64b ASIDs on either 32r2el_defconfig+interAptiv or 32r6el_defconfig+I6500 systems. Signed-off-by: Paul Burton Suggested-by: James Hogan References: https://lore.kernel.org/linux-mips/80B78A8B8FEE6145A87579E8435D78C30205D5F3@fzex.ruijie.com.cn/ References: https://lore.kernel.org/linux-mips/1488684260-18867-1-git-send-email-jiwei.sun@windriver.com/ Cc: Jiwei Sun Cc: Yu Huabing Cc: stable@vger.kernel.org # 2.6.12+ Cc: linux-mips@vger.kernel.org --- arch/mips/include/asm/cpu-info.h | 2 +- arch/mips/include/asm/mmu.h | 2 +- arch/mips/include/asm/mmu_context.h | 10 ++++------ arch/mips/mm/c-r3k.c | 2 +- 4 files changed, 7 insertions(+), 9 deletions(-) (limited to 'arch/mips/mm') diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index a41059d47d31..ed7ffe4e63a3 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -50,7 +50,7 @@ struct guest_info { #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */ struct cpuinfo_mips { - unsigned long asid_cache; + u64 asid_cache; #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE unsigned long asid_mask; #endif diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index 0740be7d5d4a..24d6b42345fb 100644 --- a/arch/mips/include/asm/mmu.h +++ b/arch/mips/include/asm/mmu.h @@ -7,7 +7,7 @@ #include typedef struct { - unsigned long asid[NR_CPUS]; + u64 asid[NR_CPUS]; void *vdso; atomic_t fp_mode_switching; diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 94414561de0e..a589585be21b 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -76,14 +76,14 @@ extern unsigned long pgd_current[]; * All unused by hardware upper bits will be considered * as a software asid extension.
*/ -static unsigned long asid_version_mask(unsigned int cpu) +static inline u64 asid_version_mask(unsigned int cpu) { unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]); - return ~(asid_mask | (asid_mask - 1)); + return ~(u64)(asid_mask | (asid_mask - 1)); } -static unsigned long asid_first_version(unsigned int cpu) +static inline u64 asid_first_version(unsigned int cpu) { return ~asid_version_mask(cpu) + 1; } @@ -102,14 +102,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) static inline void get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) { - unsigned long asid = asid_cache(cpu); + u64 asid = asid_cache(cpu); if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { if (cpu_has_vtag_icache) flush_icache_all(); local_flush_tlb_all(); /* start new asid cycle */ - if (!asid) /* fix version if needed */ - asid = asid_first_version(cpu); } cpu_context(cpu, mm) = asid_cache(cpu) = asid; diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c index 3466fcdae0ca..01848cdf2074 100644 --- a/arch/mips/mm/c-r3k.c +++ b/arch/mips/mm/c-r3k.c @@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma, pmd_t *pmdp; pte_t *ptep; - pr_debug("cpage[%08lx,%08lx]\n", + pr_debug("cpage[%08llx,%08lx]\n", cpu_context(smp_processor_id(), mm), addr); /* No ASID => no such page in the cache. */ -- cgit v1.2.3
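To make the (version, EntryHi.ASID) split above concrete, here is a small standalone C sketch. It is illustrative only, assuming an 8-bit hardware ASID field and an ASID increment of 1 as in the commit message; it shows why a 32-bit asid_cache can wrap its 24-bit version back to a value a stale mm may still hold, while a 64-bit cache simply keeps counting:

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS 8	/* assumed EntryHi.ASID width, per the commit message */

int main(void)
{
	/* Old scheme: asid_cache was an unsigned long, i.e. 32 bits on MIPS32,
	 * leaving only 24 bits for the software version. */
	uint32_t cache32 = UINT32_MAX;		/* last possible (version, ASID) pair */
	cache32 += 1;				/* next allocation wraps the version to 0 */
	printf("32-bit version: 0x%06x\n", cache32 >> ASID_BITS);

	/* New scheme: a u64 cache leaves 56 version bits, so wrapping back to a
	 * previously used version stops being a practical concern. */
	uint64_t cache64 = UINT32_MAX;
	cache64 += 1;				/* the version simply keeps growing */
	printf("64-bit version: 0x%llx\n", (unsigned long long)(cache64 >> ASID_BITS));

	return 0;
}

This is also why the patch can drop the "fix version if needed" branch from get_new_mmu_context(): with a 64-bit counter the cache is not expected to wrap back to zero within the lifetime of a system.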