author     Paul Burton <paul.burton@imgtec.com>    2016-11-12 01:26:07 +0000
committer  Ralf Baechle <ralf@linux-mips.org>      2016-11-24 16:44:16 +0100
commit     1031398035a25e5c90c66befb6ff41fa4746df98
tree       656c7fb80b4078a25691d349a95df3496482265b /arch/mips
parent     9c763584b7c8911106bb77af7e648bef09af9d80
MIPS: Mask out limit field when calculating wired entry count
Since MIPSr6 the Wired register is split into 2 fields, with the upper 16 bits
of the register indicating a limit on the value that the wired entry count in
the bottom 16 bits of the register can take. This means that simply reading
the wired register doesn't get us a valid TLB entry index any longer, and we
instead need to retrieve only the lower 16 bits of the register. Introduce a
new num_wired_entries() function which does this on MIPSr6 or higher and
simply returns the value of the wired register on older architecture
revisions, and make use of it when reading the number of wired entries.

Since commit e710d6668309 ("MIPS: tlb-r4k: If there are wired entries, don't
use TLBINVF") we have been using a non-zero number of wired entries to
determine whether we should avoid use of the tlbinvf instruction (which would
invalidate wired entries) and instead loop over TLB entries in
local_flush_tlb_all(). This loop begins with the number of wired entries, or
before this patch some large bogus TLB index on MIPSr6 systems. Thus since the
aforementioned commit some MIPSr6 systems with FTLBs have been prone to
leaving stale address translations in the FTLB & crashing in various weird &
wonderful ways when we later observe the wrong memory.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Matt Redfearn <matt.redfearn@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/14557/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
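As a rough standalone illustration of the masking described above: the
MIPSR6_WIRED_* constants mirror those added by the patch below, but the
helper, its calling convention and the example register value here are
hypothetical stand-ins for the real read_c0_wired()-based kernel code.

    #include <stdio.h>

    /* Wired register fields introduced by MIPSr6 (mirroring the patch below). */
    #define MIPSR6_WIRED_LIMIT (0xffffu << 16) /* upper 16 bits: ceiling on the wired count */
    #define MIPSR6_WIRED_WIRED (0xffffu << 0)  /* lower 16 bits: current wired entry count */

    /*
     * Illustrative equivalent of num_wired_entries(): on MIPSr6 the limit
     * field must be masked out before the value can be used as a TLB index;
     * on older architecture revisions the whole register is the count.
     */
    static unsigned int wired_entries(unsigned int wired_reg, int is_mips_r6)
    {
            return is_mips_r6 ? (wired_reg & MIPSR6_WIRED_WIRED) : wired_reg;
    }

    int main(void)
    {
            /* Hypothetical MIPSr6 value: limit field of 16, two wired entries. */
            unsigned int raw = (16u << 16) | 2u;

            printf("raw Wired register: %u\n", raw);                   /* 1048578 */
            printf("wired entry count:  %u\n", wired_entries(raw, 1)); /* 2 */
            return 0;
    }

Used unmasked, a value like 1048578 as the starting index means the
invalidation loop in local_flush_tlb_all() never reaches a real TLB/FTLB
entry, which is how the stale translations described above survive.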
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/include/asm/mipsregs.h  6
-rw-r--r--  arch/mips/include/asm/tlb.h       13
-rw-r--r--  arch/mips/mm/init.c               4
-rw-r--r--  arch/mips/mm/tlb-r4k.c            6
4 files changed, 24 insertions, 5 deletions
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 7dd2dd47909a..df78b2ca70eb 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -215,6 +215,12 @@
#endif
/*
+ * Wired register bits
+ */
+#define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << 16)
+#define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << 0)
+
+/*
* Values used for computation of new tlb entries
*/
#define PL_4K 12
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index 4a2349302b55..dd179fd8acda 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -1,6 +1,9 @@
#ifndef __ASM_TLB_H
#define __ASM_TLB_H
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
+
/*
* MIPS doesn't need any special per-pte or per-vma handling, except
* we need to flush cache for area to be unmapped.
@@ -22,6 +25,16 @@
((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \
(cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
+static inline unsigned int num_wired_entries(void)
+{
+ unsigned int wired = read_c0_wired();
+
+ if (cpu_has_mips_r6)
+ wired &= MIPSR6_WIRED_WIRED;
+
+ return wired;
+}
+
#include <asm-generic/tlb.h>
#endif /* __ASM_TLB_H */
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3a6edecc3f38..e86ebcf5c071 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -118,7 +118,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
writex_c0_entrylo1(entrylo);
}
#endif
- tlbidx = read_c0_wired();
+ tlbidx = num_wired_entries();
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
mtc0_tlbw_hazard();
@@ -147,7 +147,7 @@ void kunmap_coherent(void)
local_irq_save(flags);
old_ctx = read_c0_entryhi();
- wired = read_c0_wired() - 1;
+ wired = num_wired_entries() - 1;
write_c0_wired(wired);
write_c0_index(wired);
write_c0_entryhi(UNIQUE_ENTRYHI(wired));
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index bba9c1484b41..0596505770db 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -65,7 +65,7 @@ void local_flush_tlb_all(void)
write_c0_entrylo0(0);
write_c0_entrylo1(0);
- entry = read_c0_wired();
+ entry = num_wired_entries();
/*
* Blast 'em all away.
@@ -385,7 +385,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
old_ctx = read_c0_entryhi();
htw_stop();
old_pagemask = read_c0_pagemask();
- wired = read_c0_wired();
+ wired = num_wired_entries();
write_c0_wired(wired + 1);
write_c0_index(wired);
tlbw_use_hazard(); /* What is the hazard here? */
@@ -449,7 +449,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
htw_stop();
old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
- wired = read_c0_wired();
+ wired = num_wired_entries();
if (--temp_tlb_entry < wired) {
printk(KERN_WARNING
"No TLB space left for add_temporary_entry\n");