author		Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>	2013-11-14 16:12:31 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	2014-01-22 20:19:00 +0100
commit		75b5b5e0a262790fa11043fe45700499c7e3d818 (patch)
tree		3c5af9caa9c5478668159ff34db0ab34b51d7511 /arch/mips/mm
parent		601cfa7b6fb657cff9e8f77bbcce79f75dd7ab74 (diff)
download	linux-75b5b5e0a262790fa11043fe45700499c7e3d818.tar.bz2
MIPS: Add support for FTLBs
The Fixed Page Size TLB (FTLB) is a set-associative, dual-entry TLB. Its purpose is to reduce the number of TLB misses by increasing the effective TLB size while keeping the implementation complexity to a minimum. A supported core can have both a VTLB and an FTLB.

Reviewed-by: James Hogan <james.hogan@imgtec.com>
Reviewed-by: Paul Burton <paul.burton@imgtec.com>
Signed-off-by: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
Signed-off-by: John Crispin <blogic@openwrt.org>
Patchwork: http://patchwork.linux-mips.org/patch/6139/
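For orientation, a minimal sketch of how a core's total TLB capacity could be composed from the VTLB and FTLB geometry. The field names tlbsizevtlb and tlbsizeftlbsets match those used in the diff below; tlbsizeftlbways, the struct, and the sample numbers are assumptions made for this example.

/*
 * Hedged sketch, not part of the patch: the effective TLB size is the
 * fully associative VTLB plus the set-associative FTLB (sets x ways).
 * tlbsizevtlb and tlbsizeftlbsets mirror the fields used in the diff
 * below; tlbsizeftlbways and this struct are illustrative only.
 */
struct tlb_geometry {
	unsigned int tlbsizevtlb;	/* VTLB entries */
	unsigned int tlbsizeftlbsets;	/* FTLB sets */
	unsigned int tlbsizeftlbways;	/* ways per FTLB set */
};

static unsigned int total_tlb_entries(const struct tlb_geometry *g)
{
	return g->tlbsizevtlb + g->tlbsizeftlbsets * g->tlbsizeftlbways;
}

With an assumed 64-entry VTLB and a 128-set, 4-way FTLB this gives 64 + 512 = 576 entries, which is the kind of growth in effective TLB size the commit message refers to.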
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/tlb-r4k.c	29
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 427dcacca586..ae4ca2450707 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -72,7 +72,7 @@ void local_flush_tlb_all(void)
 {
 	unsigned long flags;
 	unsigned long old_ctx;
-	int entry;
+	int entry, ftlbhighset;
 
 	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
@@ -83,10 +83,21 @@ void local_flush_tlb_all(void)
 	entry = read_c0_wired();
 
 	/* Blast 'em all away. */
-	if (cpu_has_tlbinv && current_cpu_data.tlbsize) {
-		write_c0_index(0);
-		mtc0_tlbw_hazard();
-		tlbinvf();  /* invalidate VTLB */
+	if (cpu_has_tlbinv) {
+		if (current_cpu_data.tlbsizevtlb) {
+			write_c0_index(0);
+			mtc0_tlbw_hazard();
+			tlbinvf();  /* invalidate VTLB */
+		}
+		ftlbhighset = current_cpu_data.tlbsizevtlb +
+			current_cpu_data.tlbsizeftlbsets;
+		for (entry = current_cpu_data.tlbsizevtlb;
+		     entry < ftlbhighset;
+		     entry++) {
+			write_c0_index(entry);
+			mtc0_tlbw_hazard();
+			tlbinvf();  /* invalidate one FTLB set */
+		}
 	} else {
 		while (entry < current_cpu_data.tlbsize) {
 			/* Make sure all entries differ. */
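The hunk above switches the flush from writing every entry to index-based invalidation: index 0 with tlbinvf wipes the whole VTLB, and each index from tlbsizevtlb up to tlbsizevtlb + tlbsizeftlbsets - 1 selects one FTLB set whose ways are all invalidated at once. A rough, standalone comparison of the operation counts, using an assumed geometry:

/*
 * Illustrative arithmetic only, not part of the patch: compare the cost
 * of the old per-entry loop with the tlbinvf path above.  The geometry
 * below is an assumed example, not read from any real core.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tlbsizevtlb = 64;		/* assumed VTLB entries */
	unsigned int tlbsizeftlbsets = 128;	/* assumed FTLB sets */
	unsigned int tlbsizeftlbways = 4;	/* assumed ways per set */
	unsigned int tlbsize = tlbsizevtlb +
			       tlbsizeftlbsets * tlbsizeftlbways;

	/* fallback path: one indexed write per TLB entry */
	printf("per-entry flush: %u index writes\n", tlbsize);
	/* tlbinvf path: one VTLB-wide flush plus one flush per FTLB set */
	printf("tlbinvf flush:   %u index writes\n", 1 + tlbsizeftlbsets);
	return 0;
}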
@@ -134,7 +145,9 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		start = round_down(start, PAGE_SIZE << 1);
 		end = round_up(end, PAGE_SIZE << 1);
 		size = (end - start) >> (PAGE_SHIFT + 1);
-		if (size <= current_cpu_data.tlbsize/2) {
+		if (size <= (current_cpu_data.tlbsizeftlbsets ?
+			     current_cpu_data.tlbsize / 8 :
+			     current_cpu_data.tlbsize / 2)) {
 			int oldpid = read_c0_entryhi();
 			int newpid = cpu_asid(cpu, mm);
 
@@ -173,7 +186,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	ENTER_CRITICAL(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	size = (size + 1) >> 1;
-	if (size <= current_cpu_data.tlbsize / 2) {
+	if (size <= (current_cpu_data.tlbsizeftlbsets ?
+			current_cpu_data.tlbsize / 8 :
+			current_cpu_data.tlbsize / 2)) {
 		int pid = read_c0_entryhi();
 
 		start &= (PAGE_MASK << 1);
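The last two hunks adjust the cutoff that decides between flushing a range page by page and flushing everything: once FTLB sets are reported, the selective path is only taken for ranges covering at most tlbsize/8 double-page entries instead of tlbsize/2. A minimal sketch of that decision, assuming the helper name and the sample numbers below:

/*
 * Hedged sketch of the threshold check used in local_flush_tlb_range()
 * and local_flush_tlb_kernel_range() above.  The helper name and the
 * example geometry are assumptions made for illustration.
 */
static int flush_range_selectively(unsigned long size,
				   unsigned int tlbsize,
				   unsigned int tlbsizeftlbsets)
{
	/* With an FTLB present the cutoff drops from 1/2 to 1/8 of the TLB. */
	unsigned long limit = tlbsizeftlbsets ? tlbsize / 8 : tlbsize / 2;

	return size <= limit;
}

For the assumed 576-entry configuration (64 VTLB + 512 FTLB) the selective flush is attempted for up to 72 double-page entries, while a VTLB-only core with 64 entries keeps the old cutoff of 32; anything larger falls back to a full flush instead.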