author		David S. Miller <davem@davemloft.net>	2006-02-21 22:31:11 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 01:13:56 -0800
commit		d7744a09504d5ae84edc8289a02254e1f2102410 (patch)
tree		be0f245ee0725f2f066bf87d17d254ce1e7279bf /arch/sparc64/kernel/ktlb.S
parent		9cc3a1ac9a819cadff05ca37bb7f208013a22035 (diff)
download	linux-d7744a09504d5ae84edc8289a02254e1f2102410.tar.bz2
[SPARC64]: Create a separate kernel TSB for 4MB/256MB mappings.
It can map all of the linear kernel mappings with zero TSB hash conflicts for systems with 16GB or less of RAM. In such cases, on SUN4V, once we load this TSB up the first time with all the mappings, we never take a linear kernel mapping TLB miss again; the hypervisor handles them all.

Signed-off-by: David S. Miller <davem@davemloft.net>
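A quick way to see why 16GB is the break-even point: with 4MB pages, 16GB of linear mappings is exactly 4096 entries, so a direct-mapped TSB with 4096 slots can hold every one of them with no two addresses hashing to the same slot. The sketch below only illustrates that arithmetic; the 4096-entry size and the index function are assumptions modeled on the kernel 4MB TSB of this era, not values taken from this patch.

#include <stdio.h>

/* Assumptions for illustration only: a direct-mapped 4MB-page TSB with
 * 4096 entries, one entry per 4MB page of the linear mapping. */
#define TSB4M_NENTRIES	4096ULL
#define SHIFT_4MB	22		/* 4MB page => 22 offset bits */

/* Assumed index function: fold the 4MB page number into the TSB. */
static unsigned long long tsb4m_index(unsigned long long vaddr_offset)
{
	return (vaddr_offset >> SHIFT_4MB) & (TSB4M_NENTRIES - 1);
}

int main(void)
{
	unsigned long long ram = 16ULL << 30;		/* 16GB of RAM    */
	unsigned long long pages = ram >> SHIFT_4MB;	/* 4096 4MB pages */

	/* 4096 consecutive 4MB pages land in 4096 distinct slots, so no
	 * linear-mapping entry ever evicts another: zero hash conflicts. */
	printf("%llu 4MB pages vs %llu TSB slots: conflicts %s\n",
	       pages, TSB4M_NENTRIES,
	       pages <= TSB4M_NENTRIES ? "impossible" : "possible");
	printf("first and last page index: %llu, %llu\n",
	       tsb4m_index(0), tsb4m_index(ram - 1));
	return 0;
}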
Diffstat (limited to 'arch/sparc64/kernel/ktlb.S')
-rw-r--r--	arch/sparc64/kernel/ktlb.S	15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index ae1dac17bc8d..efcf38b6e284 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -121,6 +121,12 @@ kvmap_dtlb_obp:
 	 nop
 
 	.align		32
+kvmap_dtlb_tsb4m_load:
+	KTSB_LOCK_TAG(%g1, %g2, %g7)
+	KTSB_WRITE(%g1, %g5, %g6)
+	ba,pt		%xcc, kvmap_dtlb_load
+	 nop
+
 kvmap_dtlb:
 	/* %g6: TAG TARGET */
 	mov		TLB_TAG_ACCESS, %g4
@@ -133,6 +139,13 @@ kvmap_dtlb_4v:
 	brgez,pn	%g4, kvmap_dtlb_nonlinear
 	 nop
 
+	/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
+	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+
+	/* TSB entry address left in %g1, lookup linear PTE.
+	 * Must preserve %g1 and %g6 (TAG).
+	 */
+kvmap_dtlb_tsb4m_miss:
 	sethi		%hi(kpte_linear_bitmap), %g2
 	or		%g2, %lo(kpte_linear_bitmap), %g2
 
@@ -163,7 +176,7 @@ kvmap_dtlb_4v:
 
 	.globl		kvmap_linear_patch
 kvmap_linear_patch:
-	ba,pt		%xcc, kvmap_dtlb_load
+	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
 	 xor		%g2, %g4, %g5
 
 kvmap_dtlb_vmalloc_addr:
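For readers who don't have the tsb.h macros handy, the following C-level sketch shows the idea behind the new kvmap_dtlb_tsb4m_load stub: lock the TSB entry's tag, write the new translation, then branch to the common D-TLB load path. The struct layout, lock-bit position, and helper names are assumptions inferred from the macro names (KTSB_LOCK_TAG, KTSB_WRITE), not a transcription of the actual assembly.

#include <stdatomic.h>
#include <stdint.h>

/* Assumed TSB entry layout: a tag word followed by the translation data. */
struct ktsb_entry {
	_Atomic uint64_t tag;	/* tag target; one bit doubles as a lock */
	uint64_t tte;		/* translation loaded into the TLB       */
};

#define KTSB_TAG_LOCK	(1ULL << 63)	/* assumed lock-bit position */

/* KTSB_LOCK_TAG (sketch): spin until the lock bit can be set atomically,
 * so a concurrent miss handler never reads a half-updated entry. */
static void ktsb_lock_tag(struct ktsb_entry *ent)
{
	uint64_t old;

	for (;;) {
		old = atomic_load(&ent->tag);
		if (old & KTSB_TAG_LOCK)
			continue;	/* someone else is updating it */
		if (atomic_compare_exchange_weak(&ent->tag, &old,
						 old | KTSB_TAG_LOCK))
			return;
	}
}

/* KTSB_WRITE (sketch): publish the new translation first, then store the
 * final tag, which simultaneously clears the lock bit. */
static void ktsb_write(struct ktsb_entry *ent, uint64_t tag, uint64_t tte)
{
	ent->tte = tte;
	atomic_store(&ent->tag, tag & ~KTSB_TAG_LOCK);
}

/* kvmap_dtlb_tsb4m_load (sketch): %g1 = TSB entry, %g5 = TTE, %g6 = tag;
 * fill the entry, then fall through to the generic D-TLB load path. */
static void kvmap_dtlb_tsb4m_load_sketch(struct ktsb_entry *ent,
					 uint64_t tte, uint64_t tag)
{
	ktsb_lock_tag(ent);
	ktsb_write(ent, tag, tte);
	/* ...then: load the TTE into the D-TLB (kvmap_dtlb_load). */
}

int main(void)
{
	struct ktsb_entry ent = { 0, 0 };

	/* Example with made-up values: install one entry. */
	kvmap_dtlb_tsb4m_load_sketch(&ent, 0xdeadbeefULL, 0x50ULL);
	return 0;
}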