author     David S. Miller <davem@sunset.davemloft.net>   2007-12-13 06:13:38 -0800
committer  David S. Miller <davem@sunset.davemloft.net>   2007-12-13 06:13:38 -0800
commit     8f361453d8e9a67c85b2cf9b93c642c2d8fe0462 (patch)
tree       79c021106e2c3f708d085285680a29bb665c0b37 /arch/sparc64
parent     da8cadb31b82c9d41fc593c8deab6aa20b162d6b (diff)
download   linux-8f361453d8e9a67c85b2cf9b93c642c2d8fe0462.tar.bz2
[SPARC64]: Fix two kernel linear mapping setup bugs.
This was caught and identified by Greg Onufer.

Since we set up the 256M/4M bitmap table after taking over the trap table, it's possible for some 4M mappings to get loaded into the TLB beforehand for ranges that will later be covered by 256M mappings. This can cause illegal TLB multiple-match conditions. Fix this by setting up the bitmap before we take over the trap table.

Next, __flush_tlb_all() was not doing anything on hypervisor platforms. Fix by adding sun4v_mmu_demap_all() and calling it.

Signed-off-by: David S. Miller <davem@davemloft.net>
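For orientation, a minimal C sketch of the call ordering this patch establishes in paging_init() (heavily trimmed; only the calls relevant to the bitmap fix are shown, the actual change is in the init.c hunks below):

void __init paging_init(void)
{
	/* ... earlier setup elided ... */
	inherit_prom_mappings();

	/* Read the physical memory ranges from OBP and build the
	 * 256M/4M kpte bitmap *before* installing our own trap table,
	 * so no 4M TLB entry can be loaded for a range that will be
	 * mapped with 256M pages. */
	read_obp_memory("reg", &pall[0], &pall_ents);
	init_kpte_bitmap();

	/* Only now is it safe to take over the trap table. */
	setup_tba();

	/* ... remainder of paging_init() elided ... */
}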
Diffstat (limited to 'arch/sparc64')
-rw-r--r--   arch/sparc64/kernel/entry.S   12
-rw-r--r--   arch/sparc64/mm/init.c        29
2 files changed, 32 insertions, 9 deletions
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index c9b0d7af64ae..ea257e828364 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -2593,3 +2593,15 @@ sun4v_mmustat_info:
retl
nop
.size sun4v_mmustat_info, .-sun4v_mmustat_info
+
+ .globl sun4v_mmu_demap_all
+ .type sun4v_mmu_demap_all,#function
+sun4v_mmu_demap_all:
+ clr %o0
+ clr %o1
+ mov HV_MMU_ALL, %o2
+ mov HV_FAST_MMU_DEMAP_ALL, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_mmu_demap_all, .-sun4v_mmu_demap_all
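The stub follows the sun4v fast-trap convention: the two reserved arguments are cleared in %o0/%o1, the HV_MMU_ALL flags (both the instruction and data MMU) go in %o2, the service number HV_FAST_MMU_DEMAP_ALL goes in %o5, and "ta HV_FAST_TRAP" enters the hypervisor. Below is a trimmed C-side sketch of how the new entry point is consumed in __flush_tlb_all(); the exact prototype is an assumption here, and the hypervisor status the stub leaves in %o0 is simply ignored by the caller:

/* Sketch only: the dispatch as changed by this patch, other branches elided. */
extern void sun4v_mmu_demap_all(void);

void __flush_tlb_all(void)
{
	/* ... interrupts are disabled around the flush (pstate
	 * save/restore elided) ... */
	if (tlb_type == hypervisor) {
		/* sun4v: a single hypervisor call demaps every TLB
		 * entry on this cpu. */
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		/* ... per-entry diagnostic-register flush loop,
		 * unchanged by this patch ... */
	}
	/* ... cheetah/cheetah+ handling and pstate restore elided ... */
}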
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index e18ccf85224f..fbeb55d71e76 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1133,14 +1133,9 @@ static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
}
}
-static void __init kernel_physical_mapping_init(void)
+static void __init init_kpte_bitmap(void)
{
unsigned long i;
-#ifdef CONFIG_DEBUG_PAGEALLOC
- unsigned long mem_alloced = 0UL;
-#endif
-
- read_obp_memory("reg", &pall[0], &pall_ents);
for (i = 0; i < pall_ents; i++) {
unsigned long phys_start, phys_end;
@@ -1149,14 +1144,24 @@ static void __init kernel_physical_mapping_init(void)
phys_end = phys_start + pall[i].reg_size;
mark_kpte_bitmap(phys_start, phys_end);
+ }
+}
+static void __init kernel_physical_mapping_init(void)
+{
#ifdef CONFIG_DEBUG_PAGEALLOC
+ unsigned long i, mem_alloced = 0UL;
+
+ for (i = 0; i < pall_ents; i++) {
+ unsigned long phys_start, phys_end;
+
+ phys_start = pall[i].phys_addr;
+ phys_end = phys_start + pall[i].reg_size;
+
mem_alloced += kernel_map_range(phys_start, phys_end,
PAGE_KERNEL);
-#endif
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
printk("Allocated %ld bytes for kernel page tables.\n",
mem_alloced);
@@ -1398,6 +1403,10 @@ void __init paging_init(void)
inherit_prom_mappings();
+ read_obp_memory("reg", &pall[0], &pall_ents);
+
+ init_kpte_bitmap();
+
/* Ok, we can use our TLB miss and window trap handlers safely. */
setup_tba();
@@ -1904,7 +1913,9 @@ void __flush_tlb_all(void)
"wrpr %0, %1, %%pstate"
: "=r" (pstate)
: "i" (PSTATE_IE));
- if (tlb_type == spitfire) {
+ if (tlb_type == hypervisor) {
+ sun4v_mmu_demap_all();
+ } else if (tlb_type == spitfire) {
for (i = 0; i < 64; i++) {
/* Spitfire Errata #32 workaround */
/* NOTE: Always runs on spitfire, so no