Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--  arch/x86_64/mm/init.c | 28 +++++++++++++++++-----------
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 2b1d6c382396..be483a1d7b54 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -312,12 +312,19 @@ void __init init_memory_mapping(unsigned long start, unsigned long end)
extern struct x8664_pda cpu_pda[NR_CPUS];
-/* Assumes all CPUs still execute in init_mm */
-void zap_low_mappings(void)
+void __cpuinit zap_low_mappings(int cpu)
{
- pgd_t *pgd = pgd_offset_k(0UL);
- pgd_clear(pgd);
- flush_tlb_all();
+ if (cpu == 0) {
+ pgd_t *pgd = pgd_offset_k(0UL);
+ pgd_clear(pgd);
+ } else {
+ /*
+ * For APs, zap the low identity mappings by switching cr3
+ * to init_level4_pgt and doing a local full TLB flush.
+ */
+ asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+ }
+ __flush_tlb_all();
}
/* Compute zone sizes for the DMA and DMA32 zones in a node. */
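A note on the hunk above: the BSP (cpu 0) can clear PGD slot 0 directly because at that point every CPU still runs on init_mm, whereas a later-booting AP comes out of the trampoline on boot_level4_pgt and drops the low 1:1 mappings simply by loading cr3 with init_level4_pgt, whose slot 0 is already clear. A hedged kernel-context sketch of the AP side; the wrapper function and call site are illustrative assumptions, and only zap_low_mappings() itself comes from this patch:

/*
 * Illustrative sketch (not part of the patch): how an AP bringup
 * path might call the new zap_low_mappings() once the AP is running
 * kernel code.  smp_processor_id() and __cpuinit are real kernel
 * symbols of this era; do_ap_bringup_example() is made up.
 */
static void __cpuinit do_ap_bringup_example(void)
{
	int cpu = smp_processor_id();	/* logical id of this AP */

	/*
	 * For cpu != 0 this loads %cr3 with
	 * __pa_symbol(&init_level4_pgt); the cr3 write discards all
	 * non-global TLB entries, and the trailing __flush_tlb_all()
	 * ensures no stale low identity translations survive on
	 * this CPU.
	 */
	zap_low_mappings(cpu);
}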
@@ -474,14 +481,13 @@ void __init mem_init(void)
datasize >> 10,
initsize >> 10);
+#ifdef CONFIG_SMP
/*
- * Subtle. SMP is doing its boot stuff late (because it has to
- * fork idle threads) - but it also needs low mappings for the
- * protected-mode entry to work. We zap these entries only after
- * the WP-bit has been tested.
+ * Sync the boot_level4_pgt mappings with init_level4_pgt,
+ * except for the low identity mappings, which are already zapped
+ * in init_level4_pgt. This sync-up is essential for AP bringup.
*/
-#ifndef CONFIG_SMP
- zap_low_mappings();
+ memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}
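On the second hunk: with PTRS_PER_PGD = 512 and 8-byte pgd_t entries, the memcpy copies entries 1..511 (4088 bytes), deliberately starting at index 1 so that slot 0, the already-zapped low identity mapping, is not copied back into boot_level4_pgt. A minimal userspace sketch of the same pointer arithmetic, with stand-in types; the names mirror the kernel's but the program is purely illustrative:

#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD 512		/* entries per x86_64 top-level page table */
typedef unsigned long pgd_t;		/* stand-in for the kernel's pgd_t */

static pgd_t init_level4_pgt[PTRS_PER_PGD];
static pgd_t boot_level4_pgt[PTRS_PER_PGD];

int main(void)
{
	int i;

	/* Pretend the kernel populated its page table entries. */
	for (i = 0; i < PTRS_PER_PGD; i++)
		init_level4_pgt[i] = 0x1000UL * i;
	boot_level4_pgt[0] = 0xdeadUL;	/* stale low identity slot */

	/*
	 * Same expression as the patch: pointer arithmetic on pgd_t *
	 * starts the copy at entry 1, so slot 0 (already zapped in
	 * init_level4_pgt) stays untouched in boot_level4_pgt.
	 */
	memcpy(boot_level4_pgt + 1, init_level4_pgt + 1,
	       (PTRS_PER_PGD - 1) * sizeof(pgd_t));

	printf("slot0=%#lx slot1=%#lx slot511=%#lx\n",
	       boot_level4_pgt[0], boot_level4_pgt[1],
	       boot_level4_pgt[511]);
	return 0;
}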