author		Catalin Marinas <catalin.marinas@arm.com>	2014-03-26 18:25:55 +0000
committer	Catalin Marinas <catalin.marinas@arm.com>	2014-04-05 10:06:18 +0100
commit		c218bca74eeafa2f8528b6bbb34d112075fcf40a (patch)
tree		29b3e350efca77e073436df3cf78b818bf57cc34 /arch
parent		35a86976924a9eda7775b5b02ad47268dca1a5b4 (diff)
download	linux-c218bca74eeafa2f8528b6bbb34d112075fcf40a.tar.bz2
arm64: Relax the kernel cache requirements for boot
With system caches for the host OS, or architected caches for a guest OS, we cannot easily guarantee that there are no dirty or stale cache lines for the areas of memory written by the kernel during boot with the MMU off (and therefore with non-cacheable accesses). This patch adds the necessary cache maintenance during boot and relaxes the booting requirements.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
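As an illustration of the sequence the patch adds to set_cpu_boot_mode_flag below, here is a minimal C sketch (not part of the patch: the variable and function names are hypothetical, and a 64-byte line size is assumed where the kernel uses its real constants):

static unsigned int boot_flag __attribute__((aligned(64)));

static void write_flag_mmu_off(unsigned int val)
{
	/* Clean a potentially dirty line so it cannot later be evicted
	 * on top of the non-cacheable store below. */
	asm volatile("dc cvac, %0" : : "r" (&boot_flag) : "memory");
	asm volatile("dsb sy" : : : "memory");

	boot_flag = val;	/* store made with the MMU (and caches) off */

	/* Clean+invalidate so a later cacheable read cannot hit a line
	 * that went stale while the MMU was off. */
	asm volatile("dc civac, %0" : : "r" (&boot_flag) : "memory");
	asm volatile("dsb sy" : : : "memory");
}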
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/kernel/head.S	30
-rw-r--r--	arch/arm64/mm/cache.S		9
2 files changed, 37 insertions, 2 deletions
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 61035d6814cb..26109682d2fa 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -26,6 +26,7 @@
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
+#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
@@ -229,7 +230,11 @@ ENTRY(set_cpu_boot_mode_flag)
cmp w20, #BOOT_CPU_MODE_EL2
b.ne 1f
add x1, x1, #4
-1: str w20, [x1] // This CPU has booted in EL1
+1: dc cvac, x1 // Clean potentially dirty cache line
+ dsb sy
+ str w20, [x1] // This CPU has booted in EL1
+ dc civac, x1 // Clean&invalidate potentially stale cache line
+ dsb sy
ret
ENDPROC(set_cpu_boot_mode_flag)
@@ -240,8 +245,9 @@ ENDPROC(set_cpu_boot_mode_flag)
* This is not in .bss, because we set it sufficiently early that the boot-time
* zeroing of .bss would clobber it.
*/
- .pushsection .data
+ .pushsection .data..cacheline_aligned
ENTRY(__boot_cpu_mode)
+ .align L1_CACHE_SHIFT
.long BOOT_CPU_MODE_EL2
.long 0
.popsection
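__boot_cpu_mode is also moved into .data..cacheline_aligned and padded to a full cache line, so the dc operations in set_cpu_boot_mode_flag can never clean or invalidate an unrelated variable sharing the line. The rough C equivalent (a sketch: 64 stands in for 1 << L1_CACHE_SHIFT, and the variable name is hypothetical):

static unsigned int example_boot_mode[2]
	__attribute__((aligned(64), section(".data..cacheline_aligned")));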
@@ -408,6 +414,15 @@ ENDPROC(__calc_phys_offset)
*/
__create_page_tables:
pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
+ mov x27, lr
+
+ /*
+ * Invalidate the idmap and swapper page tables to avoid potential
+ * dirty cache lines being evicted.
+ */
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
+ bl __inval_cache_range
/*
* Clear the idmap and swapper page tables.
@@ -470,6 +485,17 @@ __create_page_tables:
add x0, x26, #2 * PAGE_SIZE // section table address
create_pgd_entry x26, x0, x5, x6, x7
#endif
+
+ /*
+ * Since the page tables have been populated with non-cacheable
+ * accesses (MMU disabled), invalidate the idmap and swapper page
+ * tables again to remove any speculatively loaded cache lines.
+ */
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
+ bl __inval_cache_range
+
+ mov lr, x27
ret
ENDPROC(__create_page_tables)
.ltorg
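The page-table range is invalidated twice: once before the tables are written, so that no dirty line can later be evicted on top of the non-cacheable stores, and once after, to discard any lines speculatively fetched while the tables were being populated. A C sketch of that ordering (hypothetical names: inval_cache_range() stands in for __inval_cache_range, and a 3-page SWAPPER_DIR_SIZE is assumed):

#include <stdint.h>

extern char idmap_pg_dir[], swapper_pg_dir[];
extern void inval_cache_range(uintptr_t start, uintptr_t end);
#define EXAMPLE_SWAPPER_DIR_SIZE (3 * 4096)	/* assumption for the sketch */

static void create_page_tables_sketch(void)
{
	uintptr_t start = (uintptr_t)idmap_pg_dir;
	uintptr_t end   = (uintptr_t)swapper_pg_dir + EXAMPLE_SWAPPER_DIR_SIZE;

	inval_cache_range(start, end);	/* 1. drop potentially dirty lines */
	/* 2. ...zero and populate the tables with non-cacheable stores... */
	inval_cache_range(start, end);	/* 3. drop speculatively loaded lines */
}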
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index c46f48b33c14..e803a62e0e45 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -168,6 +168,14 @@ ENTRY(__flush_dcache_area)
ENDPROC(__flush_dcache_area)
/*
+ * __inval_cache_range(start, end)
+ * - start   - start address of region
+ * - end     - end address of region
+ */
+ENTRY(__inval_cache_range)
+ /* FALLTHROUGH */
+
+/*
* __dma_inv_range(start, end)
* - start - virtual start address of region
* - end - virtual end address of region
@@ -183,6 +191,7 @@ __dma_inv_range:
b.lo 1b
dsb sy
ret
+ENDPROC(__inval_cache_range)
ENDPROC(__dma_inv_range)
/*
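__inval_cache_range falls through into the body of __dma_inv_range, which rounds the start address down to a cache-line boundary and invalidates by VA, line by line, to the end of the range, finishing with a dsb. A C sketch of that loop (assuming a fixed 64-byte line where the assembly reads the real size from CTR_EL0 via dcache_line_size):

#include <stdint.h>

void inval_cache_range(uintptr_t start, uintptr_t end)
{
	const uintptr_t line = 64;	/* assumed; the assembly queries CTR_EL0 */
	uintptr_t addr;

	/* Round start down so the first partial line is covered; end is
	 * compared unrounded so the last partial line is covered too. */
	for (addr = start & ~(line - 1); addr < end; addr += line)
		asm volatile("dc ivac, %0" : : "r" (addr) : "memory");
	asm volatile("dsb sy" : : : "memory");	/* complete before returning */
}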