author    Steve Capper <steve.capper@arm.com>  2019-08-07 16:55:17 +0100
committer Will Deacon <will@kernel.org>        2019-08-09 11:17:16 +0100
commit    90ec95cda91a021d82351c976896a63aa364ebf1 (patch)
tree      6f5295463a65f0cda5e79bd47998ab10df11c0f6 /arch/arm64/kernel
parent    99426e5e8c9f11b9de65e7c1200868e8a9ceaa47 (diff)
download  linux-90ec95cda91a021d82351c976896a63aa364ebf1.tar.bz2
arm64: mm: Introduce VA_BITS_MIN
In order to support 52-bit kernel addresses detectable at boot time, the
kernel needs to know the most conservative VA_BITS possible should it
need to fall back to this quantity due to lack of hardware support.

A new compile time constant VA_BITS_MIN is introduced in this patch and
it is employed in the KASAN end address, KASLR, and EFI stub.

For Arm, if 52-bit VA support is unavailable the fallback is to 48 bits.

In other words: VA_BITS_MIN = min(48, VA_BITS)

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
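The hunks below only touch consumers of the new constant (the diffstat is
limited to arch/arm64/kernel), so the definition itself is not shown. As a
point of reference, a minimal sketch consistent with the rule
VA_BITS_MIN = min(48, VA_BITS) could look like the following; the header it
would live in is an assumption here, not something this page shows:

/*
 * Sketch only: a compile-time VA_BITS_MIN consistent with
 * min(48, VA_BITS). The real definition sits outside the
 * arch/arm64/kernel diffstat shown below.
 */
#if VA_BITS > 48
#define VA_BITS_MIN	(48)
#else
#define VA_BITS_MIN	(VA_BITS)
#endif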
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/head.S   2
-rw-r--r--  arch/arm64/kernel/kaslr.c  6
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 2cdacd1c141b..ac58c69993ec 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -314,7 +314,7 @@ __create_page_tables:
 	mov	x5, #52
 	cbnz	x6, 1f
 #endif
-	mov	x5, #VA_BITS
+	mov	x5, #VA_BITS_MIN
 1:
 	adr_l	x6, vabits_user
 	str	x5, [x6]
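(For reference: x6 reflects whether the CPU reports 52-bit VA support, tested
just above this hunk outside the context shown, so the boot code stores either
52 or the conservative VA_BITS_MIN into vabits_user rather than the
compile-time VA_BITS.)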
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 708051655ad9..5a59f7567f9c 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -116,15 +116,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	/*
 	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
 	 * kernel image offset from the seed. Let's place the kernel in the
-	 * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of
+	 * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
 	 * the lower and upper quarters to avoid colliding with other
 	 * allocations.
 	 * Even if we could randomize at page granularity for 16k and 64k pages,
 	 * let's always round to 2 MB so we don't interfere with the ability to
 	 * map using contiguous PTEs
 	 */
-	mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
-	offset = BIT(VA_BITS - 3) + (seed & mask);
+	mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
+	offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
 
 	/* use the top 16 bits to randomize the linear region */
 	memstart_offset_seed = seed >> 48;
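To make the randomization window concrete, here is a small standalone sketch
of the same arithmetic for VA_BITS_MIN = 48. SZ_2M and BIT() are stand-ins for
the kernel macros, and the seed value is made up purely for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's SZ_2M and BIT() macros. */
#define SZ_2M	(2ULL * 1024 * 1024)
#define BIT(n)	(1ULL << (n))

int main(void)
{
	const unsigned va_bits_min = 48;          /* conservative fallback */
	uint64_t seed = 0x123456789abcdef0ULL;    /* example seed only */

	/* Same arithmetic as the kaslr_early_init() hunk above. */
	uint64_t mask = ((1ULL << (va_bits_min - 2)) - 1) & ~(SZ_2M - 1);
	uint64_t offset = BIT(va_bits_min - 3) + (seed & mask);

	/*
	 * mask keeps bits [21, 45]: every candidate offset is 2 MiB
	 * aligned, and adding BIT(45) shifts the window up so the kernel
	 * lands in the middle half of the area rather than its lower
	 * quarter.
	 */
	printf("mask   = 0x%016" PRIx64 "\n", mask);
	printf("offset = 0x%016" PRIx64 "\n", offset);
	return 0;
}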