author		Vasily Gorbik <gor@linux.ibm.com>	2020-10-06 22:12:39 +0200
committer	Vasily Gorbik <gor@linux.ibm.com>	2021-06-18 16:41:19 +0200
commit		0c4f2623b95779fe8cfb277fa255e4b91c0f96f0 (patch)
tree		f7c6c12dc982a80e4ed7de67fdb829c0682f84ba /arch/s390/mm
parent		b5415c8f9755069640aad184293198bcf794f66d (diff)
download	linux-0c4f2623b95779fe8cfb277fa255e4b91c0f96f0.tar.bz2
s390: setup kernel memory layout early
Currently there are two separate places where the kernel memory layout has to be known and adjusted:

1. early kasan setup.
2. paging setup later.

Those two places had to be kept in sync and adjusted to reflect peculiar technical details of one another. With additional factors that influence the kernel memory layout, such as the ultravisor secure storage limit, the complexity of keeping the two in sync grew even more.

Besides that, looking forward towards creating the identity mapping and enabling DAT before jumping into the uncompressed kernel would also require full knowledge of, and control over, the kernel memory layout.

So, de-duplicate the logic and move kernel memory layout setup into the decompressor.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
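As an illustration of the logic being de-duplicated, below is a minimal stand-alone C sketch of how the decompressor can clamp the top of the kernel address space once, using the same ultravisor conditions that the removed has_uv_sec_stor_limit() helper checked. The setup_kernel_memory_layout() name and the stubbed helpers are assumptions made for this sketch, not the literal patch:

/* Minimal stand-alone sketch; the kernel helpers are stubbed out. */
#include <stdbool.h>
#include <stdio.h>

#define _REGION1_SIZE (1UL << 53)	/* top of the 4-level address space */

/* Stubs standing in for the real kernel facilities (assumptions). */
static bool is_prot_virt_host(void)  { return true; }
static bool is_prot_virt_guest(void) { return false; }
static bool test_facility(int nr)    { return nr == 158; }
static struct { unsigned long max_sec_stor_addr; } uv_info = { 1UL << 52 };

/*
 * Compute the usable end of the kernel virtual address space once.
 * The conditions mirror the removed has_uv_sec_stor_limit(): only a
 * protected-virtualization host with facility 158 and a reported
 * secure storage limit needs the address space clamped.
 */
static unsigned long setup_kernel_memory_layout(void)
{
	unsigned long vmax = _REGION1_SIZE;

	if (is_prot_virt_host() && !is_prot_virt_guest() &&
	    test_facility(158) && uv_info.max_sec_stor_addr &&
	    uv_info.max_sec_stor_addr < vmax)
		vmax = uv_info.max_sec_stor_addr;
	return vmax;
}

int main(void)
{
	printf("vmax = 0x%lx\n", setup_kernel_memory_layout());
	return 0;
}

With the layout fixed this early, constants such as VMALLOC_START, MODULES_VADDR and MODULES_END are already final by the time kasan_init.c runs, which is why the hunks below can consume them directly instead of recomputing kasan_vmax.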
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/kasan_init.c	35
1 file changed, 6 insertions(+), 29 deletions(-)
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index db4d303aaaa9..a0fdc6dc5f9d 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -13,7 +13,6 @@
#include <asm/setup.h>
#include <asm/uv.h>
-unsigned long kasan_vmax;
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
@@ -251,28 +250,9 @@ static void __init kasan_early_detect_facilities(void)
}
}
-static bool __init has_uv_sec_stor_limit(void)
-{
- /*
- * keep these conditions in line with setup_uv()
- */
- if (!is_prot_virt_host())
- return false;
-
- if (is_prot_virt_guest())
- return false;
-
- if (!test_facility(158))
- return false;
-
- return !!uv_info.max_sec_stor_addr;
-}
-
void __init kasan_early_init(void)
{
- unsigned long untracked_mem_end;
unsigned long shadow_alloc_size;
- unsigned long vmax_unlimited;
unsigned long initrd_end;
unsigned long memsize;
unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
@@ -306,9 +286,6 @@ void __init kasan_early_init(void)
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);
- untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
- if (has_uv_sec_stor_limit())
- kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
/* init kasan zero shadow */
crst_table_init((unsigned long *)kasan_early_shadow_p4d,
@@ -375,18 +352,18 @@ void __init kasan_early_init(void)
*/
/* populate kasan shadow (for identity mapping and zero page mapping) */
kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
- if (IS_ENABLED(CONFIG_MODULES))
- untracked_mem_end = kasan_vmax - MODULES_LEN;
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
- untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
/* shallowly populate kasan shadow for vmalloc and modules */
- kasan_early_pgtable_populate(__sha(untracked_mem_end), __sha(kasan_vmax),
+ kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
POPULATE_SHALLOW);
}
/* populate kasan shadow for untracked memory */
- kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_mem_end),
+ kasan_early_pgtable_populate(__sha(ident_map_size),
+ IS_ENABLED(CONFIG_KASAN_VMALLOC) ?
+ __sha(VMALLOC_START) :
+ __sha(MODULES_VADDR),
POPULATE_ZERO_SHADOW);
- kasan_early_pgtable_populate(__sha(kasan_vmax), __sha(vmax_unlimited),
+ kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
POPULATE_ZERO_SHADOW);
/* memory allocated for identity mapping structs will be freed later */
pgalloc_freeable = pgalloc_pos;
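To summarize the shadow layout that the populate calls above establish, here is a small self-contained sketch mapping a virtual address to the populate mode used for its shadow. The boundary values are placeholders (in the kernel they come from the decompressor-computed layout), ident_map_size is folded into memsize for brevity, and shadow_mode() itself is an illustrative helper that does not exist in the kernel:

#include <stdbool.h>
#include <stdio.h>

enum populate_mode { POPULATE_MAP, POPULATE_SHALLOW, POPULATE_ZERO_SHADOW };

/* Placeholder boundaries (assumptions); the real values are fixed by the
 * decompressor before the kernel proper runs. */
static const unsigned long memsize       = 1UL << 38;	/* end of identity map */
static const unsigned long VMALLOC_START = (1UL << 53) - (1UL << 41);
static const unsigned long MODULES_END   = 1UL << 53;	/* == _REGION1_SIZE */
static const bool kasan_vmalloc = true;	/* CONFIG_KASAN_VMALLOC */

/* Which mode kasan_early_init() populates the shadow for a given
 * address with after this patch. */
static enum populate_mode shadow_mode(unsigned long addr)
{
	if (addr < memsize)
		return POPULATE_MAP;		/* identity map: real shadow pages */
	if (kasan_vmalloc && addr >= VMALLOC_START && addr < MODULES_END)
		return POPULATE_SHALLOW;	/* vmalloc + modules: top-level tables only */
	return POPULATE_ZERO_SHADOW;		/* untracked memory, leftover space */
}

int main(void)
{
	printf("%d %d %d\n", shadow_mode(0UL), shadow_mode(VMALLOC_START),
	       shadow_mode(memsize));
	return 0;
}

Without CONFIG_KASAN_VMALLOC the shallow range disappears and the zero shadow simply runs up to MODULES_VADDR, matching the ternary in the diff above; the modules area itself then gets real shadow populated at module load time.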