author    Alexandre Ghiti <alex@ghiti.fr>  2021-06-24 14:00:41 +0200
committer Palmer Dabbelt <palmerdabbelt@google.com>  2021-06-30 21:18:58 -0700
commit    e5c35fa0401971701dcd7675f471b664698244dd (patch)
tree      6f42965bac8ba06c33fa462472f852d92be187cd /arch/riscv/mm
parent    c10bc260e7c030364b5150aac7ebf048ddfb9502 (diff)
download  linux-e5c35fa0401971701dcd7675f471b664698244dd.tar.bz2
riscv: Map the kernel with correct permissions the first time
For 64-bit kernels, we map all the kernel with write and execute
permissions and afterwards remove writability from text and
executability from data.

For 32-bit kernels, the kernel mapping resides in the linear mapping,
so we map all the linear mapping as writable and executable and
afterwards remove those properties for unused memory and for the
kernel mapping as described above.

Change this behavior to directly map the kernel with the correct
permissions the first time, avoiding a pass over the whole mapping to
fix the permissions afterwards.

At the same time, this fixes an issue introduced by commit 2bfc6cd81bd1
("riscv: Move kernel mapping outside of linear mapping") as reported at
https://github.com/starfive-tech/linux/issues/17.

Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
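The rewritten mark_rodata_ro() in the diff below leans on a
set_kernel_memory() helper introduced just before this patch (see the
parent commit c10bc260e7c0). For readers without the tree at hand, here
is a minimal sketch of what such a helper plausibly looks like; the
exact signature and the page-count rounding are assumptions for
illustration, not a quote from the kernel sources:

/*
 * Apply a set_memory_*() callback to every page covered by
 * [startp, endp). Assumes the usual kernel definitions of
 * PAGE_ALIGN() and PAGE_SHIFT are in scope.
 */
static int __init set_kernel_memory(char *startp, char *endp,
				    int (*set_memory)(unsigned long start,
						      int num_pages))
{
	unsigned long start = (unsigned long)startp;
	unsigned long end = (unsigned long)endp;
	int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;

	return set_memory(start, num_pages);
}

With a helper of this shape, set_kernel_memory(__start_rodata, _data,
set_memory_ro) replaces the open-coded
"(data_start - rodata_start) >> PAGE_SHIFT" arithmetic that the bottom
of this patch deletes.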
Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--  arch/riscv/mm/init.c  113
1 file changed, 50 insertions(+), 63 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4c4c92ce0bb8..dc37b9bb8cb9 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -436,6 +436,43 @@ asmlinkage void __init __copy_data(void)
}
#endif
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+ if (is_va_kernel_text(va))
+ return PAGE_KERNEL_READ_EXEC;
+
+	/*
+	 * In 64-bit kernels, the kernel mapping is outside the linear
+	 * mapping, so we must protect its linear mapping alias from
+	 * being executed and written.
+	 * The rodata section is marked read-only in mark_rodata_ro().
+	 */
+ if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
+ return PAGE_KERNEL_READ;
+
+ return PAGE_KERNEL;
+}
+
+void mark_rodata_ro(void)
+{
+ set_kernel_memory(__start_rodata, _data, set_memory_ro);
+ if (IS_ENABLED(CONFIG_64BIT))
+ set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
+ set_memory_ro);
+
+ debug_checkwx();
+}
+#else
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+ if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
+ return PAGE_KERNEL;
+
+ return PAGE_KERNEL_EXEC;
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
/*
* setup_vm() is called from head.S with MMU-off.
*
@@ -454,7 +491,8 @@ asmlinkage void __init __copy_data(void)
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
-uintptr_t load_pa, load_sz;
+static uintptr_t load_pa __initdata;
+uintptr_t load_sz;
#ifdef CONFIG_XIP_KERNEL
#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa)))
#define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz)))
@@ -465,7 +503,8 @@ uintptr_t xiprom, xiprom_sz;
#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
#define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom)))
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+ __always_unused bool early)
{
uintptr_t va, end_va;
@@ -484,7 +523,8 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
map_size, PAGE_KERNEL);
}
#else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+ bool early)
{
uintptr_t va, end_va;
@@ -492,7 +532,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
for (va = kernel_virt_addr; va < end_va; va += map_size)
create_pgd_mapping(pgdir, va,
load_pa + (va - kernel_virt_addr),
- map_size, PAGE_KERNEL_EXEC);
+ map_size,
+ early ?
+ PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif
@@ -569,7 +611,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
* us to reach paging_init(). We map all memory banks later
* in setup_vm_final() below.
*/
- create_kernel_page_table(early_pg_dir, map_size);
+ create_kernel_page_table(early_pg_dir, map_size, true);
#ifndef __PAGETABLE_PMD_FOLDED
/* Setup early PMD for DTB */
@@ -645,22 +687,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#endif
}
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void)
-{
- unsigned long text_start = (unsigned long)lm_alias(_start);
- unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
- unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
- unsigned long data_start = (unsigned long)lm_alias(_data);
-
- set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
- set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-
- set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
- set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-}
-#endif
-
static void __init setup_vm_final(void)
{
uintptr_t va, map_size;
@@ -693,21 +719,15 @@ static void __init setup_vm_final(void)
map_size = best_map_size(start, end - start);
for (pa = start; pa < end; pa += map_size) {
va = (uintptr_t)__va(pa);
- create_pgd_mapping(swapper_pg_dir, va, pa,
- map_size,
-#ifdef CONFIG_64BIT
- PAGE_KERNEL
-#else
- PAGE_KERNEL_EXEC
-#endif
- );
+ create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
+ pgprot_from_va(va));
}
}
#ifdef CONFIG_64BIT
/* Map the kernel */
- create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+ create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
#endif
/* Clear fixmap PTE and PMD mappings */
@@ -738,39 +758,6 @@ static inline void setup_vm_final(void)
}
#endif /* CONFIG_MMU */
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void __init protect_kernel_text_data(void)
-{
- unsigned long text_start = (unsigned long)_start;
- unsigned long init_text_start = (unsigned long)__init_text_begin;
- unsigned long init_data_start = (unsigned long)__init_data_begin;
- unsigned long rodata_start = (unsigned long)__start_rodata;
- unsigned long data_start = (unsigned long)_data;
-#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
- unsigned long end_va = kernel_virt_addr + load_sz;
-#else
- unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
-#endif
-
- set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
- set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
- set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
- /* rodata section is marked readonly in mark_rodata_ro */
- set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
- set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
- unsigned long rodata_start = (unsigned long)__start_rodata;
- unsigned long data_start = (unsigned long)_data;
-
- set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-
- debug_checkwx();
-}
-#endif
-
#ifdef CONFIG_KEXEC_CORE
/*
* reserve_crashkernel() - reserves memory for crash kernel