From 6ca2b9ca459a598b78265477d288fdec8a0fdd6d Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Mon, 5 Sep 2022 23:54:04 +0100
Subject: arm64/sysreg: Add _EL1 into ID_AA64PFR1_EL1 constant names

Our standard is to include the _EL1 in the constant names for registers,
but we did not do that for ID_AA64PFR1_EL1; update it to do so in
preparation for conversion to automatic generation. No functional change.

Signed-off-by: Mark Brown
Reviewed-by: Kristina Martsenko
Link: https://lore.kernel.org/r/20220905225425.1871461-8-broonie@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/mmu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64/mm/mmu.c')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e7ad44585f40..5810eddfb48e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -686,7 +686,7 @@ static bool arm64_early_this_cpu_has_bti(void)
 
 	pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
 	return cpuid_feature_extract_unsigned_field(pfr1,
-						    ID_AA64PFR1_BT_SHIFT);
+						    ID_AA64PFR1_EL1_BT_SHIFT);
 }
 
 /*
--
cgit v1.2.3


From 739e49e0fc80990a351961c99a3142094822f040 Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Tue, 20 Sep 2022 09:49:51 +0800
Subject: arm64: mm: handle ARM64_KERNEL_USES_PMD_MAPS in vmemmap_populate()

Directly check ARM64_SWAPPER_USES_SECTION_MAPS to choose base page or
PMD level huge page mapping in vmemmap_populate() to simplify code a bit.

Signed-off-by: Kefeng Wang
Link: https://lore.kernel.org/r/20220920014951.196191-1-wangkefeng.wang@huawei.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/mmu.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

(limited to 'arch/arm64/mm/mmu.c')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e7ad44585f40..69deed27dec8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1180,14 +1180,6 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
 }
 #endif
 
-#if !ARM64_KERNEL_USES_PMD_MAPS
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
-		struct vmem_altmap *altmap)
-{
-	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
-	return vmemmap_populate_basepages(start, end, node, altmap);
-}
-#else	/* !ARM64_KERNEL_USES_PMD_MAPS */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		struct vmem_altmap *altmap)
 {
@@ -1199,6 +1191,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	pmd_t *pmdp;
 
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+
+	if (!ARM64_KERNEL_USES_PMD_MAPS)
+		return vmemmap_populate_basepages(start, end, node, altmap);
+
 	do {
 		next = pmd_addr_end(addr, end);
 
@@ -1232,7 +1228,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 
 	return 0;
 }
-#endif	/* !ARM64_KERNEL_USES_PMD_MAPS */
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 void vmemmap_free(unsigned long start, unsigned long end,
--
cgit v1.2.3


From b9dd04a20f81333e4b99662f1bbaf7c9e3a1e137 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Wed, 21 Sep 2022 10:48:41 +0300
Subject: arm64/mm: fold check for KFENCE into can_set_direct_map()

KFENCE requires the linear map to be mapped at page granularity, so that
it is possible to protect/unprotect single pages, just like with
rodata_full and DEBUG_PAGEALLOC.

Instead of repeating can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE),
make can_set_direct_map() handle the KFENCE case.

This also prevents potential false positives in kernel_page_present()
that may return true for a non-present page if CONFIG_KFENCE is enabled.

Signed-off-by: Mike Rapoport
Reviewed-by: Anshuman Khandual
Link: https://lore.kernel.org/r/20220921074841.382615-1-rppt@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/mmu.c      | 8 ++------
 arch/arm64/mm/pageattr.c | 8 +++++++-
 2 files changed, 9 insertions(+), 7 deletions(-)

(limited to 'arch/arm64/mm/mmu.c')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 69deed27dec8..e8a48dfd550f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -535,7 +535,7 @@ static void __init map_mem(pgd_t *pgdp)
 	 */
 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
 
-	if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
+	if (can_set_direct_map())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*
@@ -1542,11 +1542,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 
 	VM_BUG_ON(!mhp_range_allowed(start, size, true));
 
-	/*
-	 * KFENCE requires linear map to be mapped at page granularity, so that
-	 * it is possible to protect/unprotect single pages in the KFENCE pool.
-	 */
-	if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
+	if (can_set_direct_map())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 64e985eaa52d..d107c3d434e2 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -21,7 +21,13 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
 
 bool can_set_direct_map(void)
 {
-	return rodata_full || debug_pagealloc_enabled();
+	/*
+	 * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
+	 * mapped at page granularity, so that it is possible to
+	 * protect/unprotect single pages.
+	 */
+	return rodata_full || debug_pagealloc_enabled() ||
+		IS_ENABLED(CONFIG_KFENCE);
 }
 
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
--
cgit v1.2.3
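
For context, a minimal userspace sketch (not kernel code; the configuration values below are assumptions chosen only for illustration) of the consolidated decision that both map_mem() and arch_add_memory() rely on after the last patch: one can_set_direct_map() predicate decides whether the linear map must avoid block and contiguous mappings.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for kernel configuration state; values are assumptions for
 * illustration only. */
static bool rodata_full = true;                  /* CONFIG_RODATA_FULL_DEFAULT_ENABLED */
static bool debug_pagealloc_enabled(void) { return false; }
#define KFENCE_ENABLED 1                         /* models IS_ENABLED(CONFIG_KFENCE) */

#define NO_BLOCK_MAPPINGS  (1U << 0)
#define NO_CONT_MAPPINGS   (1U << 1)

/* Mirrors the post-series arch/arm64/mm/pageattr.c logic: any feature that
 * needs to protect/unprotect single pages forces a page-granular linear map. */
static bool can_set_direct_map(void)
{
	return rodata_full || debug_pagealloc_enabled() || KFENCE_ENABLED;
}

int main(void)
{
	unsigned int flags = 0;

	/* Both map_mem() and arch_add_memory() now share this single check
	 * instead of repeating "|| IS_ENABLED(CONFIG_KFENCE)" at each caller. */
	if (can_set_direct_map())
		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	printf("linear map flags: %#x\n", flags);
	return 0;
}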