From 982aa7c5f0861bf56b2412ca341a13f44c238ba4 Mon Sep 17 00:00:00 2001
From: Kristina Martsenko
Date: Wed, 13 Dec 2017 17:07:16 +0000
Subject: arm64: add kconfig symbol to configure physical address size

ARMv8.2 introduces support for 52-bit physical addresses. To prepare for
supporting this, add a new kconfig symbol to configure the physical
address space size. The symbols will be used in subsequent patches.
Currently the only choice is 48, a later patch will add the option of 52
once the required code is in place.

Tested-by: Suzuki K Poulose
Reviewed-by: Suzuki K Poulose
Tested-by: Bob Picco
Reviewed-by: Bob Picco
Acked-by: Marc Zyngier
Signed-off-by: Kristina Martsenko
[catalin.marinas@arm.com: folded minor patches into this one]
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/pgtable-hwdef.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64/include/asm/pgtable-hwdef.h')

diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index eb0c2bd90de9..c1de9f67980b 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -196,7 +196,7 @@
 /*
  * Highest possible physical address supported.
  */
-#define PHYS_MASK_SHIFT		(48)
+#define PHYS_MASK_SHIFT		(CONFIG_ARM64_PA_BITS)
 #define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)
 
 /*
--
cgit v1.2.3


From 787fd1d019b269af7912249231dfe34a5fe3e7c8 Mon Sep 17 00:00:00 2001
From: Kristina Martsenko
Date: Wed, 13 Dec 2017 17:07:17 +0000
Subject: arm64: limit PA size to supported range

We currently copy the physical address size from
ID_AA64MMFR0_EL1.PARange directly into TCR.(I)PS. This will not work for
4k and 16k granule kernels on systems that support 52-bit physical
addresses, since 52-bit addresses are only permitted with the 64k
granule.

To fix this, fall back to 48 bits when configuring the PA size when the
kernel does not support 52-bit PAs. When it does, fall back to 52, to
avoid similar problems in the future if the PA size is ever increased
above 52.
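In C terms, the clamp that the tcr_compute_pa_size assembly macro below
performs looks like the following hosted sketch; compute_tcr_ps is an
invented name for illustration, not code from the patch:

#include <assert.h>
#include <stdint.h>

/* ID_AA64MMFR0_EL1.PARange encodings used by this series */
#define ID_AA64MMFR0_PARANGE_48	0x5	/* 48-bit PA */
#define ID_AA64MMFR0_PARANGE_52	0x6	/* 52-bit PA, 64k granule only */

/* read PARange, clamp it to what the kernel supports, write TCR.(I)PS */
static uint64_t compute_tcr_ps(uint64_t tcr, uint64_t mmfr0,
			       unsigned int ps_shift, uint64_t parange_max)
{
	uint64_t parange = mmfr0 & 0x7;	/* PARange is ID_AA64MMFR0_EL1[3:0] */

	if (parange > parange_max)
		parange = parange_max;
	return (tcr & ~(UINT64_C(7) << ps_shift)) | (parange << ps_shift);
}

int main(void)
{
	/* CPU reports 52-bit PA, but a 4k-granule kernel caps (I)PS at 48 */
	uint64_t tcr = compute_tcr_ps(0, ID_AA64MMFR0_PARANGE_52,
				      32 /* TCR_IPS_SHIFT */,
				      ID_AA64MMFR0_PARANGE_48);

	assert(((tcr >> 32) & 0x7) == ID_AA64MMFR0_PARANGE_48);
	return 0;
}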
Tested-by: Suzuki K Poulose
Reviewed-by: Suzuki K Poulose
Reviewed-by: Marc Zyngier
Tested-by: Bob Picco
Reviewed-by: Bob Picco
Signed-off-by: Kristina Martsenko
[catalin.marinas@arm.com: tcr_set_pa_size macro renamed to tcr_compute_pa_size]
[catalin.marinas@arm.com: comments added to tcr_compute_pa_size]
[catalin.marinas@arm.com: definitions added for TCR_*PS_SHIFT]
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/assembler.h     | 18 ++++++++++++++++++
 arch/arm64/include/asm/pgtable-hwdef.h |  2 ++
 arch/arm64/include/asm/sysreg.h        |  8 ++++++++
 arch/arm64/kvm/hyp-init.S              |  6 ++----
 arch/arm64/kvm/hyp/s2-setup.c          |  2 ++
 arch/arm64/mm/proc.S                   |  6 ++----
 6 files changed, 34 insertions(+), 8 deletions(-)

(limited to 'arch/arm64/include/asm/pgtable-hwdef.h')

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index aef72d886677..04a92307e6c1 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -350,6 +350,24 @@ alternative_endif
 #endif
 	.endm
 
+/*
+ * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
+ * ID_AA64MMFR0_EL1.PARange value
+ *
+ *	tcr:		register with the TCR_ELx value to be updated
+ *	pos:		PARange bitfield position
+ *	tmp{0,1}:	temporary registers
+ */
+	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
+	mrs	\tmp0, ID_AA64MMFR0_EL1
+	// Narrow PARange to fit the PS field in TCR_ELx
+	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
+	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
+	cmp	\tmp0, \tmp1
+	csel	\tmp0, \tmp1, \tmp0, hi
+	bfi	\tcr, \tmp0, \pos, #3
+	.endm
+
 /*
  * Macro to perform a data cache maintenance for the interval
  * [kaddr, kaddr + size)
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index c1de9f67980b..9be2e9371c52 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -272,6 +272,8 @@
 #define TCR_TG1_4K		(UL(2) << TCR_TG1_SHIFT)
 #define TCR_TG1_64K		(UL(3) << TCR_TG1_SHIFT)
 
+#define TCR_IPS_SHIFT		32
+#define TCR_IPS_MASK		(UL(7) << TCR_IPS_SHIFT)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
 #define TCR_HA			(UL(1) << 39)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 08cc88574659..ec144f480b39 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -471,6 +471,14 @@
 #define ID_AA64MMFR0_TGRAN64_SUPPORTED	0x0
 #define ID_AA64MMFR0_TGRAN16_NI		0x0
 #define ID_AA64MMFR0_TGRAN16_SUPPORTED	0x1
+#define ID_AA64MMFR0_PARANGE_48		0x5
+#define ID_AA64MMFR0_PARANGE_52		0x6
+
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define ID_AA64MMFR0_PARANGE_MAX	ID_AA64MMFR0_PARANGE_52
+#else
+#define ID_AA64MMFR0_PARANGE_MAX	ID_AA64MMFR0_PARANGE_48
+#endif
 
 /* id_aa64mmfr1 */
 #define ID_AA64MMFR1_PAN_SHIFT		20
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 3f9615582377..e2d1fe03662a 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -90,11 +90,9 @@ __do_hyp_init:
 	bfi	x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
 #endif
 	/*
-	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
-	 * TCR_EL2.
+	 * Set the PS bits in TCR_EL2.
 	 */
-	mrs	x5, ID_AA64MMFR0_EL1
-	bfi	x4, x5, #16, #3
+	tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6
 
 	msr	tcr_el2, x4
 
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
index a81f5e10fc8c..603e1ee83e89 100644
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -32,6 +32,8 @@ u32 __hyp_text __init_stage2_translation(void)
 	 * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
 	 */
 	parange = read_sysreg(id_aa64mmfr0_el1) & 7;
+	if (parange > ID_AA64MMFR0_PARANGE_MAX)
+		parange = ID_AA64MMFR0_PARANGE_MAX;
 	val |= parange << 16;
 
 	/* Compute the actual PARange... */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 95233dfc4c39..4f133cb340dc 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -228,11 +228,9 @@ ENTRY(__cpu_setup)
 	tcr_set_idmap_t0sz	x10, x9
 
 	/*
-	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
-	 * TCR_EL1.
+	 * Set the IPS bits in TCR_EL1.
 	 */
-	mrs	x9, ID_AA64MMFR0_EL1
-	bfi	x10, x9, #32, #3
+	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
 #ifdef CONFIG_ARM64_HW_AFDBM
 	/*
 	 * Hardware update of the Access and Dirty bits.
--
cgit v1.2.3


From 529c4b05a3cb2f324aac347042ee6d641478e946 Mon Sep 17 00:00:00 2001
From: Kristina Martsenko
Date: Wed, 13 Dec 2017 17:07:18 +0000
Subject: arm64: handle 52-bit addresses in TTBR

The top 4 bits of a 52-bit physical address are positioned at bits 2..5
in the TTBR registers. Introduce a couple of macros to move the bits
there, and change all TTBR writers to use them.

Leave TTBR0 PAN code unchanged, to avoid complicating it. A system with
52-bit PA will have PAN anyway (because it's ARMv8.1 or later), and a
system without 52-bit PA can only use up to 48-bit PAs. A later patch in
this series will add a kconfig dependency to ensure PAN is configured.

In addition, when using 52-bit PA there is a special alignment
requirement on the top-level table. We don't currently have any VA_BITS
configuration that would violate the requirement, but one could be
added in the future, so add a compile-time BUG_ON to check for it.

Tested-by: Suzuki K Poulose
Reviewed-by: Suzuki K Poulose
Reviewed-by: Marc Zyngier
Tested-by: Bob Picco
Reviewed-by: Bob Picco
Signed-off-by: Kristina Martsenko
[catalin.marinas@arm.com: added TTBR_BADD_MASK_52 comment]
Signed-off-by: Catalin Marinas
---
 arch/arm/include/asm/kvm_mmu.h         |  2 ++
 arch/arm64/include/asm/assembler.h     | 16 ++++++++++++++++
 arch/arm64/include/asm/kvm_mmu.h       |  2 ++
 arch/arm64/include/asm/mmu_context.h   |  2 +-
 arch/arm64/include/asm/pgtable-hwdef.h | 13 +++++++++++++
 arch/arm64/include/asm/pgtable.h       |  6 ++++++
 arch/arm64/kernel/head.S               |  6 ++++--
 arch/arm64/kernel/hibernate-asm.S      | 12 +++++++-----
 arch/arm64/kernel/hibernate.c          |  2 +-
 arch/arm64/kvm/hyp-init.S              |  3 ++-
 arch/arm64/mm/pgd.c                    |  8 ++++++++
 arch/arm64/mm/proc.S                   | 13 ++++++++-----
 virt/kvm/arm/arm.c                     |  2 +-
 13 files changed, 71 insertions(+), 16 deletions(-)

(limited to 'arch/arm64/include/asm/pgtable-hwdef.h')

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index fa6f2174276b..8dbec683638b 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -221,6 +221,8 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return 8;
 }
 
+#define kvm_phys_to_vttbr(addr)		(addr)
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 04a92307e6c1..49ea3def4bd1 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -530,4 +530,20 @@ alternative_else_nop_endif
 #endif
 	.endm
 
+/*
+ * Arrange a physical address in a TTBR register, taking care of 52-bit
+ * addresses.
+ *
+ *	phys:	physical address, preserved
+ *	ttbr:	returns the TTBR value
+ */
+	.macro	phys_to_ttbr, phys, ttbr
+#ifdef CONFIG_ARM64_PA_BITS_52
+	orr	\ttbr, \phys, \phys, lsr #46
+	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
+#else
+	mov	\ttbr, \phys
+#endif
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 672c8684d5c2..747bfff92948 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -309,5 +309,7 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 9d155fa9a507..accc2ff32a0e 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -51,7 +51,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-	unsigned long ttbr = __pa_symbol(empty_zero_page);
+	unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
 
 	write_sysreg(ttbr, ttbr0_el1);
 	isb();
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 9be2e9371c52..f92be11a209a 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PGTABLE_HWDEF_H
 #define __ASM_PGTABLE_HWDEF_H
 
+#include
+
 /*
  * Number of page-table levels required to address 'va_bits' wide
  * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
@@ -279,4 +281,15 @@
 #define TCR_HA			(UL(1) << 39)
 #define TCR_HD			(UL(1) << 40)
 
+/*
+ * TTBR.
+ */
+#ifdef CONFIG_ARM64_PA_BITS_52
+/*
+ * This should be GENMASK_ULL(47, 2).
+ * TTBR_ELx[1] is RES0 in this configuration.
+ */
+#define TTBR_BADDR_MASK_52	(((UL(1) << 46) - 1) << 2)
+#endif
+
 #endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 149d05fb9421..93677b9db947 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -733,6 +733,12 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 #define kc_vaddr_to_offset(v)	((v) & ~VA_START)
 #define kc_offset_to_vaddr(o)	((o) | VA_START)
 
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
+#else
+#define phys_to_ttbr(addr)	(addr)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 67e86a0f57ac..0addea3760a6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -679,8 +679,10 @@ ENTRY(__enable_mmu)
 	update_early_cpu_boot_status 0, x1, x2
 	adrp	x1, idmap_pg_dir
 	adrp	x2, swapper_pg_dir
-	msr	ttbr0_el1, x1			// load TTBR0
-	msr	ttbr1_el1, x2			// load TTBR1
+	phys_to_ttbr x1, x3
+	phys_to_ttbr x2, x4
+	msr	ttbr0_el1, x3			// load TTBR0
+	msr	ttbr1_el1, x4			// load TTBR1
 	isb
 	msr	sctlr_el1, x0
 	isb
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index e56d848b6466..84f5d52fddda 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -33,12 +33,14 @@
 * Even switching to our copied tables will cause a changed output address at
 * each stage of the walk.
 */
-.macro break_before_make_ttbr_switch zero_page, page_table
-	msr	ttbr1_el1, \zero_page
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+	phys_to_ttbr \zero_page, \tmp
+	msr	ttbr1_el1, \tmp
 	isb
 	tlbi	vmalle1
 	dsb	nsh
-	msr	ttbr1_el1, \page_table
+	phys_to_ttbr \page_table, \tmp
+	msr	ttbr1_el1, \tmp
 	isb
 .endm
 
@@ -78,7 +80,7 @@ ENTRY(swsusp_arch_suspend_exit)
 	 * We execute from ttbr0, change ttbr1 to our copied linear map tables
 	 * with a break-before-make via the zero page
 	 */
-	break_before_make_ttbr_switch	x5, x0
+	break_before_make_ttbr_switch	x5, x0, x6
 
 	mov	x21, x1
 	mov	x30, x2
@@ -109,7 +111,7 @@ ENTRY(swsusp_arch_suspend_exit)
 	dsb	ish		/* wait for PoU cleaning to finish */
 
 	/* switch to the restored kernels page tables */
-	break_before_make_ttbr_switch	x25, x21
+	break_before_make_ttbr_switch	x25, x21, x6
 
 	ic	ialluis
 	dsb	ish
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 3009b8b80f08..efbf6dbd93c8 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -264,7 +264,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	 */
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
-	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+	write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1);
 	isb();
 
 	*phys_dst_addr = virt_to_phys((void *)dst);
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index e2d1fe03662a..f9681cc00973 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -63,7 +63,8 @@ __do_hyp_init:
 	cmp	x0, #HVC_STUB_HCALL_NR
 	b.lo	__kvm_handle_stub_hvc
 
-	msr	ttbr0_el2, x0
+	phys_to_ttbr x0, x4
+	msr	ttbr0_el2, x4
 
 	mrs	x4, tcr_el1
 	ldr	x5, =TCR_EL2_MASK
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 051e71ec3335..289f9113a27a 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -49,6 +49,14 @@ void __init pgd_cache_init(void)
 	if (PGD_SIZE == PAGE_SIZE)
 		return;
 
+#ifdef CONFIG_ARM64_PA_BITS_52
+	/*
+	 * With 52-bit physical addresses, the architecture requires the
+	 * top-level table to be aligned to at least 64 bytes.
+	 */
+	BUILD_BUG_ON(PGD_SIZE < 64);
+#endif
+
 	/*
 	 * Naturally aligned pgds required by the architecture.
 	 */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 4f133cb340dc..e79db5a7576a 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -138,10 +138,11 @@ ENDPROC(cpu_do_resume)
  *	- pgd_phys - physical address of new TTB
  */
 ENTRY(cpu_do_switch_mm)
-	pre_ttbr0_update_workaround x0, x2, x3
+	phys_to_ttbr x0, x2
+	pre_ttbr0_update_workaround x2, x3, x4
 	mmid	x1, x1				// get mm->context.id
-	bfi	x0, x1, #48, #16		// set the ASID
-	msr	ttbr0_el1, x0			// set TTBR0
+	bfi	x2, x1, #48, #16		// set the ASID
+	msr	ttbr0_el1, x2			// set TTBR0
 	isb
 	post_ttbr0_update_workaround
 	ret
@@ -158,14 +159,16 @@ ENTRY(idmap_cpu_replace_ttbr1)
 	save_and_disable_daif flags=x2
 
 	adrp	x1, empty_zero_page
-	msr	ttbr1_el1, x1
+	phys_to_ttbr x1, x3
+	msr	ttbr1_el1, x3
 	isb
 
 	tlbi	vmalle1
 	dsb	nsh
 	isb
 
-	msr	ttbr1_el1, x0
+	phys_to_ttbr x0, x3
+	msr	ttbr1_el1, x3
 	isb
 
 	restore_daif x2
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 6b60c98a6e22..c8d49879307f 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -509,7 +509,7 @@ static void update_vttbr(struct kvm *kvm)
 	pgd_phys = virt_to_phys(kvm->arch.pgd);
 	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
-	kvm->arch.vttbr = pgd_phys | vmid;
+	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
 	spin_unlock(&kvm_vmid_lock);
 }
--
cgit v1.2.3


From e6d588a8e3da24ea51321279a064b97feb502ef0 Mon Sep 17 00:00:00 2001
From: Kristina Martsenko
Date: Wed, 13 Dec 2017 17:07:19 +0000
Subject: arm64: head.S: handle 52-bit PAs in PTEs in early page table setup

The top 4 bits of a 52-bit physical address are positioned at bits
12..15 in page table entries. Introduce a macro to move the bits there,
and change the early ID map and swapper table setup code to use it.

Tested-by: Suzuki K Poulose
Reviewed-by: Suzuki K Poulose
Reviewed-by: Marc Zyngier
Tested-by: Bob Picco
Reviewed-by: Bob Picco
Signed-off-by: Kristina Martsenko
[catalin.marinas@arm.com: additional comments for clarification]
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/pgtable-hwdef.h |  6 +++++
 arch/arm64/kernel/head.S               | 40 ++++++++++++++++++++++++++--------
 2 files changed, 37 insertions(+), 9 deletions(-)

(limited to 'arch/arm64/include/asm/pgtable-hwdef.h')

diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index f92be11a209a..5513ccd687f4 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -168,6 +168,12 @@
 #define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
 #define PTE_HYP_XN		(_AT(pteval_t, 1) << 54)	/* HYP XN */
 
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define PTE_ADDR_LOW		(((_AT(pteval_t, 1) << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
+#define PTE_ADDR_HIGH		(_AT(pteval_t, 0xf) << 12)
+#define PTE_ADDR_MASK_52	(PTE_ADDR_LOW | PTE_ADDR_HIGH)
+#endif
+
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
  */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0addea3760a6..bb06223691ba 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -147,6 +147,26 @@ preserve_boot_args:
 	b	__inval_dcache_area		// tail call
ENDPROC(preserve_boot_args)
 
+/*
+ * Macro to arrange a physical address in a page table entry, taking care of
+ * 52-bit addresses.
+ *
+ * Preserves:	phys
+ * Returns:	pte
+ */
+	.macro	phys_to_pte, phys, pte
+#ifdef CONFIG_ARM64_PA_BITS_52
+	/*
+	 * We assume \phys is 64K aligned and this is guaranteed by only
+	 * supporting this configuration with 64K pages.
+	 */
+	orr	\pte, \phys, \phys, lsr #36
+	and	\pte, \pte, #PTE_ADDR_MASK_52
+#else
+	mov	\pte, \phys
+#endif
+	.endm
+
 /*
  * Macro to create a table entry to the next page.
  *
@@ -160,10 +180,11 @@ ENDPROC(preserve_boot_args)
  * Returns:	tbl -> next level table page address
  */
 	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
+	add	\tmp1, \tbl, #PAGE_SIZE
+	phys_to_pte \tmp1, \tmp2
+	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
 	lsr	\tmp1, \virt, #\shift
 	and	\tmp1, \tmp1, #\ptrs - 1	// table index
-	add	\tmp2, \tbl, #PAGE_SIZE
-	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
 	str	\tmp2, [\tbl, \tmp1, lsl #3]
 	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
 	.endm
@@ -190,16 +211,17 @@ ENDPROC(preserve_boot_args)
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
- * Corrupts:	phys, start, end, pstate
+ * Corrupts:	phys, start, end, tmp, pstate
 */
-	.macro	create_block_map, tbl, flags, phys, start, end
-	lsr	\phys, \phys, #SWAPPER_BLOCK_SHIFT
+	.macro	create_block_map, tbl, flags, phys, start, end, tmp
 	lsr	\start, \start, #SWAPPER_BLOCK_SHIFT
 	and	\start, \start, #PTRS_PER_PTE - 1	// table index
-	orr	\phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT	// table entry
+	bic	\phys, \phys, #SWAPPER_BLOCK_SIZE - 1
 	lsr	\end, \end, #SWAPPER_BLOCK_SHIFT
 	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
-9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
+9999:	phys_to_pte \phys, \tmp
+	orr	\tmp, \tmp, \flags			// table entry
+	str	\tmp, [\tbl, \start, lsl #3]		// store the entry
 	add	\start, \start, #1			// next entry
 	add	\phys, \phys, #SWAPPER_BLOCK_SIZE	// next block
 	cmp	\start, \end
@@ -286,7 +308,7 @@ __create_page_tables:
 	create_pgd_entry x0, x3, x5, x6
 	mov	x5, x3				// __pa(__idmap_text_start)
 	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
-	create_block_map x0, x7, x3, x5, x6
+	create_block_map x0, x7, x3, x5, x6, x4
 
 	/*
 	 * Map the kernel image (starting with PHYS_OFFSET).
@@ -299,7 +321,7 @@ __create_page_tables:
 	adrp	x3, _text			// runtime __pa(_text)
 	sub	x6, x6, x3			// _end - _text
 	add	x6, x6, x5			// runtime __va(_end)
-	create_block_map x0, x7, x3, x5, x6
+	create_block_map x0, x7, x3, x5, x6, x4
 
 	/*
 	 * Since the page tables have been populated with non-cacheable
--
cgit v1.2.3


From 75387b92635e7dca410c1ef92cfe510019248f76 Mon Sep 17 00:00:00 2001
From: Kristina Martsenko
Date: Wed, 13 Dec 2017 17:07:21 +0000
Subject: arm64: handle 52-bit physical addresses in page table entries

The top 4 bits of a 52-bit physical address are positioned at bits
12..15 of a page table entry. Introduce macros to convert between a
physical address and its placement in a table entry, and change all
macros/functions that access PTEs to use them.
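As an illustration of that bit movement (a standalone, hosted sketch
mirroring the __phys_to_pte_val/__pte_to_phys macros added below with
the 64K-page layout hard-coded; not code from the patch):

#include <assert.h>
#include <stdint.h>

/* the 64K-page (PAGE_SHIFT = 16) layout used by this series */
#define PTE_ADDR_LOW	(((UINT64_C(1) << (48 - 16)) - 1) << 16)
#define PTE_ADDR_HIGH	(UINT64_C(0xf) << 12)
#define PTE_ADDR_MASK	(PTE_ADDR_LOW | PTE_ADDR_HIGH)

static uint64_t phys_to_pte_val(uint64_t phys)
{
	return (phys | (phys >> 36)) & PTE_ADDR_MASK;	/* bits 51:48 -> 15:12 */
}

static uint64_t pte_to_phys(uint64_t pte)
{
	return (pte & PTE_ADDR_LOW) | ((pte & PTE_ADDR_HIGH) << 36);
}

int main(void)
{
	uint64_t pa = UINT64_C(0x000F0000DEAD0000);	/* 52-bit, 64K-aligned */
	uint64_t pte = phys_to_pte_val(pa);

	/* bits 51:48 (0xf) now occupy bits 15:12 of the entry */
	assert(pte == UINT64_C(0x00000000DEADF000));
	assert(pte_to_phys(pte) == pa);
	return 0;
}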
Reviewed-by: Marc Zyngier
Tested-by: Suzuki K Poulose
Reviewed-by: Suzuki K Poulose
Tested-by: Bob Picco
Reviewed-by: Bob Picco
Signed-off-by: Kristina Martsenko
[catalin.marinas@arm.com: some long lines wrapped]
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/kvm_mmu.h       |  7 +++--
 arch/arm64/include/asm/pgalloc.h       |  6 ++--
 arch/arm64/include/asm/pgtable-hwdef.h |  6 ++--
 arch/arm64/include/asm/pgtable.h       | 50 ++++++++++++++++++++++++++--------
 arch/arm64/kernel/head.S               |  2 +-
 5 files changed, 51 insertions(+), 20 deletions(-)

(limited to 'arch/arm64/include/asm/pgtable-hwdef.h')

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 9810ebf949b3..b3f7b68b042d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -287,6 +287,7 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
 				       unsigned long hyp_idmap_start)
 {
 	int idmap_idx;
+	u64 pgd_addr;
 
 	/*
 	 * Use the first entry to access the HYP mappings. It is
@@ -294,7 +295,8 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
 	 * extended idmap.
 	 */
 	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
-	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);
+	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
+	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);
 
 	/*
 	 * Create another extended level entry that points to the boot HYP map,
@@ -304,7 +306,8 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
 	 */
 	idmap_idx = hyp_idmap_start >> VA_BITS;
 	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
-	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
+	pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
+	merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
 }
 
 static inline unsigned int kvm_get_vmid_bits(void)
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 5ca6a573a701..e9d9f1b006ef 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -44,7 +44,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 
 static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
 {
-	set_pud(pud, __pud(pmd | prot));
+	set_pud(pud, __pud(__phys_to_pud_val(pmd) | prot));
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -73,7 +73,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 
 static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
 {
-	set_pgd(pgdp, __pgd(pud | prot));
+	set_pgd(pgdp, __pgd(__phys_to_pgd_val(pud) | prot));
 }
 
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
@@ -129,7 +129,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 
 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, pmdval_t prot)
 {
-	set_pmd(pmdp, __pmd(pte | prot));
+	set_pmd(pmdp, __pmd(__phys_to_pmd_val(pte) | prot));
 }
 
 /*
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5513ccd687f4..85069f37ae37 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -168,10 +168,12 @@
 #define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
 #define PTE_HYP_XN		(_AT(pteval_t, 1) << 54)	/* HYP XN */
 
-#ifdef CONFIG_ARM64_PA_BITS_52
 #define PTE_ADDR_LOW		(((_AT(pteval_t, 1) << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
+#ifdef CONFIG_ARM64_PA_BITS_52
 #define PTE_ADDR_HIGH		(_AT(pteval_t, 0xf) << 12)
-#define PTE_ADDR_MASK_52	(PTE_ADDR_LOW | PTE_ADDR_HIGH)
+#define PTE_ADDR_MASK		(PTE_ADDR_LOW | PTE_ADDR_HIGH)
+#else
+#define PTE_ADDR_MASK		PTE_ADDR_LOW
 #endif
 
 /*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 5d9554fb2692..4fd8af303e2c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -57,9 +57,22 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 #define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
 
-#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
+/*
+ * Macros to convert between a physical address and its placement in a
+ * page table entry, taking care of 52-bit addresses.
+ */
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define __pte_to_phys(pte)	\
+	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
+#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
+#else
+#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
+#define __phys_to_pte_val(phys)	(phys)
+#endif
 
-#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot)	\
+	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 #define pte_none(pte)		(!pte_val(pte))
 #define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
@@ -284,6 +297,11 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
 
 #define __HAVE_ARCH_PTE_SPECIAL
 
+static inline pte_t pgd_pte(pgd_t pgd)
+{
+	return __pte(pgd_val(pgd));
+}
+
 static inline pte_t pud_pte(pud_t pud)
 {
 	return __pte(pud_val(pud));
@@ -349,16 +367,24 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
 
-#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
-#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
+#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
+#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 #define pud_write(pud)		pte_write(pud_pte(pud))
-#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
-#define pfn_pud(pfn,prot)	(__pud(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
+#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
+#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
+#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 #define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
 
+#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
+#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)
+
 #define __pgprot_modify(prot,mask,bits) \
 	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
 
@@ -409,7 +435,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 {
-	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
+	return __pmd_to_phys(pmd);
 }
 
 /* Find an entry in the third-level page table. */
@@ -427,7 +453,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 #define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
 #define pte_clear_fixmap()		clear_fixmap(FIX_PTE)
 
-#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(__pmd_to_phys(pmd)))
 
 /* use ONLY for statically allocated translation tables */
 #define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
@@ -460,7 +486,7 @@ static inline void pud_clear(pud_t *pudp)
 
 static inline phys_addr_t pud_page_paddr(pud_t pud)
 {
-	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
+	return __pud_to_phys(pud);
 }
 
 /* Find an entry in the second-level page table. */
@@ -473,7 +499,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
 #define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
 #define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)
 
-#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
+#define pud_page(pud)		pfn_to_page(__phys_to_pfn(__pud_to_phys(pud)))
 
 /* use ONLY for statically allocated translation tables */
 #define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
@@ -512,7 +538,7 @@ static inline void pgd_clear(pgd_t *pgdp)
 
 static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 {
-	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
+	return __pgd_to_phys(pgd);
 }
 
 /* Find an entry in the first-level page table. */
@@ -525,7 +551,7 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 #define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
 #define pud_clear_fixmap()		clear_fixmap(FIX_PUD)
 
-#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
+#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))
 
 /* use ONLY for statically allocated translation tables */
 #define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index bb06223691ba..eeec0001e204 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -161,7 +161,7 @@ ENDPROC(preserve_boot_args)
 	 * supporting this configuration with 64K pages.
 	 */
 	orr	\pte, \phys, \phys, lsr #36
-	and	\pte, \pte, #PTE_ADDR_MASK_52
+	and	\pte, \pte, #PTE_ADDR_MASK
 #else
 	mov	\pte, \phys
 #endif
--
cgit v1.2.3
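The TTBR packing from earlier in the series uses the same fold-down
idiom, just aimed at different destination bits: bits 51:48 of the PA
move to TTBR bits 5:2 via the lsr #46 in phys_to_ttbr. A hosted sketch
mirroring that macro (illustrative only, not part of the patches):

#include <assert.h>
#include <stdint.h>

/* TTBR_BADDR_MASK_52 from the TTBR patch: GENMASK_ULL(47, 2) */
#define TTBR_BADDR_MASK_52	(((UINT64_C(1) << 46) - 1) << 2)

static uint64_t phys_to_ttbr(uint64_t phys)
{
	/* bits 47:2 keep their place, bits 51:48 fold down to 5:2 */
	return (phys | (phys >> 46)) & TTBR_BADDR_MASK_52;
}

int main(void)
{
	/* a 52-bit PA whose top nibble is 0xf */
	uint64_t pa = UINT64_C(0x000F0000DEAD0000);
	uint64_t ttbr = phys_to_ttbr(pa);

	/* low field unchanged, top nibble now at bits 5:2 (0xf << 2 = 0x3c) */
	assert(ttbr == (UINT64_C(0x00000000DEAD0000) | 0x3c));
	return 0;
}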