From 92b4b5619f12770d455ae639ed4e773bdf01aff6 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Sep 2022 17:22:03 +0100 Subject: arm64: cpufeature: make cpus_have_cap() noinstr-safe Currently it isn't safe to use cpus_have_cap() from noinstr code as test_bit() is explicitly instrumented, and were cpus_have_cap() placed out-of-line, cpus_have_cap() itself could be instrumented. Make cpus_have_cap() noinstr safe by marking it __always_inline and using arch_test_bit(). Aside from the prevention of instrumentation, there should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: James Morse Cc: Joey Gouly Cc: Marc Zyngier Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220912162210.3626215-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index fd7d75a275f6..630c337c7774 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -448,11 +448,11 @@ static __always_inline bool system_capabilities_finalized(void) * * Before the capability is detected, this returns false. */ -static inline bool cpus_have_cap(unsigned int num) +static __always_inline bool cpus_have_cap(unsigned int num) { if (num >= ARM64_NCAPS) return false; - return test_bit(num, cpu_hwcaps); + return arch_test_bit(num, cpu_hwcaps); } /* -- cgit v1.2.3 From 34bbfdfb146be3ad485cbe526b2a1e9ce220649c Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Sep 2022 17:22:04 +0100 Subject: arm64: alternatives: kvm: prepare for cap changes The KVM patching callbacks use cpus_have_final_cap() internally within has_vhe(), and subsequent patches will make it invalid to call cpus_have_final_cap() before alternatives patching has completed, and will mean that cpus_have_const_cap() will always fall back to dynamic checks prior to alternatives patching. In preparation for said change, this patch modifies the KVM patching callbacks to use cpus_have_cap() directly. This is not subject to patching, and will dynamically check the cpu_hwcaps array, which is functionally equivalent to the existing behaviour. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: James Morse Cc: Joey Gouly Cc: Marc Zyngier Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220912162210.3626215-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kvm/va_layout.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c index acdb7b3cc97d..91b22a014610 100644 --- a/arch/arm64/kvm/va_layout.c +++ b/arch/arm64/kvm/va_layout.c @@ -169,7 +169,7 @@ void __init kvm_update_va_mask(struct alt_instr *alt, * dictates it and we don't have any spare bits in the * address), NOP everything after masking the kernel VA. 
*/ - if (has_vhe() || (!tag_val && i > 0)) { + if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN) || (!tag_val && i > 0)) { updptr[i] = cpu_to_le32(aarch64_insn_gen_nop()); continue; } @@ -193,7 +193,8 @@ void kvm_patch_vector_branch(struct alt_instr *alt, BUG_ON(nr_inst != 4); - if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe())) + if (!cpus_have_cap(ARM64_SPECTRE_V3A) || + WARN_ON_ONCE(cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))) return; /* -- cgit v1.2.3 From 747ad8d557645b8c9adb83334a620426e4c9d60f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Sep 2022 17:22:05 +0100 Subject: arm64: alternatives: proton-pack: prepare for cap changes The spectre patching callbacks use cpus_have_final_cap(), and subsequent patches will make it invalid to call cpus_have_final_cap() before alternatives patching has completed. In preparation for said change, this patch modifies the spectre patching callbacks to use cpus_have_cap(). This is not subject to patching, and will dynamically check the cpu_hwcaps array, which is functionally equivalent to the existing behaviour. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Joey Gouly Cc: Ard Biesheuvel Cc: James Morse Cc: Marc Zyngier Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220912162210.3626215-4-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/proton-pack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 40be3a7c2c53..8010b2e573fa 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -586,7 +586,7 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt, if (spectre_v4_mitigations_off()) return; - if (cpus_have_final_cap(ARM64_SSBS)) + if (cpus_have_cap(ARM64_SSBS)) return; if (spectre_v4_mitigations_dynamic()) -- cgit v1.2.3 From c5ba03260c7afad6977654d6bc7fcabbcd35c379 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Sep 2022 17:22:06 +0100 Subject: arm64: alternatives: hoist print out of __apply_alternatives() Printing in the middle of __apply_alternatives() is potentially unsafe and not all that helpful given that these days we practically always patch *something*. Hoist the print out of __apply_alternatives(), and add separate prints to apply_boot_alternatives() and apply_alternatives_all(), which will make it easier to spot if either patching call goes wrong. Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: James Morse Cc: Joey Gouly Cc: Marc Zyngier Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220912162210.3626215-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/alternative.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 9bcaa5eacf16..d94d97cb4a0b 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -156,8 +156,6 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu else BUG_ON(alt->alt_len != alt->orig_len); - pr_info_once("patching kernel code\n"); - origptr = ALT_ORIG_PTR(alt); updptr = is_module ?
origptr : lm_alias(origptr); nr_inst = alt->orig_len / AARCH64_INSN_SIZE; @@ -225,6 +223,8 @@ static int __apply_alternatives_multi_stop(void *unused) void __init apply_alternatives_all(void) { + pr_info("applying system-wide alternatives\n"); + /* better not try code patching on a live SMP system */ stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); } @@ -244,6 +244,8 @@ void __init apply_boot_alternatives(void) /* If called on non-boot cpu things could go wrong */ WARN_ON(smp_processor_id() != 0); + pr_info("applying boot alternatives\n"); + __apply_alternatives(&region, false, &boot_capabilities[0]); } -- cgit v1.2.3 From b723edf3a12a2604cdc0dfcd7cc93d210a815e0c Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Sep 2022 17:22:07 +0100 Subject: arm64: alternatives: make alt_region const We never alter a struct alt_region after creation, and we open-code the bounds of the kernel alternatives region in two functions. The duplication is a bit unfortunate for clarity (and in future we're likely to have more functions altering alternative regions), and to avoid accidents it would be good to make the structure const. This patch adds a shared alt_region, `kernel_alternatives`, for the main kernel image, and marks the alt_regions as const to prevent unintentional modification. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: James Morse Cc: Joey Gouly Cc: Marc Zyngier Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220912162210.3626215-6-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/alternative.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index d94d97cb4a0b..2e18c9c0f612 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -133,8 +133,9 @@ static void clean_dcache_range_nopatch(u64 start, u64 end) } while (cur += d_size, cur < end); } -static void __nocfi __apply_alternatives(struct alt_region *region, bool is_module, - unsigned long *feature_mask) +static void __nocfi __apply_alternatives(const struct alt_region *region, + bool is_module, + unsigned long *feature_mask) { struct alt_instr *alt; __le32 *origptr, *updptr; @@ -190,17 +191,17 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu } } +static const struct alt_region kernel_alternatives = { + .begin = (struct alt_instr *)__alt_instructions, + .end = (struct alt_instr *)__alt_instructions_end, +}; + /* * We might be patching the stop_machine state machine, so implement a * really simple polling protocol here.
*/ static int __apply_alternatives_multi_stop(void *unused) { - struct alt_region region = { - .begin = (struct alt_instr *)__alt_instructions, - .end = (struct alt_instr *)__alt_instructions_end, - }; - /* We always have a CPU 0 at this point (__init) */ if (smp_processor_id()) { while (!all_alternatives_applied) @@ -213,7 +214,8 @@ static int __apply_alternatives_multi_stop(void *unused) ARM64_NPATCHABLE); BUG_ON(all_alternatives_applied); - __apply_alternatives(&region, false, remaining_capabilities); + __apply_alternatives(&kernel_alternatives, false, + remaining_capabilities); /* Barriers provided by the cache flushing */ all_alternatives_applied = 1; } @@ -236,17 +238,13 @@ void __init apply_alternatives_all(void) */ void __init apply_boot_alternatives(void) { - struct alt_region region = { - .begin = (struct alt_instr *)__alt_instructions, - .end = (struct alt_instr *)__alt_instructions_end, - }; - /* If called on non-boot cpu things could go wrong */ WARN_ON(smp_processor_id() != 0); pr_info("applying boot alternatives\n"); - __apply_alternatives(&region, false, &boot_capabilities[0]); + __apply_alternatives(&kernel_alternatives, false, + &boot_capabilities[0]); } #ifdef CONFIG_MODULES -- cgit v1.2.3 From 4c0bd995d73ed8897650095c7892b132a0bd66a4 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Sep 2022 17:22:08 +0100 Subject: arm64: alternatives: have callbacks take a cap Today, callback alternatives are special-cased within __apply_alternatives(), and are applied alongside patching for system capabilities as ARM64_NCAPS is not part of the boot_capabilities feature mask. This special-casing is less than ideal. Giving special meaning to ARM64_NCAPS for this requires some structures and loops to use ARM64_NCAPS + 1 (AKA ARM64_NPATCHABLE), while others use ARM64_NCAPS. It's also not immediately clear that callback alternatives are only applied when applying alternatives for system-wide features. To make this a bit clearer, this patch changes the way that callback alternatives are identified to remove the special-casing of ARM64_NCAPS, and to allow callback alternatives to be associated with a cpucap, as with all other alternatives. New cpucaps, ARM64_ALWAYS_BOOT and ARM64_ALWAYS_SYSTEM, are added, which are always detected alongside boot cpu capabilities and system capabilities respectively. All existing callback alternatives are made to use ARM64_ALWAYS_SYSTEM, and so will be patched at the same point during the boot flow as before. Subsequent patches will make more use of these new cpucaps. There should be no functional change as a result of this patch.
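To illustrate the encoding this change adopts, a minimal standalone sketch follows. The bit layout and the ALT_CAP()/ALT_HAS_CB() semantics mirror the diff below; the example_* wrappers and the surrounding scaffolding are hypothetical, added only for this illustration:

    #include <stdbool.h>

    /* Bit 15 of the 16-bit cpufeature field marks a callback alternative;
     * the low bits carry the cpucap the alternative depends on. */
    #define ARM64_CB_BIT (1UL << 15)

    /* Mirrors ALT_CAP(): recover the cpucap by masking off the callback bit. */
    static unsigned int example_alt_cap(unsigned short cpufeature)
    {
        return cpufeature & ~ARM64_CB_BIT;
    }

    /* Mirrors ALT_HAS_CB(): true if this alternative is patched by a callback. */
    static bool example_alt_has_cb(unsigned short cpufeature)
    {
        return cpufeature & ARM64_CB_BIT;
    }

A callback alternative for a cap C is thus emitted with cpufeature == (ARM64_CB_BIT | C), so __apply_alternatives() can test the feature mask uniformly before deciding whether to dispatch to a callback.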
Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: James Morse Cc: Joey Gouly Cc: Marc Zyngier Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220912162210.3626215-7-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/alternative-macros.h | 18 ++++++++++++------ arch/arm64/include/asm/assembler.h | 10 +++++----- arch/arm64/include/asm/cpufeature.h | 4 +--- arch/arm64/include/asm/kvm_mmu.h | 5 +++-- arch/arm64/kernel/alternative.c | 26 ++++++++++++++------------ arch/arm64/kernel/cpufeature.c | 19 +++++++++++++++++-- arch/arm64/kernel/entry.S | 8 ++++---- arch/arm64/kvm/hyp/hyp-entry.S | 4 ++-- arch/arm64/tools/cpucaps | 2 ++ 9 files changed, 60 insertions(+), 36 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h index 7e157ab6cd50..189c31be163c 100644 --- a/arch/arm64/include/asm/alternative-macros.h +++ b/arch/arm64/include/asm/alternative-macros.h @@ -2,10 +2,16 @@ #ifndef __ASM_ALTERNATIVE_MACROS_H #define __ASM_ALTERNATIVE_MACROS_H +#include <linux/const.h> + #include <asm/cpucaps.h> #include <asm/insn.h> -#define ARM64_CB_PATCH ARM64_NCAPS +#define ARM64_CB_BIT (UL(1) << 15) + +#if ARM64_NCAPS >= ARM64_CB_BIT +#error "cpucaps have overflown ARM64_CB_BIT" +#endif #ifndef __ASSEMBLY__ @@ -73,8 +79,8 @@ #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) -#define ALTERNATIVE_CB(oldinstr, cb) \ - __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb) +#define ALTERNATIVE_CB(oldinstr, feature, cb) \ + __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_BIT | (feature), 1, cb) #else #include <asm/assembler.h> @@ -82,7 +88,7 @@ .macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len .word \orig_offset - . .word \alt_offset - . - .hword \feature + .hword (\feature) .byte \orig_len .byte \alt_len .endm @@ -141,10 +147,10 @@ 661: .endm -.macro alternative_cb cb +.macro alternative_cb cap, cb .set .Lasm_alt_mode, 0 .pushsection .altinstructions, "a" - altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0 + altinstruction_entry 661f, \cb, ARM64_CB_BIT | \cap, 662f-661f, 0 .popsection 661: .endm diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 5846145be523..d851e2733439 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -293,7 +293,7 @@ alternative_endif alternative_if_not ARM64_KVM_PROTECTED_MODE ASM_BUG() alternative_else_nop_endif -alternative_cb kvm_compute_final_ctr_el0 +alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0 movz \reg, #0 movk \reg, #0, lsl #16 movk \reg, #0, lsl #32 @@ -877,7 +877,7 @@ alternative_endif .macro __mitigate_spectre_bhb_loop tmp #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -alternative_cb spectre_bhb_patch_loop_iter +alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter mov \tmp, #32 // Patched to correct the immediate alternative_cb_end .Lspectre_bhb_loop\@: @@ -890,7 +890,7 @@ alternative_cb_end .macro mitigate_spectre_bhb_loop tmp #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -alternative_cb spectre_bhb_patch_loop_mitigation_enable +alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable b .L_spectre_bhb_loop_done\@ // Patched to NOP alternative_cb_end __mitigate_spectre_bhb_loop \tmp @@ -904,7 +904,7 @@ alternative_cb_end stp x0, x1, [sp, #-16]! stp x2, x3, [sp, #-16]!
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 -alternative_cb smccc_patch_fw_mitigation_conduit +alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit nop // Patched to SMC/HVC #0 alternative_cb_end ldp x2, x3, [sp], #16 @@ -914,7 +914,7 @@ alternative_cb_end .macro mitigate_spectre_bhb_clear_insn #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -alternative_cb spectre_bhb_patch_clearbhb +alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb /* Patched to NOP when not supported */ clearbhb isb diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 630c337c7774..f5852ad6e884 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -422,9 +422,7 @@ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; -/* ARM64 CAPS + alternative_cb */ -#define ARM64_NPATCHABLE (ARM64_NCAPS + 1) -extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); +extern DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS); #define for_each_available_cap(cap) \ for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index b208da3bebec..7784081088e7 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -63,7 +63,7 @@ * specific registers encoded in the instructions). */ .macro kern_hyp_va reg -alternative_cb kvm_update_va_mask +alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask and \reg, \reg, #1 /* mask with va_mask */ ror \reg, \reg, #1 /* rotate to the first tag bit */ add \reg, \reg, #0 /* insert the low 12 bits of the tag */ @@ -97,7 +97,7 @@ alternative_cb_end hyp_pa \reg, \tmp /* Load kimage_voffset. */ -alternative_cb kvm_get_kimage_voffset +alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset movz \tmp, #0 movk \tmp, #0, lsl #16 movk \tmp, #0, lsl #32 @@ -131,6 +131,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) "add %0, %0, #0\n" "add %0, %0, #0, lsl 12\n" "ror %0, %0, #63\n", + ARM64_ALWAYS_SYSTEM, kvm_update_va_mask) : "+r" (v)); return v; diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 2e18c9c0f612..9a071a5fcb67 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -21,6 +21,9 @@ #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) +#define ALT_CAP(a) ((a)->cpufeature & ~ARM64_CB_BIT) +#define ALT_HAS_CB(a) ((a)->cpufeature & ARM64_CB_BIT) + /* Volatile, as we may be patching the guts of READ_ONCE() */ static volatile int all_alternatives_applied; @@ -143,16 +146,15 @@ static void __nocfi __apply_alternatives(const struct alt_region *region, for (alt = region->begin; alt < region->end; alt++) { int nr_inst; + int cap = ALT_CAP(alt); - if (!test_bit(alt->cpufeature, feature_mask)) + if (!test_bit(cap, feature_mask)) continue; - /* Use ARM64_CB_PATCH as an unconditional patch */ - if (alt->cpufeature < ARM64_CB_PATCH && - !cpus_have_cap(alt->cpufeature)) + if (!cpus_have_cap(cap)) continue; - if (alt->cpufeature == ARM64_CB_PATCH) + if (ALT_HAS_CB(alt)) BUG_ON(alt->alt_len != 0); else BUG_ON(alt->alt_len != alt->orig_len); @@ -161,10 +163,10 @@ static void __nocfi __apply_alternatives(const struct alt_region *region, updptr = is_module ? 
origptr : lm_alias(origptr); nr_inst = alt->orig_len / AARCH64_INSN_SIZE; - if (alt->cpufeature < ARM64_CB_PATCH) - alt_cb = patch_alternative; - else + if (ALT_HAS_CB(alt)) alt_cb = ALT_REPL_PTR(alt); + else + alt_cb = patch_alternative; alt_cb(alt, origptr, updptr, nr_inst); @@ -208,10 +210,10 @@ static int __apply_alternatives_multi_stop(void *unused) cpu_relax(); isb(); } else { - DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE); + DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS); bitmap_complement(remaining_capabilities, boot_capabilities, - ARM64_NPATCHABLE); + ARM64_NCAPS); BUG_ON(all_alternatives_applied); __apply_alternatives(&kernel_alternatives, false, @@ -254,9 +256,9 @@ void apply_alternatives_module(void *start, size_t length) .begin = start, .end = start + length, }; - DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE); + DECLARE_BITMAP(all_capabilities, ARM64_NCAPS); - bitmap_fill(all_capabilities, ARM64_NPATCHABLE); + bitmap_fill(all_capabilities, ARM64_NCAPS); __apply_alternatives(®ion, true, &all_capabilities[0]); } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index af4de817d712..68a0545285a1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -108,8 +108,7 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); EXPORT_SYMBOL(cpu_hwcaps); static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS]; -/* Need also bit for ARM64_CB_PATCH */ -DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); +DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS); bool arm64_use_ng_mappings = false; EXPORT_SYMBOL(arm64_use_ng_mappings); @@ -1391,6 +1390,12 @@ u64 __read_sysreg_by_encoding(u32 sys_id) #include +static bool +has_always(const struct arm64_cpu_capabilities *entry, int scope) +{ + return true; +} + static bool feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) { @@ -2087,6 +2092,16 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap) } static const struct arm64_cpu_capabilities arm64_features[] = { + { + .capability = ARM64_ALWAYS_BOOT, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + .matches = has_always, + }, + { + .capability = ARM64_ALWAYS_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_always, + }, { .desc = "GIC system register CPU interface", .capability = ARM64_HAS_SYSREG_GIC_CPUIF, diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 2d73b3e793b2..e28137d64b76 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -114,7 +114,7 @@ * them if required. 
*/ .macro apply_ssbd, state, tmp1, tmp2 -alternative_cb spectre_v4_patch_fw_mitigation_enable +alternative_cb ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable b .L__asm_ssbd_skip\@ // Patched to NOP alternative_cb_end ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1 @@ -123,7 +123,7 @@ alternative_cb_end tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 mov w1, #\state -alternative_cb smccc_patch_fw_mitigation_conduit +alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit nop // Patched to SMC/HVC #0 alternative_cb_end .L__asm_ssbd_skip\@: @@ -175,7 +175,7 @@ alternative_else_nop_endif .macro mte_set_kernel_gcr, tmp, tmp2 #ifdef CONFIG_KASAN_HW_TAGS -alternative_cb kasan_hw_tags_enable +alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable b 1f alternative_cb_end mov \tmp, KERNEL_GCR_EL1 @@ -186,7 +186,7 @@ alternative_cb_end .macro mte_set_user_gcr, tsk, tmp, tmp2 #ifdef CONFIG_KASAN_HW_TAGS -alternative_cb kasan_hw_tags_enable +alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable b 1f alternative_cb_end ldr \tmp, [\tsk, #THREAD_MTE_CTRL] diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 7839d075729b..8f3f93fa119e 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -196,7 +196,7 @@ SYM_CODE_END(__kvm_hyp_vector) sub sp, sp, #(8 * 4) stp x2, x3, [sp, #(8 * 0)] stp x0, x1, [sp, #(8 * 2)] - alternative_cb spectre_bhb_patch_wa3 + alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_wa3 /* Patched to mov WA3 when supported */ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 alternative_cb_end @@ -216,7 +216,7 @@ SYM_CODE_END(__kvm_hyp_vector) mitigate_spectre_bhb_clear_insn .endif .if \indirect != 0 - alternative_cb kvm_patch_vector_branch + alternative_cb ARM64_ALWAYS_SYSTEM, kvm_patch_vector_branch /* * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with: * diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 63b2484ce6c3..ff882ec175e7 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -2,6 +2,8 @@ # # Internal CPU capabilities constants, keep this list sorted +ALWAYS_BOOT +ALWAYS_SYSTEM BTI # Unreliable: use system_supports_32bit_el0() instead. HAS_32BIT_EL0_DO_NOT_USE -- cgit v1.2.3 From 21fb26bfb01ffe0d9a9a967ffe061092128bbffe Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Sep 2022 17:22:09 +0100 Subject: arm64: alternatives: add alternative_has_feature_*() Currently we use a mixture of alternative sequences and static branches to handle features detected at boot time. For ease of maintenance we generally prefer to use static branches in C code, but this has a few downsides: * Each static branch has metadata in the __jump_table section, which is not discarded after features are finalized. This wastes some space, and slows down the patching of other static branches. * The static branches are patched at a different point in time from the alternatives, so changes are not atomic. This leaves a transient period where there could be a mismatch between the behaviour of alternatives and static branches, which could be problematic for some features (e.g. pseudo-NMI). * More (instrumentable) kernel code is executed to patch each static branch, which can be risky when patching certain features (e.g. irqflags management for pseudo-NMI). * When CONFIG_JUMP_LABEL=n, static branches are turned into a load of a flag and a conditional branch.
This means it isn't safe to use such static branches in an alternative address space (e.g. the NVHE/PKVM hyp code), where the generated address isn't safe to access. To deal with these issues, this patch introduces new alternative_has_feature_*() helpers, which work like static branches but are patched using alternatives. This ensures the patching is performed at the same time as other alternative patching, allows the metadata to be freed after patching, and is safe for use in alternative address spaces. Note that all supported toolchains have asm goto support, and since commit: a0a12c3ed057af57 ("asm goto: eradicate CC_HAS_ASM_GOTO") ... the CC_HAS_ASM_GOTO Kconfig symbol has been removed, so no feature check is necessary, and we can always make use of asm goto. Additionally, note that: * This has no impact on cpus_have_cap(), which is a dynamic check. * This has no functional impact on cpus_have_const_cap(). The branches are patched slightly later than before this patch, but these branches are not reachable until caps have been finalised. * It is now invalid to use cpus_have_final_cap() in the window between feature detection and patching. All existing uses are only expected after patching anyway, so this should not be a problem. * The LSE atomics will now be enabled during alternatives patching rather than immediately before. As the LL/SC and LSE atomics are functionally equivalent, this should not be problematic. When building defconfig with GCC 12.1.0, the resulting Image is 64KiB smaller: | % ls -al Image-* | -rw-r--r-- 1 mark mark 37108224 Aug 23 09:56 Image-after | -rw-r--r-- 1 mark mark 37173760 Aug 23 09:54 Image-before According to bloat-o-meter.pl: | add/remove: 44/34 grow/shrink: 602/1294 up/down: 39692/-61108 (-21416) | Function old new delta | [...] | Total: Before=16618336, After=16596920, chg -0.13% | add/remove: 0/2 grow/shrink: 0/0 up/down: 0/-1296 (-1296) | Data old new delta | arm64_const_caps_ready 16 - -16 | cpu_hwcap_keys 1280 - -1280 | Total: Before=8987120, After=8985824, chg -0.01% | add/remove: 0/0 grow/shrink: 0/0 up/down: 0/0 (0) | RO Data old new delta | Total: Before=18408, After=18408, chg +0.00% Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: James Morse Cc: Joey Gouly Cc: Marc Zyngier Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220912162210.3626215-8-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/alternative-macros.h | 41 +++++++++++++++++++++++++++++ arch/arm64/include/asm/cpufeature.h | 7 +++-- arch/arm64/include/asm/lse.h | 5 ++-- arch/arm64/kernel/cpufeature.c | 25 ------------------ arch/arm64/kernel/image-vars.h | 4 --- 5 files changed, 46 insertions(+), 36 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h index 189c31be163c..eaba9ec12789 100644 --- a/arch/arm64/include/asm/alternative-macros.h +++ b/arch/arm64/include/asm/alternative-macros.h @@ -213,4 +213,45 @@ alternative_endif #define ALTERNATIVE(oldinstr, newinstr, ...)
\ _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1) +#ifndef __ASSEMBLY__ + +#include <linux/bug.h> +#include <linux/types.h> + +static __always_inline bool +alternative_has_feature_likely(unsigned long feature) +{ + BUILD_BUG_ON(feature >= ARM64_NCAPS); + + asm_volatile_goto( + ALTERNATIVE("b %l[l_no]", "nop", %[feature]) + : + : [feature] "i" (feature) + : + : l_no); + + return true; +l_no: + return false; +} + +static __always_inline bool +alternative_has_feature_unlikely(unsigned long feature) +{ + BUILD_BUG_ON(feature >= ARM64_NCAPS); + + asm_volatile_goto( + ALTERNATIVE("nop", "b %l[l_yes]", %[feature]) + : + : [feature] "i" (feature) + : + : l_yes); + + return false; +l_yes: + return true; +} + +#endif /* __ASSEMBLY__ */ + #endif /* __ASM_ALTERNATIVE_MACROS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index f5852ad6e884..ba8a47433b5a 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -6,6 +6,7 @@ #ifndef __ASM_CPUFEATURE_H #define __ASM_CPUFEATURE_H +#include <asm/alternative-macros.h> #include <asm/cpucaps.h> #include <asm/cputype.h> #include <asm/hwcap.h> @@ -419,8 +420,6 @@ static __always_inline bool is_hyp_code(void) } extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); -extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; -extern struct static_key_false arm64_const_caps_ready; extern DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS); @@ -438,7 +437,7 @@ unsigned long cpu_get_elf_hwcap2(void); static __always_inline bool system_capabilities_finalized(void) { - return static_branch_likely(&arm64_const_caps_ready); + return alternative_has_feature_likely(ARM64_ALWAYS_SYSTEM); } /* @@ -465,7 +464,7 @@ static __always_inline bool __cpus_have_const_cap(int num) { if (num >= ARM64_NCAPS) return false; - return static_branch_unlikely(&cpu_hwcap_keys[num]); + return alternative_has_feature_unlikely(num); } /* diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 29c85810ae69..c503db8e73b0 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -13,14 +13,13 @@ #include <linux/jump_label.h> #include <linux/stringify.h> #include <asm/alternative.h> +#include <asm/alternative-macros.h> #include <asm/atomic_lse.h> #include <asm/cpucaps.h> -extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; - static __always_inline bool system_uses_lse_atomics(void) { - return static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]); + return alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS); } #define __lse_ll_sc_body(op, ...) \ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 68a0545285a1..d9c3879728bc 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -133,31 +133,12 @@ DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0); */ static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly; -/* - * Flag to indicate if we have computed the system wide - * capabilities based on the boot time active CPUs. This - * will be used to determine if a new booting CPU should - * go through the verification process to make sure that it - * supports the system capabilities, without using a hotplug - * notifier. This is also used to decide if we could use - * the fast path for checking constant CPU caps.
- */ -DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready); -EXPORT_SYMBOL(arm64_const_caps_ready); -static inline void finalize_system_capabilities(void) -{ - static_branch_enable(&arm64_const_caps_ready); -} - void dump_cpu_features(void) { /* file-wide pr_fmt adds "CPU features: " prefix */ pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps); } -DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS); -EXPORT_SYMBOL(cpu_hwcap_keys); - #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ { \ .sign = SIGNED, \ @@ -2944,9 +2925,6 @@ static void __init enable_cpu_capabilities(u16 scope_mask) if (!cpus_have_cap(num)) continue; - /* Ensure cpus_have_const_cap(num) works */ - static_branch_enable(&cpu_hwcap_keys[num]); - if (boot_scope && caps->cpu_enable) /* * Capabilities with SCOPE_BOOT_CPU scope are finalised @@ -3268,9 +3246,6 @@ void __init setup_cpu_features(void) sme_setup(); minsigstksz_setup(); - /* Advertise that we have computed the system capabilities */ - finalize_system_capabilities(); - /* * Check for sane CTR_EL0.CWG value. */ diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index afa69e04e75e..118973a6ab05 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -89,10 +89,6 @@ KVM_NVHE_ALIAS(__icache_flags); /* VMID bits set by the KVM VMID allocator */ KVM_NVHE_ALIAS(kvm_arm_vmid_bits); -/* Kernel symbols needed for cpus_have_final/const_caps checks. */ -KVM_NVHE_ALIAS(arm64_const_caps_ready); -KVM_NVHE_ALIAS(cpu_hwcap_keys); - /* Static keys which are set if a vGIC trap should be handled in hyp. */ KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); -- cgit v1.2.3 From d926079f17bf8aa47485b6a55be1fc0175dbd1db Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Sep 2022 17:22:10 +0100 Subject: arm64: alternatives: add shared NOP callback For each instance of an alternative, the compiler outputs a distinct copy of the alternative instructions into a subsection. As the compiler doesn't have special knowledge of alternatives, it cannot coalesce these to save space. In a defconfig kernel built with GCC 12.1.0, there are approximately 10,000 instances of alternative_has_feature_likely(), where the replacement instruction is always a NOP. As NOPs are position-independent, we don't need a unique copy per alternative sequence. This patch adds a callback to patch an alternative sequence with NOPs, and makes use of this in alternative_has_feature_likely(). So that this can be used for other sites in future, this is written to patch multiple instructions up to the original sequence length. For NVHE, an alias is added to image-vars.h. For modules, the callback is exported. Note that as modules are loaded within 2GiB of the kernel, an alt_instr entry in a module can always refer directly to the callback, and no special handling is necessary.
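For a concrete picture of the caller side, a hedged sketch: alternative_has_feature_likely() and ARM64_HAS_LSE_ATOMICS are real names from this series, while the wrapper function itself is hypothetical. Before patching, the compiler-emitted "b l_no" falls through to the slow path; once the capability is finalised, the shared callback in the diff below rewrites that branch to a NOP in place, and every such site shares the one callback rather than carrying a private copy of the replacement instructions:

    /* Hypothetical caller: compiles to one patched branch and leaves
     * no __jump_table metadata behind after boot. */
    static inline bool example_use_lse(void)
    {
        if (alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS))
            return true;   /* NOP-patched fast path once the cap is set */
        return false;      /* taken before patching, or without LSE */
    }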
When building with GCC 12.1.0, the vmlinux is ~158KiB smaller, though the resulting Image size is unchanged due to alignment constraints and padding: | % ls -al vmlinux-* | -rwxr-xr-x 1 mark mark 134644592 Sep 1 14:52 vmlinux-after | -rwxr-xr-x 1 mark mark 134486232 Sep 1 14:50 vmlinux-before | % ls -al Image-* | -rw-r--r-- 1 mark mark 37108224 Sep 1 14:52 Image-after | -rw-r--r-- 1 mark mark 37108224 Sep 1 14:50 Image-before Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: James Morse Cc: Joey Gouly Cc: Marc Zyngier Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220912162210.3626215-9-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/alternative-macros.h | 2 +- arch/arm64/kernel/alternative.c | 8 ++++++++ arch/arm64/kernel/image-vars.h | 1 + 3 files changed, 10 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h index eaba9ec12789..4a2a98d6d222 100644 --- a/arch/arm64/include/asm/alternative-macros.h +++ b/arch/arm64/include/asm/alternative-macros.h @@ -224,7 +224,7 @@ alternative_has_feature_likely(unsigned long feature) BUILD_BUG_ON(feature >= ARM64_NCAPS); asm_volatile_goto( - ALTERNATIVE("b %l[l_no]", "nop", %[feature]) + ALTERNATIVE_CB("b %l[l_no]", %[feature], alt_cb_patch_nops) : : [feature] "i" (feature) : diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 9a071a5fcb67..5a904d4e98ea 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -263,3 +263,11 @@ void apply_alternatives_module(void *start, size_t length) __apply_alternatives(®ion, true, &all_capabilities[0]); } #endif + +noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr, + __le32 *updptr, int nr_inst) +{ + for (int i = 0; i < nr_inst; i++) + updptr[i] = cpu_to_le32(aarch64_insn_gen_nop()); +} +EXPORT_SYMBOL(alt_cb_patch_nops); diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 118973a6ab05..4aaa5f3d1f65 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -73,6 +73,7 @@ KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter); KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable); KVM_NVHE_ALIAS(spectre_bhb_patch_wa3); KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb); +KVM_NVHE_ALIAS(alt_cb_patch_nops); /* Global kernel state accessed by nVHE hyp code. 
*/ KVM_NVHE_ALIAS(kvm_vgic_global_state); -- cgit v1.2.3 From 0072dc1b53c39fb7c4cfc5c9e5d5a30622198613 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 20 Sep 2022 15:00:44 +0100 Subject: arm64: avoid BUILD_BUG_ON() in alternative-macros Nathan reports that the build fails when using clang and LTO: | In file included from kernel/bounds.c:10: | In file included from ./include/linux/page-flags.h:10: | In file included from ./include/linux/bug.h:5: | In file included from ./arch/arm64/include/asm/bug.h:26: | In file included from ./include/asm-generic/bug.h:5: | In file included from ./include/linux/compiler.h:248: | In file included from ./arch/arm64/include/asm/rwonce.h:11: | ./arch/arm64/include/asm/alternative-macros.h:224:2: error: call to undeclared function 'BUILD_BUG_ON'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration] | BUILD_BUG_ON(feature >= ARM64_NCAPS); | ^ | ./arch/arm64/include/asm/alternative-macros.h:241:2: error: call to undeclared function 'BUILD_BUG_ON'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration] | BUILD_BUG_ON(feature >= ARM64_NCAPS); | ^ | 2 errors generated. ... the problem being that when LTO is enabled, <asm/rwonce.h> includes <asm/alternative-macros.h>, and causes a circular include dependency through <linux/bug.h>. This manifests as BUILD_BUG_ON() not being defined when used within <asm/alternative-macros.h>. This patch avoids the problem and simplifies the include dependencies by using compiletime_assert() instead of BUILD_BUG_ON(). Signed-off-by: Mark Rutland Fixes: 21fb26bfb01f ("arm64: alternatives: add alternative_has_feature_*()") Reported-by: Nathan Chancellor Tested-by: Nathan Chancellor Link: http://lore.kernel.org/r/YyigTrxhE3IRPzjs@dev-arch.thelio-3990X Cc: Ard Biesheuvel Cc: James Morse Cc: Joey Gouly Cc: Marc Zyngier Cc: Will Deacon Reviewed-by: Ard Biesheuvel Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20220920140044.1709073-1-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/alternative-macros.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h index 4a2a98d6d222..966767debaa3 100644 --- a/arch/arm64/include/asm/alternative-macros.h +++ b/arch/arm64/include/asm/alternative-macros.h @@ -215,13 +215,13 @@ alternative_endif #ifndef __ASSEMBLY__ -#include <linux/bug.h> #include <linux/types.h> static __always_inline bool alternative_has_feature_likely(unsigned long feature) { - BUILD_BUG_ON(feature >= ARM64_NCAPS); + compiletime_assert(feature < ARM64_NCAPS, + "feature must be < ARM64_NCAPS"); asm_volatile_goto( ALTERNATIVE_CB("b %l[l_no]", %[feature], alt_cb_patch_nops) @@ -238,7 +238,8 @@ l_no: static __always_inline bool alternative_has_feature_unlikely(unsigned long feature) { - BUILD_BUG_ON(feature >= ARM64_NCAPS); + compiletime_assert(feature < ARM64_NCAPS, + "feature must be < ARM64_NCAPS"); asm_volatile_goto( ALTERNATIVE("nop", "b %l[l_yes]", %[feature]) -- cgit v1.2.3 From ba00c2a04fa5431d204a4183062b30372c062aa7 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 29 Sep 2022 16:02:27 +0100 Subject: arm64: fix the build with binutils 2.27 Jon Hunter reports that for some toolchains the build has been broken since commit: 4c0bd995d73ed889 ("arm64: alternatives: have callbacks take a cap") ...
with a stream of build-time splats of the form: | CC arch/arm64/kvm/hyp/vhe/debug-sr.o | /tmp/ccY3kbki.s: Assembler messages: | /tmp/ccY3kbki.s:1600: Error: found 'L', expected: ')' | /tmp/ccY3kbki.s:1600: Error: found 'L', expected: ')' | /tmp/ccY3kbki.s:1600: Error: found 'L', expected: ')' | /tmp/ccY3kbki.s:1600: Error: found 'L', expected: ')' | /tmp/ccY3kbki.s:1600: Error: junk at end of line, first unrecognized character | is `L' | /tmp/ccY3kbki.s:1723: Error: found 'L', expected: ')' | /tmp/ccY3kbki.s:1723: Error: found 'L', expected: ')' | /tmp/ccY3kbki.s:1723: Error: found 'L', expected: ')' | /tmp/ccY3kbki.s:1723: Error: found 'L', expected: ')' | /tmp/ccY3kbki.s:1723: Error: junk at end of line, first unrecognized character | is `L' | scripts/Makefile.build:249: recipe for target | 'arch/arm64/kvm/hyp/vhe/debug-sr.o' failed The issue here is that older versions of binutils (up to and including 2.27.0) don't like an 'L' suffix on constants. For plain assembly files, UL() avoids this suffix, but in C files this gets added, and so for inline assembly we can't directly use a constant defined with `UL()`. We could avoid this by passing the constant as an input parameter, but this isn't practical given the way we use the alternative macros. Instead, just open code the constant without the `UL` suffix, and for consistency do this for both the inline assembly macro and the regular assembly macro. Signed-off-by: Mark Rutland Fixes: 4c0bd995d73e ("arm64: alternatives: have callbacks take a cap") Reported-by: Jon Hunter Link: https://lore.kernel.org/linux-arm-kernel/3cecc3a5-30b0-f0bd-c3de-9e09bd21909b@nvidia.com/ Tested-by: Jon Hunter Cc: Ard Biesheuvel Cc: Will Deacon Link: https://lore.kernel.org/r/20220929150227.1028556-1-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/alternative-macros.h | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h index 966767debaa3..b5ac0b85c9bb 100644 --- a/arch/arm64/include/asm/alternative-macros.h +++ b/arch/arm64/include/asm/alternative-macros.h @@ -2,12 +2,18 @@ #ifndef __ASM_ALTERNATIVE_MACROS_H #define __ASM_ALTERNATIVE_MACROS_H +#include <linux/bits.h> #include <linux/const.h> #include <asm/cpucaps.h> #include <asm/insn.h> -#define ARM64_CB_BIT (UL(1) << 15) +/* + * Binutils 2.27.0 can't handle a 'UL' suffix on constants, so for the assembly + * macros below we must use `(1 << ARM64_CB_SHIFT)`. + */ +#define ARM64_CB_SHIFT 15 +#define ARM64_CB_BIT BIT(ARM64_CB_SHIFT) #if ARM64_NCAPS >= ARM64_CB_BIT #error "cpucaps have overflown ARM64_CB_BIT" @@ -80,7 +86,7 @@ __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) #define ALTERNATIVE_CB(oldinstr, feature, cb) \ - __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_BIT | (feature), 1, cb) + __ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (feature), 1, cb) #else #include <asm/assembler.h> @@ -150,7 +156,7 @@ .macro alternative_cb cap, cb .set .Lasm_alt_mode, 0 .pushsection .altinstructions, "a" - altinstruction_entry 661f, \cb, ARM64_CB_BIT | \cap, 662f-661f, 0 + altinstruction_entry 661f, \cb, (1 << ARM64_CB_SHIFT) | \cap, 662f-661f, 0 .popsection 661: .endm -- cgit v1.2.3
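Taken together, the series ends with two deliberately equivalent spellings of the callback bit: BIT(ARM64_CB_SHIFT) on the C side and (1 << ARM64_CB_SHIFT) in the assembly macros, purely to sidestep the binutils 2.27 'UL' limitation. A minimal standalone check of that equivalence (illustrative only: the macro names match the kernel's, but this test harness does not exist there):

    #include <assert.h>

    #define ARM64_CB_SHIFT 15
    #define ARM64_CB_BIT   (1UL << ARM64_CB_SHIFT) /* C side: BIT(ARM64_CB_SHIFT) */

    int main(void)
    {
        /* The assembler-friendly spelling must encode the same bit... */
        assert((1 << ARM64_CB_SHIFT) == ARM64_CB_BIT);
        /* ...and a callback-tagged cap must still fit the 16-bit .hword field. */
        assert((ARM64_CB_BIT | 42UL) <= 0xffff); /* 42: hypothetical cap number */
        return 0;
    }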