author    David Brazdil <dbrazdil@google.com>  2020-06-25 14:14:19 +0100
committer Marc Zyngier <maz@kernel.org>        2020-07-05 18:38:45 +0100
commit    c50cb04303cb88c517715b078e3e010c024af1a5 (patch)
tree      8b0ecfb937dbe66af0b99e884b6d7a1dbe3541c8 /arch/arm64/kvm/hyp/nvhe/switch.c
parent    c04dd455eb311d2d289c9d81d080eaf11a06cebf (diff)
KVM: arm64: Remove __hyp_text macro, use build rules instead
With nVHE code now fully separated from the rest of the kernel, the effects
of the __hyp_text macro (which had to be applied on all nVHE code) can be
achieved with build rules instead. The macro used to:
  (a) move code to the .hyp.text ELF section, now done by renaming .text
      using `objcopy`, and
  (b) apply `notrace` and `__noscs`, negating the effects of CC_FLAGS_FTRACE
      and CC_FLAGS_SCS, respectively; those flags are now erased from
      KBUILD_CFLAGS instead (the same way as in the EFI stub).

Note that by removing __hyp_text from code shared with VHE, all VHE code is
now compiled into .text and without `notrace` and `__noscs`.

Use of '.pushsection .hyp.text' was removed from assembly files, as this is
now also covered by the build rules.

For MAINTAINERS: if this needs to be re-run, uses of the macro were removed
with the following command. Formatting was fixed up manually.

find arch/arm64/kvm/hyp -type f -name '*.c' -o -name '*.h' \
	-exec sed -i 's/ __hyp_text//g' {} +

Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200625131420.71444-15-dbrazdil@google.com
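For context, a minimal sketch of what the removed macro amounted to,
reconstructed from the description above (the exact historical definition
lived in the hyp headers and may have differed slightly):

/*
 * Sketch of the macro being removed (reconstructed, not the verbatim
 * kernel definition). It (a) placed the function in the .hyp.text ELF
 * section and (b) suppressed ftrace patching and shadow call stack
 * instrumentation on that function.
 */
#define __hyp_text __section(".hyp.text") notrace __noscs

With the macro gone, the same effects come from the nVHE build rules:
`objcopy` renames .text to .hyp.text on the compiled objects, and
CC_FLAGS_FTRACE / CC_FLAGS_SCS are filtered out of KBUILD_CFLAGS,
mirroring how the EFI stub is built.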
Diffstat (limited to 'arch/arm64/kvm/hyp/nvhe/switch.c')
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index f08bfb951df6..a1dcf59bd45e 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -27,7 +27,7 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
-static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
+static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
@@ -58,7 +58,7 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
}
}
-static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
u64 mdcr_el2;
@@ -93,13 +93,13 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}
-static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
+static void __deactivate_vm(struct kvm_vcpu *vcpu)
{
write_sysreg(0, vttbr_el2);
}
/* Save VGICv3 state on non-VHE systems */
-static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
+static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
@@ -108,7 +108,7 @@ static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
}
/* Restore VGICv3 state on non-VHE systems */
-static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
+static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
@@ -119,7 +119,7 @@ static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
/**
* Disable host events, enable guest events
*/
-static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
+static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
struct kvm_host_data *host;
struct kvm_pmu_events *pmu;
@@ -139,7 +139,7 @@ static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
/**
* Disable guest events, enable host events
*/
-static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
+static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
struct kvm_host_data *host;
struct kvm_pmu_events *pmu;
@@ -155,7 +155,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
}
/* Switch to the guest for legacy non-VHE systems */
-int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
@@ -242,7 +242,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
return exit_code;
}
-void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
+void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
u64 spsr = read_sysreg_el2(SYS_SPSR);
u64 elr = read_sysreg_el2(SYS_ELR);