author     Juergen Gross <jgross@suse.com>        2021-03-11 15:23:13 +0100
committer  Borislav Petkov <bp@suse.de>           2021-03-11 19:51:49 +0100
commit     4e6292114c741221479046515b1aa8145cf1e3f6 (patch)
tree       f3a8bb9b151d60b60c26f81872ebcc32cf55a94b /arch/x86/kernel/paravirt-spinlocks.c
parent     2fe2a2c7a97c9bc32acc79154b75e754280f7867 (diff)
download   linux-4e6292114c741221479046515b1aa8145cf1e3f6.tar.bz2
x86/paravirt: Add new features for paravirt patching
To be able to switch paravirt patching from special-cased custom code
sequences to ALTERNATIVE handling, some new X86_FEATURE_* flags are needed.
They allow the standard indirect pv call to remain the default code and to
patch in the non-Xen custom code sequence via ALTERNATIVE patching later.

Make sure paravirt patching is performed before alternatives patching.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210311142319.4723-9-jgross@suse.com
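
Illustrative sketch (not part of this patch): the fragment below shows, under
made-up names (example_vcpu_is_preempted(), example_pv_vcpu_is_preempted()),
how a later ALTERNATIVE() user can key off a feature bit forced by
paravirt_set_cap(). The real series uses the PVOP_*()/ALT_NOT() machinery;
here the direction of the replacement is inverted so the plain ALTERNATIVE()
macro suffices.

/*
 * Sketch only: the short native sequence ("return false") is the inline
 * default; a call to a pv helper is patched in when paravirt_set_cap()
 * has forced X86_FEATURE_VCPUPREEMPT.  The helper is hypothetical and is
 * assumed to be a callee-save thunk (all registers except %rax preserved),
 * otherwise the missing clobber list would be a bug.
 */
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>

bool example_pv_vcpu_is_preempted(long cpu);    /* hypothetical callee-save thunk */

static __always_inline bool example_vcpu_is_preempted(long cpu)
{
        bool preempted;

        asm volatile(ALTERNATIVE("xor %0, %0",                          /* native: never preempted */
                                 "call example_pv_vcpu_is_preempted",   /* pv: ask the hypervisor */
                                 X86_FEATURE_VCPUPREEMPT)
                     : "=a" (preempted)
                     : "D" (cpu)
                     : "memory");

        return preempted;
}
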
Diffstat (limited to 'arch/x86/kernel/paravirt-spinlocks.c')
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c  9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 4f75d0cf6305..9e1ea99ad9df 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -32,3 +32,12 @@ bool pv_is_native_vcpu_is_preempted(void)
return pv_ops.lock.vcpu_is_preempted.func ==
__raw_callee_save___native_vcpu_is_preempted;
}
+
+void __init paravirt_set_cap(void)
+{
+ if (!pv_is_native_spin_unlock())
+ setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);
+
+ if (!pv_is_native_vcpu_is_preempted())
+ setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
+}
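
A rough sketch of the ordering the commit message requires (the wrapper name
example_patch_order() is made up; apply_paravirt() and apply_alternatives()
are the existing patching entry points, and paravirt_set_cap() is assumed to
be declared in <asm/paravirt.h> by this series):

#include <linux/init.h>
#include <asm/alternative.h>
#include <asm/paravirt.h>

/* Linker-provided patch-site section bounds (declarations assumed here). */
extern struct paravirt_patch_site __parainstructions[], __parainstructions_end[];
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];

void __init example_patch_order(void)
{
        /* 1. Force the artificial features (X86_FEATURE_PVUNLOCK,
         *    X86_FEATURE_VCPUPREEMPT) based on the installed pv_ops. */
        paravirt_set_cap();

        /* 2. Paravirt patching: replace indirect pv calls with direct ones. */
        apply_paravirt(__parainstructions, __parainstructions_end);

        /* 3. ALTERNATIVE patching: may now replace a direct call with a
         *    custom code sequence, keyed on the features forced in step 1. */
        apply_alternatives(__alt_instructions, __alt_instructions_end);
}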