author		Yu-cheng Yu <yu-cheng.yu@intel.com>	2020-05-12 07:54:37 -0700
committer	Borislav Petkov <bp@suse.de>	2020-05-13 10:31:07 +0200
commit		524bb73bc15c56f5587e33c817e103a259b019d2 (patch)
tree		e539de40d35be74e8b43cb9daec3e80f773d70f3 /arch/x86/kernel/fpu/signal.c
parent		8ab22804efefea9ecf3c68aa00f1fa69c70fcfad (diff)
download	linux-524bb73bc15c56f5587e33c817e103a259b019d2.tar.bz2
x86/fpu/xstate: Separate user and supervisor xfeatures mask
Before the introduction of XSAVES supervisor states, 'xfeatures_mask' was used in various places to determine XSAVE buffer components and XCR0 bits. It contained only user xstates. To support supervisor xstates, it is necessary to separate user and supervisor xstates:

- First, change 'xfeatures_mask' to 'xfeatures_mask_all', which represents the full set of bits that should ever be set in a kernel XSAVE buffer.

- Introduce xfeatures_mask_supervisor() and xfeatures_mask_user() to extract the relevant xfeatures from xfeatures_mask_all.

Co-developed-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Link: https://lkml.kernel.org/r/20200512145444.15483-4-yu-cheng.yu@intel.com
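For context, the two helpers named above live in arch/x86/include/asm/fpu/xstate.h and are not part of the signal.c hunks shown below. A minimal sketch of their shape, assuming the XFEATURE_MASK_USER_SUPPORTED and XFEATURE_MASK_SUPERVISOR_SUPPORTED constants from that header:

static inline u64 xfeatures_mask_supervisor(void)
{
	/* Supervisor xstates actually enabled in xfeatures_mask_all */
	return xfeatures_mask_all & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

static inline u64 xfeatures_mask_user(void)
{
	/* User xstates only; suitable for XCR0 and user signal frames */
	return xfeatures_mask_all & XFEATURE_MASK_USER_SUPPORTED;
}

The signal.c changes below simply switch every use of the old 'xfeatures_mask' that deals with the user-visible signal frame over to xfeatures_mask_user().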
Diffstat (limited to 'arch/x86/kernel/fpu/signal.c')
-rw-r--r--	arch/x86/kernel/fpu/signal.c	16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 585e3651b98f..3df0cfae535f 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -252,13 +252,17 @@ sanitize_restored_xstate(union fpregs_state *state,
  */
 static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
 {
+	u64 init_bv;
+
 	if (use_xsave()) {
 		if (fx_only) {
-			u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
+			init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;
+
 			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
 			return copy_user_to_fxregs(buf);
 		} else {
-			u64 init_bv = xfeatures_mask & ~xbv;
+			init_bv = xfeatures_mask_user() & ~xbv;
+
 			if (unlikely(init_bv))
 				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
 			return copy_user_to_xregs(buf, xbv);
@@ -358,7 +362,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 	if (use_xsave() && !fx_only) {
-		u64 init_bv = xfeatures_mask & ~xfeatures;
+		u64 init_bv = xfeatures_mask_user() & ~xfeatures;
 
 		if (using_compacted_format()) {
 			ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
@@ -389,7 +393,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 	fpregs_lock();
 	if (use_xsave()) {
-		u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
+		u64 init_bv;
+
+		init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;
 		copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
 	}
@@ -465,7 +471,7 @@ void fpu__init_prepare_fx_sw_frame(void)
 	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
 	fx_sw_reserved.extended_size = size;
-	fx_sw_reserved.xfeatures = xfeatures_mask;
+	fx_sw_reserved.xfeatures = xfeatures_mask_user();
 	fx_sw_reserved.xstate_size = fpu_user_xstate_size;
 
 	if (IS_ENABLED(CONFIG_IA32_EMULATION) ||