path: root/arch/x86/kernel/cpu/bugs.c
author    Thomas Gleixner <tglx@linutronix.de>  2018-05-03 22:09:15 +0200
committer Thomas Gleixner <tglx@linutronix.de>  2018-05-05 00:51:43 +0200
commit    356e4bfff2c5489e016fdb925adbf12a1e3950ee (patch)
tree      27bfbcbf2c23c3d6cfa0219e93ccf35c2e14f5b3 /arch/x86/kernel/cpu/bugs.c
parent    f9544b2b076ca90d887c5ae5d74fab4c21bb7c13 (diff)
download  linux-356e4bfff2c5489e016fdb925adbf12a1e3950ee.tar.bz2
prctl: Add force disable speculation
For certain use cases it is desired to enforce mitigations so they cannot be undone afterwards. That's important for loader stubs which want to prevent a child from disabling the mitigation again. This will also be used for seccomp().

The extra preservation of the prctl state for SSB is a preparatory step for eBPF dynamic speculation control.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
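As an illustration (not part of this commit), a minimal userspace sketch of how a loader stub might use the new force-disable mode; the PR_SPEC_* constants are assumed to come from the matching include/uapi/linux/prctl.h change in this series:

	#include <errno.h>
	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>	/* PR_SET/GET_SPECULATION_CTRL, PR_SPEC_* */

	int main(void)
	{
		/* Force-disable Speculative Store Bypass; children cannot undo this. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_FORCE_DISABLE, 0, 0))
			perror("PR_SET_SPECULATION_CTRL");

		/* A later re-enable attempt is rejected by ssb_prctl_set() with EPERM. */
		errno = 0;
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_ENABLE, 0, 0) && errno == EPERM)
			printf("re-enable refused, as intended\n");

		/* The query side now reports PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE. */
		printf("state: 0x%lx\n",
		       (unsigned long)prctl(PR_GET_SPECULATION_CTRL,
					    PR_SPEC_STORE_BYPASS, 0, 0, 0));
		return 0;
	}

On kernels where ssb_mode is not SPEC_STORE_BYPASS_PRCTL, the first call fails with ENXIO instead, per the -ENXIO check below.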
Diffstat (limited to 'arch/x86/kernel/cpu/bugs.c')
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  35
1 file changed, 25 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index f8d9be0e86b1..7e0f28160e5e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -533,21 +533,37 @@ static void ssb_select_mitigation()
 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 {
-	bool rds = !!test_tsk_thread_flag(task, TIF_RDS);
+	bool update;
 
 	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
 		return -ENXIO;
 
-	if (ctrl == PR_SPEC_ENABLE)
-		clear_tsk_thread_flag(task, TIF_RDS);
-	else
-		set_tsk_thread_flag(task, TIF_RDS);
+	switch (ctrl) {
+	case PR_SPEC_ENABLE:
+		/* If speculation is force disabled, enable is not allowed */
+		if (task_spec_ssb_force_disable(task))
+			return -EPERM;
+		task_clear_spec_ssb_disable(task);
+		update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
+		break;
+	case PR_SPEC_DISABLE:
+		task_set_spec_ssb_disable(task);
+		update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+		break;
+	case PR_SPEC_FORCE_DISABLE:
+		task_set_spec_ssb_disable(task);
+		task_set_spec_ssb_force_disable(task);
+		update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+		break;
+	default:
+		return -ERANGE;
+	}
 
 	/*
 	 * If being set on non-current task, delay setting the CPU
 	 * mitigation until it is next scheduled.
 	 */
-	if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS))
+	if (task == current && update)
 		speculative_store_bypass_update();
 
 	return 0;
 }
@@ -559,7 +575,9 @@ static int ssb_prctl_get(struct task_struct *task)
 	case SPEC_STORE_BYPASS_DISABLE:
 		return PR_SPEC_DISABLE;
 	case SPEC_STORE_BYPASS_PRCTL:
-		if (test_tsk_thread_flag(task, TIF_RDS))
+		if (task_spec_ssb_force_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+		if (task_spec_ssb_disable(task))
 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
 	default:
@@ -572,9 +590,6 @@ static int ssb_prctl_get(struct task_struct *task)
 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
 			     unsigned long ctrl)
 {
-	if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
-		return -ERANGE;
-
 	switch (which) {
 	case PR_SPEC_STORE_BYPASS:
 		return ssb_prctl_set(task, ctrl);