author | Miroslav Benes <mbenes@suse.cz> | 2017-12-21 14:40:43 +0100
committer | Jiri Kosina <jkosina@suse.cz> | 2018-01-11 17:36:07 +0100
commit | 8869016d3a58cbe7c31c70f4f008a92122b271c7 (patch)
tree | 02ebd2a73b91b135034353426d3430e3b28c7832 /kernel/livepatch
parent | c99a2be790b07752d8cc694434d3450afd4c5a00 (diff)
download | linux-8869016d3a58cbe7c31c70f4f008a92122b271c7.tar.bz2
livepatch: add locking to force and signal functions
klp_send_signals() and klp_force_transition() do not acquire klp_mutex,
because it seemed superfluous. A potential race in klp_send_signals()
was harmless, and there was nothing in klp_force_transition() which
needed to be synchronized. That changed with the addition of the
klp_forced variable during the review process.

There is now a small window in which klp_complete_transition() does not
see klp_forced set to true even though all tasks have already
transitioned to the target state. module_put() is then called and the
module can be removed.

Acquire klp_mutex in the sysfs callback to prevent it. Do the same for
the signal sending just to be sure. There is no real downside to that.
Fixes: c99a2be790b07 ("livepatch: force transition to finish")
Fixes: 43347d56c8d9d ("livepatch: send a fake signal to all blocking tasks")
Reported-by: Jason Baron <jbaron@akamai.com>
Signed-off-by: Miroslav Benes <mbenes@suse.cz>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
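
The race described in the message above is easier to see in miniature. Below is a minimal user-space C sketch, not the kernel implementation: a pthread mutex stands in for klp_mutex, plain variables stand in for klp_transition_patch and klp_forced, and the function names are borrowed only to mirror the shape of the fix. It shows the check-then-act pattern the patch introduces, where validating the patch pointer and setting the forced flag happen under the same lock that the completion path takes.

/*
 * Minimal user-space sketch of the locking pattern this patch
 * introduces; not kernel code. pthread_mutex stands in for klp_mutex,
 * and the variables below stand in for klp_transition_patch and
 * klp_forced. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t klp_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *transition_patch;	/* stands in for klp_transition_patch */
static bool forced;		/* stands in for klp_forced */

/* Mirrors the fixed force_store(): validate and act under one lock. */
static int force_store(void *patch)
{
	pthread_mutex_lock(&klp_mutex);

	if (patch != transition_patch) {
		pthread_mutex_unlock(&klp_mutex);
		return -1;	/* -EINVAL in the kernel */
	}

	forced = true;		/* what klp_force_transition() ends up setting */

	pthread_mutex_unlock(&klp_mutex);
	return 0;
}

/*
 * Mirrors the decision in klp_complete_transition(): because the read
 * of 'forced' happens under the same mutex, it cannot interleave with
 * the check-and-set above, so the reference-drop decision (module_put()
 * in the kernel) always sees a consistent value.
 */
static void complete_transition(void)
{
	pthread_mutex_lock(&klp_mutex);
	printf(forced ? "forced: keep the module reference\n"
		      : "not forced: safe to drop it (module_put())\n");
	pthread_mutex_unlock(&klp_mutex);
}

int main(void)
{
	int dummy_patch;	/* placeholder object to point at */

	transition_patch = &dummy_patch;
	if (force_store(&dummy_patch) == 0)
		complete_transition();
	return 0;
}

Without the lock, complete_transition() could read 'forced' in the window between the pointer check and the flag update, which is exactly the window the commit message describes.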
Diffstat (limited to 'kernel/livepatch')
-rw-r--r-- | kernel/livepatch/core.c | 52
1 file changed, 28 insertions(+), 24 deletions(-)
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 1c3c9b27c916..8fd8e8f126da 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -537,22 +537,24 @@ static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
 	int ret;
 	bool val;
 
-	patch = container_of(kobj, struct klp_patch, kobj);
-
-	/*
-	 * klp_mutex lock is not grabbed here intentionally. It is not really
-	 * needed. The race window is harmless and grabbing the lock would only
-	 * hold the action back.
-	 */
-	if (patch != klp_transition_patch)
-		return -EINVAL;
-
 	ret = kstrtobool(buf, &val);
 	if (ret)
 		return ret;
 
-	if (val)
-		klp_send_signals();
+	if (!val)
+		return count;
+
+	mutex_lock(&klp_mutex);
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	if (patch != klp_transition_patch) {
+		mutex_unlock(&klp_mutex);
+		return -EINVAL;
+	}
+
+	klp_send_signals();
+
+	mutex_unlock(&klp_mutex);
 
 	return count;
 }
@@ -564,22 +566,24 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
 	int ret;
 	bool val;
 
-	patch = container_of(kobj, struct klp_patch, kobj);
-
-	/*
-	 * klp_mutex lock is not grabbed here intentionally. It is not really
-	 * needed. The race window is harmless and grabbing the lock would only
-	 * hold the action back.
-	 */
-	if (patch != klp_transition_patch)
-		return -EINVAL;
-
 	ret = kstrtobool(buf, &val);
 	if (ret)
 		return ret;
 
-	if (val)
-		klp_force_transition();
+	if (!val)
+		return count;
+
+	mutex_lock(&klp_mutex);
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	if (patch != klp_transition_patch) {
+		mutex_unlock(&klp_mutex);
+		return -EINVAL;
+	}
+
+	klp_force_transition();
+
+	mutex_unlock(&klp_mutex);
 
 	return count;
 }
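
For context on how these paths are reached: signal and force are per-patch sysfs attributes (documented in Documentation/ABI/testing/sysfs-kernel-livepatch), so, assuming a transition patch named <patch> is loaded, writing 1 to /sys/kernel/livepatch/<patch>/signal or /sys/kernel/livepatch/<patch>/force invokes signal_store() or force_store(). kstrtobool() maps the written string to val, and after this change any write of a false value returns early, before the lock is ever taken.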