author      Martin KaFai Lau <martin.lau@kernel.org>    2022-10-25 11:45:22 -0700
committer   Alexei Starovoitov <ast@kernel.org>         2022-10-25 23:11:46 -0700
commit      8a7dac37f27a3dfbd814bf29a73d6417db2c81d9 (patch)
tree        cec23260f0e710a27b8ac5ad38e9275d3ed43562 /kernel/bpf/bpf_task_storage.c
parent      fda64ae0bb3e37b5a4292625c6931cb156224d0f (diff)
download    linux-8a7dac37f27a3dfbd814bf29a73d6417db2c81d9.tar.bz2
bpf: Add new bpf_task_storage_delete proto with no deadlock detection
The bpf_lsm and bpf_iter programs do not recur in a way that would cause a
deadlock. The situation is similar to bpf_pid_task_storage_delete_elem(),
which is called from the map_delete_elem syscall and does not need deadlock
detection; keeping the detection here would only cause unnecessary failures
when calling the bpf_task_storage_delete() helper.

This patch adds a bpf_task_storage_delete proto that does not do deadlock
detection. It will be used by bpf_lsm and bpf_iter programs.

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20221025184524.3526117-8-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
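As an illustration only (not part of this commit), below is a minimal sketch of the kind of BPF LSM program that benefits: it deletes a task-local storage entry from a hook that does not recurse into itself, so deadlock detection would only add spurious failures. The map name, value type, and the lsm/task_free hook choice are assumptions made for the example.

/* Illustrative sketch; map name, value type and hook are assumptions. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u64);
} task_store SEC(".maps");

SEC("lsm/task_free")
int BPF_PROG(free_task_entry, struct task_struct *task)
{
	/* A bpf_lsm program does not recur here, so a delete proto
	 * without deadlock detection (as added by this patch) avoids
	 * unnecessary failures of this helper call.
	 */
	bpf_task_storage_delete(&task_store, task);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";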
Diffstat (limited to 'kernel/bpf/bpf_task_storage.c')
-rw-r--r--   kernel/bpf/bpf_task_storage.c | 28
1 file changed, 28 insertions, 0 deletions
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index f3f79b618a68..ba3fe72d1fa5 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -311,6 +311,25 @@ BPF_CALL_2(bpf_task_storage_delete_recur, struct bpf_map *, map, struct task_str
 	return ret;
 }
 
+BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
+	   task)
+{
+	int ret;
+
+	WARN_ON_ONCE(!bpf_rcu_lock_held());
+	if (!task)
+		return -EINVAL;
+
+	bpf_task_storage_lock();
+	/* This helper must only be called from places where the lifetime of the task
+	 * is guaranteed. Either by being refcounted or by being protected
+	 * by an RCU read-side critical section.
+	 */
+	ret = task_storage_delete(task, map, true);
+	bpf_task_storage_unlock();
+	return ret;
+}
+
 static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
 {
 	return -ENOTSUPP;
@@ -382,3 +401,12 @@ const struct bpf_func_proto bpf_task_storage_delete_recur_proto = {
 	.arg2_type = ARG_PTR_TO_BTF_ID,
 	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 };
+
+const struct bpf_func_proto bpf_task_storage_delete_proto = {
+	.func = bpf_task_storage_delete,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_CONST_MAP_PTR,
+	.arg2_type = ARG_PTR_TO_BTF_ID,
+	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
+};
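Not visible in this diff is how the new proto reaches the verifier: a program type's get_func_proto() callback has to return it for BPF_FUNC_task_storage_delete. The fragment below is only a hedged sketch of that wiring; the callback name is hypothetical and the recursion check is an assumption about the follow-up patches in this series, not something introduced here.

/* Hypothetical callback fragment; only the two delete protos above come from
 * this patch. The recursion test and call sites are assumed follow-up wiring.
 */
static const struct bpf_func_proto *
example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_task_storage_delete:
		/* Recursion-prone attach points keep the deadlock
		 * detection; bpf_lsm and bpf_iter programs can take the
		 * new proto without it.
		 */
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_delete_recur_proto;
		return &bpf_task_storage_delete_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}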