author     Jiri Olsa <jolsa@kernel.org>            2020-03-12 20:56:01 +0100
committer  Alexei Starovoitov <ast@kernel.org>     2020-03-13 12:49:51 -0700
commit     ca4424c920f574b7246ff1b6d83cfdfd709e42c8
tree       1fb56b4a04f00eeadb71455bbec75fa752051263 /kernel/bpf/core.c
parent     ecb60d1c670e9b205197d8e4381b19e77bc2d834
bpf: Move ksym_tnode to bpf_ksym
Move the ksym_tnode list node into the 'struct bpf_ksym' object, so the symbol itself can be chained and used in other objects like bpf_trampoline and bpf_dispatcher.

We need the bpf_ksym object to be linked both in bpf_kallsyms (via lnode, for /proc/kallsyms) and in bpf_tree (via tnode, for the bpf address lookup functions __bpf_address_lookup and bpf_prog_kallsyms_find).

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200312195610.346362-7-jolsa@kernel.org
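To make the data-structure change concrete, here is a sketch of 'struct bpf_ksym' as this patch uses it. It is reconstructed from the fields the diff below references (start, end, lnode, tnode, plus the name filled in by bpf_prog_ksym_set_name), not copied from the tree; the authoritative definition lives in include/linux/bpf.h.

struct bpf_ksym {
	unsigned long		start;	/* start of the symbol's address range */
	unsigned long		end;	/* end of the symbol's address range */
	char			name[KSYM_NAME_LEN];
	struct list_head	lnode;	/* chained on bpf_kallsyms, backs /proc/kallsyms */
	struct latch_tree_node	tnode;	/* keyed in bpf_tree for address lookup; moved here by this patch */
};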
Diffstat (limited to 'kernel/bpf/core.c')
-rw-r--r--  kernel/bpf/core.c | 24 ++++++++++--------------
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5eb5d5bb7a95..ab1846c34167 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -572,31 +572,27 @@ bpf_prog_ksym_set_name(struct bpf_prog *prog)
 	*sym = 0;
 }
 
-static __always_inline unsigned long
-bpf_get_prog_addr_start(struct latch_tree_node *n)
+static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
 {
-	const struct bpf_prog_aux *aux;
-
-	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
-	return aux->ksym.start;
+	return container_of(n, struct bpf_ksym, tnode)->start;
 }
 
 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
 					  struct latch_tree_node *b)
 {
-	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
+	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
 }
 
 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 {
 	unsigned long val = (unsigned long)key;
-	const struct bpf_prog_aux *aux;
+	const struct bpf_ksym *ksym;
 
-	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
+	ksym = container_of(n, struct bpf_ksym, tnode);
 
-	if (val < aux->ksym.start)
+	if (val < ksym->start)
 		return -1;
-	if (val >= aux->ksym.end)
+	if (val >= ksym->end)
 		return  1;
 
 	return  0;
@@ -615,7 +611,7 @@ static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
 {
 	WARN_ON_ONCE(!list_empty(&aux->ksym.lnode));
 	list_add_tail_rcu(&aux->ksym.lnode, &bpf_kallsyms);
-	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+	latch_tree_insert(&aux->ksym.tnode, &bpf_tree, &bpf_tree_ops);
 }
 
 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
@@ -623,7 +619,7 @@ static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
 	if (list_empty(&aux->ksym.lnode))
 		return;
 
-	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+	latch_tree_erase(&aux->ksym.tnode, &bpf_tree, &bpf_tree_ops);
 	list_del_rcu(&aux->ksym.lnode);
 }
 
@@ -668,7 +664,7 @@ static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
 	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
 
 	return n ?
-	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
+	       container_of(n, struct bpf_prog_aux, ksym.tnode)->prog :
 	       NULL;
 }
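Worth spelling out for the last hunk: bpf_prog_kallsyms_find() now hands container_of() the nested member 'ksym.tnode', resolving from the embedded latch_tree_node straight back to the enclosing bpf_prog_aux in one step. Below is a minimal userspace sketch of that pattern; the types are simplified stand-ins for illustration, not the real kernel definitions.

/*
 * Demo of container_of() with a nested member designator, mirroring
 * container_of(n, struct bpf_prog_aux, ksym.tnode) from the hunk above.
 * Build with: cc -o demo demo.c
 */
#include <stddef.h>
#include <stdio.h>

/* Simplified userspace container_of(); the kernel macro adds type checks. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct latch_tree_node { int dummy; };		/* placeholder, not the kernel struct */

struct bpf_ksym {				/* stand-in for the kernel's bpf_ksym */
	unsigned long start;
	unsigned long end;
	struct latch_tree_node tnode;		/* tree node now lives inside the ksym */
};

struct bpf_prog_aux {				/* stand-in for the kernel's bpf_prog_aux */
	const char *name;
	struct bpf_ksym ksym;			/* ksym embedded in the aux */
};

int main(void)
{
	struct bpf_prog_aux aux = { .name = "demo_prog" };
	struct latch_tree_node *n = &aux.ksym.tnode;

	/* offsetof() accepts a nested member, so one container_of() hop
	 * goes from the tree node back to the enclosing bpf_prog_aux. */
	struct bpf_prog_aux *found = container_of(n, struct bpf_prog_aux, ksym.tnode);

	printf("resolved aux->name = %s\n", found->name);
	return 0;
}

This is the property the commit message leans on: because the latch tree node travels with the bpf_ksym, later objects such as bpf_trampoline and bpf_dispatcher can embed a bpf_ksym and reuse the same tree without carrying bpf_prog_aux-specific plumbing.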