author    Daniel Borkmann <daniel@iogearbox.net>  2021-03-10 01:06:34 +0100
committer Daniel Borkmann <daniel@iogearbox.net>  2021-03-10 01:07:21 +0100
commit    32f91529e2bdbe0d92edb3ced41dfba4beffa84a (patch)
tree      2e6ca2aa0d6d1ac694002dd4aff11915473d4118 /kernel/bpf/cpumap.c
parent    11d39cfeecfc9d92a5faa2a55c228e796478e0cb (diff)
parent    ee75aef23afe6e88497151c127c13ed69f41aaa2 (diff)
download  linux-32f91529e2bdbe0d92edb3ced41dfba4beffa84a.tar.bz2
Merge branch 'bpf-xdp-redirect'
Björn Töpel says:

====================

This two-patch series contains two optimizations for the bpf_redirect_map() helper and the xdp_do_redirect() function.

The bpf_redirect_map() optimization avoids the map lookup dispatching. Instead of having a switch statement that selects the correct lookup function, we let bpf_redirect_map() be a map operation, where each map has its own bpf_redirect_map() implementation. This way the run-time lookup is avoided.

The xdp_do_redirect() patch restructures the code so that the map pointer indirection can be avoided.

Performance-wise, I got a 4% improvement for XSKMAP (sample: xdpsock/rx-drop) and 8% (sample: xdp_redirect_map) on my machine.

v5->v6: Removed the REDIR enum, and instead use map_id and map_type. (Daniel)
        Applied Daniel's fixups on patch 1. (Daniel)
v4->v5: Renamed the map operation to map_redirect. (Daniel)
v3->v4: Made bpf_redirect_map() a map operation. (Daniel)
v2->v3: Fixed the build when CONFIG_NET is not set. (lkp)
v1->v2: Removed a warning when CONFIG_BPF_SYSCALL was not set. (lkp)
        Cleaned up the case clause in xdp_do_generic_redirect_map(). (Toke)
        Re-added a comment. (Toke)
rfc->v1: Use map_id, and remove bpf_clear_redirect_map(). (Toke)
         Get rid of the macro and use __always_inline. (Jesper)

====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
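To illustrate the dispatch-avoidance technique the series describes, here is a minimal, self-contained userspace C sketch. It is not the kernel implementation: __redirect_map and the toy struct map are illustrative stand-ins for __bpf_xdp_redirect_map() and struct bpf_map. The idea is that a single always-inlined helper takes the map-specific lookup function as a parameter, so each map type's wrapper compiles down to a direct, inlined lookup with no run-time switch over map types:

#include <stdio.h>

struct map;                                  /* stand-in for struct bpf_map */
typedef void *(*lookup_fn)(struct map *m, unsigned int key);

struct map {
	const char *name;
	void *slots[4];                      /* toy storage */
};

/* Generic helper: because it is always inlined into each per-map
 * wrapper, the compiler can resolve 'lookup' to a direct call and
 * the per-map-type dispatch disappears from the hot path. */
static inline __attribute__((always_inline))
int __redirect_map(struct map *m, unsigned int key, lookup_fn lookup)
{
	void *tgt = lookup(m, key);

	if (!tgt)
		return -1;                   /* think XDP_ABORTED */
	printf("%s: redirecting via slot %u\n", m->name, key);
	return 0;                            /* think XDP_REDIRECT */
}

/* One lookup plus one thin wrapper per map type, mirroring the
 * cpu_map_redirect() wrapper added in the diff below. */
static void *cpu_map_lookup(struct map *m, unsigned int key)
{
	return key < 4 ? m->slots[key] : NULL;
}

static int cpu_map_redirect(struct map *m, unsigned int key)
{
	return __redirect_map(m, key, cpu_map_lookup);
}

int main(void)
{
	int dummy;
	struct map cmap = { .name = "cpumap", .slots = { &dummy } };

	/* The map-specific wrapper is chosen once, up front; no switch
	 * over map types at call time. */
	return cpu_map_redirect(&cmap, 0);
}

Because __redirect_map() is forced inline into cpu_map_redirect(), the lookup pointer is resolved at compile time; this is the same effect the series achieves by routing bpf_redirect_map() through each map's ops table.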
Diffstat (limited to 'kernel/bpf/cpumap.c')
-rw-r--r--  kernel/bpf/cpumap.c  9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 5d1469de6921..0cf2791d5099 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -543,7 +543,6 @@ static void cpu_map_free(struct bpf_map *map)
 	 * complete.
 	 */
 
-	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
 	/* For cpu_map the remote CPUs can still be using the entries
@@ -563,7 +562,7 @@ static void cpu_map_free(struct bpf_map *map)
 	kfree(cmap);
 }
 
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
+static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
 	struct bpf_cpu_map_entry *rcpu;
@@ -600,6 +599,11 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	return 0;
 }
 
+static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
+{
+	return __bpf_xdp_redirect_map(map, ifindex, flags, __cpu_map_lookup_elem);
+}
+
 static int cpu_map_btf_id;
 const struct bpf_map_ops cpu_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
@@ -612,6 +616,7 @@ const struct bpf_map_ops cpu_map_ops = {
 	.map_check_btf = map_check_no_btf,
 	.map_btf_name = "bpf_cpu_map",
 	.map_btf_id = &cpu_map_btf_id,
+	.map_redirect = cpu_map_redirect,
 };
 
 static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
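From the BPF program's point of view nothing changes: bpf_redirect_map() is called exactly as before, and the kernel now dispatches it through the map's map_redirect operation, i.e. cpu_map_redirect() above. A minimal XDP sketch that redirects into a cpumap (the map sizing and the fixed CPU index 0 are illustrative assumptions, not part of this commit):

/* Illustrative only: a small XDP program using a cpumap. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, struct bpf_cpumap_val);
} cpu_map SEC(".maps");

SEC("xdp")
int redirect_to_cpu(struct xdp_md *ctx)
{
	/* Steer every packet to CPU 0. XDP_PASS in the flags argument
	 * is the fallback action returned if the map lookup fails. */
	return bpf_redirect_map(&cpu_map, 0, XDP_PASS);
}

char _license[] SEC("license") = "GPL";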