author     Jakub Sitnicki <jakub@cloudflare.com>    2020-02-18 17:10:17 +0000
committer  Daniel Borkmann <daniel@iogearbox.net>   2020-02-21 22:29:45 +0100
commit     6e830c2f6c9641217e22330cec1372acff78dcef (patch)
tree       e11fdef619c774d023e2294796acb479511e4c77 /net/core/sock_map.c
parent     8ca30379a40103bf6734ae127ec940da798534dd (diff)
download   linux-6e830c2f6c9641217e22330cec1372acff78dcef.tar.bz2
bpf, sockmap: Don't set up upcalls and progs for listening sockets
Now that sockmap/sockhash can hold listening sockets, when setting up the psock we will (1) grab references to the verdict/parser progs, and (2) override the socket upcalls sk_data_ready and sk_write_space. However, since we cannot redirect to listening sockets, we don't need to link the socket to the BPF progs. More importantly, we don't want the listening socket to have overridden upcalls, because they would be inherited by child sockets cloned from it.

Introduce a separate initialization path for listening sockets that does not change the upcalls and ignores the BPF progs.

Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20200218171023.844439-6-jakub@cloudflare.com
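For context only (not part of the commit): a minimal user-space sketch of the case this patch targets, namely inserting a listening TCP socket into a sockmap through libbpf. The map fd, key, loopback port, and 8-byte value size are illustrative assumptions, not taken from the patch or its selftests.

/* Hypothetical example: add a listening socket to an already loaded
 * BPF_MAP_TYPE_SOCKMAP (assumed to have been created with 8-byte
 * values). Once the socket is in TCP_LISTEN state, the kernel side
 * goes through the sock_map_link_no_progs() path introduced here.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <bpf/bpf.h>

static int add_listener_to_sockmap(int map_fd, unsigned short port)
{
        struct sockaddr_in addr = {
                .sin_family = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
                .sin_port = htons(port),
        };
        __u64 value;
        __u32 key = 0;
        int fd, err;

        fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0)
                return -errno;

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
            listen(fd, SOMAXCONN)) {
                err = -errno;
                close(fd);
                return err;
        }

        /* The sockmap value is the socket's file descriptor. */
        value = fd;
        err = bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
        if (err)
                fprintf(stderr, "sockmap update failed: %s\n",
                        strerror(errno));
        return err;
}

Before this patch, such an update would also override the listener's sk_data_ready and sk_write_space, which child sockets cloned from it would then inherit.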
Diffstat (limited to 'net/core/sock_map.c')
-rw-r--r--  net/core/sock_map.c  52
1 file changed, 45 insertions(+), 7 deletions(-)
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index dd92a3556d73..a5103112a344 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -228,6 +228,30 @@ out:
         return ret;
 }

+static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk)
+{
+        struct sk_psock *psock;
+        int ret;
+
+        psock = sk_psock_get_checked(sk);
+        if (IS_ERR(psock))
+                return PTR_ERR(psock);
+
+        if (psock) {
+                tcp_bpf_reinit(sk);
+                return 0;
+        }
+
+        psock = sk_psock_init(sk, map->numa_node);
+        if (!psock)
+                return -ENOMEM;
+
+        ret = tcp_bpf_init(sk);
+        if (ret < 0)
+                sk_psock_put(sk, psock);
+        return ret;
+}
+
 static void sock_map_free(struct bpf_map *map)
 {
         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
@@ -334,6 +358,11 @@ static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
         return 0;
 }

+static bool sock_map_redirect_allowed(const struct sock *sk)
+{
+        return sk->sk_state != TCP_LISTEN;
+}
+
 static int sock_map_update_common(struct bpf_map *map, u32 idx,
                                   struct sock *sk, u64 flags)
 {
@@ -356,7 +385,14 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
         if (!link)
                 return -ENOMEM;

-        ret = sock_map_link(map, &stab->progs, sk);
+        /* Only sockets we can redirect into/from in BPF need to hold
+         * refs to parser/verdict progs and have their sk_data_ready
+         * and sk_write_space callbacks overridden.
+         */
+        if (sock_map_redirect_allowed(sk))
+                ret = sock_map_link(map, &stab->progs, sk);
+        else
+                ret = sock_map_link_no_progs(map, sk);
         if (ret < 0)
                 goto out_free;

@@ -406,11 +442,6 @@ static bool sock_map_sk_state_allowed(const struct sock *sk)
         return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
 }

-static bool sock_map_redirect_allowed(const struct sock *sk)
-{
-        return sk->sk_state != TCP_LISTEN;
-}
-
 static int sock_map_update_elem(struct bpf_map *map, void *key,
                                 void *value, u64 flags)
 {
@@ -700,7 +731,14 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
         if (!link)
                 return -ENOMEM;

-        ret = sock_map_link(map, &htab->progs, sk);
+        /* Only sockets we can redirect into/from in BPF need to hold
+         * refs to parser/verdict progs and have their sk_data_ready
+         * and sk_write_space callbacks overridden.
+         */
+        if (sock_map_redirect_allowed(sk))
+                ret = sock_map_link(map, &htab->progs, sk);
+        else
+                ret = sock_map_link_no_progs(map, sk);
         if (ret < 0)
                 goto out_free;

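The comment added to both update paths refers to sockets we can redirect into/from in BPF. As a hedged illustration only (not taken from this patch or its selftests), here is a minimal sk_skb stream-verdict program that redirects every skb to the sockmap entry at key 0; the map name, key, and program name are arbitrary examples.

/* Hypothetical sk_skb verdict program: redirect all data to the socket
 * stored at key 0 of the sockmap. Redirection targets must be
 * non-listening sockets.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 2);
        __type(key, __u32);
        __type(value, __u64);
} sock_map SEC(".maps");

SEC("sk_skb/stream_verdict")
int prog_stream_verdict(struct __sk_buff *skb)
{
        __u32 key = 0;

        /* Returns SK_PASS on success, SK_DROP otherwise. */
        return bpf_sk_redirect_map(skb, &sock_map, key, 0);
}

char _license[] SEC("license") = "GPL";

Because such redirects cannot target listening sockets, a listening entry never needs the parser/verdict prog references or the sk_data_ready/sk_write_space overrides, which is exactly what sock_map_link_no_progs() skips.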