| author | Lorenz Bauer <lmb@cloudflare.com> | 2020-10-12 11:18:50 +0200 |
|---|---|---|
| committer | Daniel Borkmann <daniel@iogearbox.net> | 2020-10-15 20:49:56 +0200 |
| commit | f58423aeab28f861b67933206f322f764f05787d (patch) | |
| tree | eb606d94e8041a53fea006c6db698f0d1788c1b3 /net/core | |
| parent | e688c3db7ca69bea1872c5706aec6a7fdf89df17 (diff) | |
| download | linux-f58423aeab28f861b67933206f322f764f05787d.tar.bz2 | |
bpf, sockmap: Add locking annotations to iterator
The sparse checker currently outputs the following warnings:
include/linux/rcupdate.h:632:9: sparse: sparse: context imbalance in 'sock_hash_seq_start' - wrong count at exit
include/linux/rcupdate.h:632:9: sparse: sparse: context imbalance in 'sock_map_seq_start' - wrong count at exit
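"Context imbalance ... wrong count at exit" is sparse's way of saying that a function's net effect on an annotated lock context does not match what its declaration promises: rcu_read_lock()/rcu_read_unlock() carry __acquire(RCU)/__release(RCU) markers (which is why the warnings point into rcupdate.h), and the seq_file iterator takes the RCU read lock in ->start() but only drops it in ->stop(). A minimal sketch of the offending pattern, with hypothetical names, given purely as an illustration rather than code from this patch:

/* Hypothetical illustration: the RCU read-side lock is acquired in one
 * seq_file callback and released in another, so each function changes
 * the context count without an annotation saying it is allowed to.
 */
#include <linux/rcupdate.h>
#include <linux/seq_file.h>

static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
{
	rcu_read_lock();	/* context 0 -> 1 */
	return NULL;		/* sparse: wrong count at exit (expected 0) */
}

static void demo_seq_stop(struct seq_file *seq, void *v)
{
	rcu_read_unlock();	/* context 0 -> -1 from sparse's point of view */
}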
Add the necessary __acquires and __releases annotations to make the
iterator locking schema palatable to sparse. Also add __must_hold
for good measure.
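When sparse runs (__CHECKER__ defined), these annotations expand to context attributes describing the expected lock count on entry and exit. A rough, from-memory paraphrase of the sparse-only definitions in include/linux/compiler_types.h:

#ifdef __CHECKER__
/* Paraphrased sketch; see include/linux/compiler_types.h for the
 * authoritative definitions.
 */
# define __must_hold(x)	__attribute__((context(x,1,1)))	/* held on entry and on exit */
# define __acquires(x)	__attribute__((context(x,0,1)))	/* not held on entry, held on exit */
# define __releases(x)	__attribute__((context(x,1,0)))	/* held on entry, released by exit */
#endif

With __acquires on ->start(), __releases on ->stop(), and __must_hold on ->next() and ->show(), the counts balance across the iterator callbacks, which silences the warnings above.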
The kernel codebase uses both __acquires(rcu) and __acquires(RCU).
I couldn't find any guidance on which one is preferred, so I used
the variant that is easier to type out.
Fixes: 0365351524d7 ("net: Allow iterating sockmap and sockhash")
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Jakub Sitnicki <jakub@cloudflare.com>
Link: https://lore.kernel.org/bpf/20201012091850.67452-1-lmb@cloudflare.com
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/sock_map.c | 8 |
1 file changed, 8 insertions, 0 deletions
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index df09c39a4dd2..203900a6ca5f 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -745,6 +745,7 @@ static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
 }
 
 static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(rcu)
 {
 	struct sock_map_seq_info *info = seq->private;
 
@@ -757,6 +758,7 @@ static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
 }
 
 static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+	__must_hold(rcu)
 {
 	struct sock_map_seq_info *info = seq->private;
 
@@ -767,6 +769,7 @@ static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static int sock_map_seq_show(struct seq_file *seq, void *v)
+	__must_hold(rcu)
 {
 	struct sock_map_seq_info *info = seq->private;
 	struct bpf_iter__sockmap ctx = {};
@@ -789,6 +792,7 @@ static int sock_map_seq_show(struct seq_file *seq, void *v)
 }
 
 static void sock_map_seq_stop(struct seq_file *seq, void *v)
+	__releases(rcu)
 {
 	if (!v)
 		(void)sock_map_seq_show(seq, NULL);
@@ -1353,6 +1357,7 @@ static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
 }
 
 static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(rcu)
 {
 	struct sock_hash_seq_info *info = seq->private;
 
@@ -1365,6 +1370,7 @@ static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
 }
 
 static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+	__must_hold(rcu)
 {
 	struct sock_hash_seq_info *info = seq->private;
 
@@ -1373,6 +1379,7 @@ static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static int sock_hash_seq_show(struct seq_file *seq, void *v)
+	__must_hold(rcu)
 {
 	struct sock_hash_seq_info *info = seq->private;
 	struct bpf_iter__sockmap ctx = {};
@@ -1396,6 +1403,7 @@ static int sock_hash_seq_show(struct seq_file *seq, void *v)
 }
 
 static void sock_hash_seq_stop(struct seq_file *seq, void *v)
+	__releases(rcu)
 {
 	if (!v)
 		(void)sock_hash_seq_show(seq, NULL);
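Not part of the patch, but the report can be reproduced (and the fix verified) by running the kernel's sparse pass over just this object:

	make C=2 net/core/sock_map.o

C=1 checks only sources that are being re-compiled; C=2 forces the check even if the object is already up to date.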