author		NeilBrown <neilb@suse.com>	2019-04-12 11:52:07 +1000
committer	David S. Miller <davem@davemloft.net>	2019-04-12 17:34:45 -0700
commit		e4edbe3c1f44c84f319149aeb998e7e36b3b897f
tree		147388b269b9b7d4418235592ecfabb4c092d262
parent		c252aa3e8ed3ac54060b1838f6a47f29799a133d
rhashtable: fix some __rcu annotation errors
With these annotations, the rhashtable now gets no
warnings when compiled with "C=1" for sparse checking.
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
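
Sparse treats __rcu as a separate address space: a pointer annotated __rcu may only be read through rcu_dereference() (and friends) and written through rcu_assign_pointer(), which handle the annotation in a controlled way; any plain access makes "make C=1" report an address-space mismatch. A minimal sketch of the pattern the checker enforces, using hypothetical names (demo_item, demo_ptr, demo_read, demo_publish) that are not part of this patch:

#include <linux/rcupdate.h>

struct demo_item {
	int value;
};

/* Shared pointer published to RCU readers; __rcu is what sparse checks. */
static struct demo_item __rcu *demo_ptr;

static int demo_read(void)
{
	struct demo_item *p;
	int v = -1;

	rcu_read_lock();
	p = rcu_dereference(demo_ptr);	/* ok: reader-side accessor */
	if (p)
		v = p->value;
	rcu_read_unlock();

	/* A plain "p = demo_ptr;" here would trigger a C=1 warning. */
	return v;
}

static void demo_publish(struct demo_item *newp)
{
	rcu_assign_pointer(demo_ptr, newp);	/* ok: publisher side */
}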
-rw-r--r--	include/linux/rhashtable.h	11
-rw-r--r--	lib/rhashtable.c	4
2 files changed, 8 insertions, 7 deletions
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 460c0eaf6b96..2711cbf01b64 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -40,7 +40,7 @@
  * the chain. To avoid dereferencing this pointer without clearing
  * the bit first, we use an opaque 'struct rhash_lock_head *' for the
  * pointer stored in the bucket. This struct needs to be defined so
- * that rcu_derefernce() works on it, but it has no content so a
+ * that rcu_dereference() works on it, but it has no content so a
  * cast is needed for it to be useful. This ensures it isn't
  * used by mistake with clearing the lock bit first.
  */
@@ -130,10 +130,10 @@ static inline void rht_unlock(struct bucket_table *tbl,
 }
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
-				     struct rhash_lock_head **bkt,
+				     struct rhash_lock_head __rcu **bkt,
 				     struct rhash_head *obj)
 {
-	struct rhash_head **p = (struct rhash_head **)bkt;
+	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
 
 	lock_map_release(&tbl->dep_map);
 	rcu_assign_pointer(*p, obj);
@@ -556,6 +556,7 @@ static inline struct rhash_head *__rhashtable_lookup(
 	};
 	struct rhash_lock_head __rcu * const *bkt;
 	struct bucket_table *tbl;
+	struct rhash_head __rcu *head;
 	struct rhash_head *he;
 	unsigned int hash;
 
@@ -564,8 +565,8 @@ restart:
 	hash = rht_key_hashfn(ht, tbl, key, params);
 	bkt = rht_bucket(tbl, hash);
 	do {
-		he = rht_ptr(rht_dereference_bucket_rcu(*bkt, tbl, hash));
-		rht_for_each_rcu_from(he, he, tbl, hash) {
+		head = rht_ptr(rht_dereference_bucket_rcu(*bkt, tbl, hash));
+		rht_for_each_rcu_from(he, head, tbl, hash) {
 			if (params.obj_cmpfn ?
 			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
 			    rhashtable_compare(&arg, rht_obj(ht, he)))
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9c84f5cef69c..e387ceb00e86 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -223,7 +223,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
 	int err = -EAGAIN;
 	struct rhash_head *head, *next, *entry;
-	struct rhash_head **pprev = NULL;
+	struct rhash_head __rcu **pprev = NULL;
 	unsigned int new_hash;
 
 	if (new_tbl->nest)
@@ -486,7 +486,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 		.ht = ht,
 		.key = key,
 	};
-	struct rhash_head **pprev = NULL;
+	struct rhash_head __rcu **pprev = NULL;
 	struct rhash_head *head;
 	int elasticity;
 
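
The rht_assign_unlock() hunk is the heart of the fix: the bucket pointer is published with rcu_assign_pointer(), so both the bkt parameter and the local alias p must keep the __rcu marker across the cast. A minimal sketch of the same pattern, checked with "make C=1", using hypothetical names (demo_bucket, demo_node, demo_publish_head) standing in for the rhashtable types:

#include <linux/rcupdate.h>

struct demo_bucket;			/* opaque, like struct rhash_lock_head */

struct demo_node {
	struct demo_node __rcu *next;
};

static inline void demo_publish_head(struct demo_bucket __rcu **bkt,
				     struct demo_node *obj)
{
	/*
	 * The cast carries the __rcu marker, so sparse still sees an
	 * RCU-protected target when rcu_assign_pointer() stores obj.
	 * Dropping __rcu from either side of the cast is what produced
	 * the warnings this patch removes.
	 */
	struct demo_node __rcu **p = (struct demo_node __rcu **)bkt;

	rcu_assign_pointer(*p, obj);
}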