author	Andrew Price <anprice@redhat.com>	2015-11-12 14:07:52 -0600
committer	Bob Peterson <rpeterso@redhat.com>	2015-11-16 11:57:59 -0600
commit	3dd1dd8c696bdb7c8dcc9456cb23558ad1b336b8 (patch)
tree	3ec8d2b7d442737c15f6b83999713c1c38cbdfc7 /fs/gfs2
parent	6fde22426be6af261816db5941744b8d3c4c7f96 (diff)
download	linux-3dd1dd8c696bdb7c8dcc9456cb23558ad1b336b8.tar.bz2
GFS2: Use rht_for_each_entry_rcu in glock_hash_walk
This lockdep splat was being triggered on umount:

[55715.973122] ===============================
[55715.980169] [ INFO: suspicious RCU usage. ]
[55715.981021] 4.3.0-11553-g8d3de01-dirty #15 Tainted: G        W
[55715.982353] -------------------------------
[55715.983301] fs/gfs2/glock.c:1427 suspicious rcu_dereference_protected() usage!

The code it refers to is the rht_for_each_entry_safe usage in glock_hash_walk. The condition that triggers the warning is lockdep_rht_bucket_is_held(tbl, hash), which is checked in the __rcu_dereference_protected macro.

The rhashtable buckets are not changed in glock_hash_walk, so it is safe to rely on the rcu protection. Replace the rht_for_each_entry_safe() usage with rht_for_each_entry_rcu(), which doesn't care whether the bucket lock is held if the rcu read lock is held.

Signed-off-by: Andrew Price <anprice@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
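For context, rht_for_each_entry_rcu() only requires that the caller hold the RCU read lock, whereas rht_for_each_entry_safe() expects the bucket lock, which lockdep checks via lockdep_rht_bucket_is_held(). The following is a minimal sketch of the RCU-side walk pattern as it existed in this era of the rhashtable API, using a hypothetical struct foo_entry and foo_table that are not part of the patch; the actual change to glock_hash_walk is in the diff below.

#include <linux/rcupdate.h>
#include <linux/rhashtable.h>

/* Hypothetical hashed object; only the rhash_head member matters here. */
struct foo_entry {
	int key;
	struct rhash_head node;		/* linkage into an rhashtable bucket */
};

static struct rhashtable foo_table;	/* assumed to be initialised elsewhere */

static void foo_walk(void (*examiner)(struct foo_entry *fe))
{
	struct foo_entry *fe;
	struct rhash_head *pos;
	const struct bucket_table *tbl;
	int i;

	rcu_read_lock();
	tbl = rht_dereference_rcu(foo_table.tbl, &foo_table);
	for (i = 0; i < tbl->size; i++) {
		/* Safe under rcu_read_lock(); no bucket lock is required. */
		rht_for_each_entry_rcu(fe, pos, tbl, i, node)
			examiner(fe);
	}
	rcu_read_unlock();
}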
Diffstat (limited to 'fs/gfs2')
-rw-r--r--	fs/gfs2/glock.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 32e74710b1aa..430326e631dc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1417,14 +1417,14 @@ static struct shrinker glock_shrinker = {
 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 {
 	struct gfs2_glock *gl;
-	struct rhash_head *pos, *next;
+	struct rhash_head *pos;
 	const struct bucket_table *tbl;
 	int i;
 
 	rcu_read_lock();
 	tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
 	for (i = 0; i < tbl->size; i++) {
-		rht_for_each_entry_safe(gl, pos, next, tbl, i, gl_node) {
+		rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
 			if ((gl->gl_name.ln_sbd == sdp) &&
 			    lockref_get_not_dead(&gl->gl_lockref))
 				examiner(gl);