author     Steven Whitehouse <swhiteho@redhat.com>   2013-08-20 09:35:09 +0100
committer  Steven Whitehouse <swhiteho@redhat.com>   2013-08-20 09:35:09 +0100
commit     7286b31eaba6404fa92f68d04626da1f395b3916 (patch)
tree       1576b582aae540bb0069654b2c2d8171692dce73 /fs/gfs2
parent     7c0ef28a2c9a768ffb63c1c3d9542b6e175ab260 (diff)
download   linux-7286b31eaba6404fa92f68d04626da1f395b3916.tar.bz2
GFS2: Take glock reference in examine_bucket()
We need to check the glock ref counter in a race-free way in order to ensure that the gfs2_glock_hold() call will succeed. The easiest way to do that is to simply take the reference count early in the common code of examine_bucket(), skipping any glocks with a zero ref count. That means that the examiner functions all need to put their reference on the glock once they've performed their function.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Reported-by: David Teigland <teigland@redhat.com>
Tested-by: David Teigland <teigland@redhat.com>
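The race being closed is the usual RCU lookup problem: a glock can still be reachable through the RCU-protected hash list while its reference count is falling to zero, so testing atomic_read(&gl->gl_ref) and then calling gfs2_glock_hold() separately can resurrect an object that is already being torn down. atomic_inc_not_zero() performs the test and the increment as a single atomic operation, and the walker then owns a reference that the examiner must drop. Below is a minimal user-space sketch of the same take-if-alive pattern, using C11 atomics rather than the kernel's atomic_t API; the obj/obj_get_not_zero/obj_put/examine names are illustrative only, not taken from the GFS2 code, and the RCU list walk is omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int ref;
};

/*
 * Take a reference only if the count is still non-zero, mirroring the
 * kernel's atomic_inc_not_zero(): a compare-and-swap loop that never
 * revives an object whose last reference has already been dropped.
 */
static bool obj_get_not_zero(struct obj *o)
{
	int old = atomic_load(&o->ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->ref, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* already dying, skip it */
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		printf("last reference dropped, object may be freed\n");
}

/*
 * The walker takes the reference; the examiner drops it once it has
 * done its work, matching the examine_bucket()/examiner split in this
 * patch.
 */
static void examine(struct obj *o)
{
	/* ... inspect the object ... */
	obj_put(o);
}

int main(void)
{
	struct obj live = { .ref = 1 };
	struct obj dead = { .ref = 0 };

	if (obj_get_not_zero(&live))
		examine(&live);		/* taken and examined */
	if (obj_get_not_zero(&dead))
		examine(&dead);		/* never reached: count was already zero */

	obj_put(&live);			/* drop the original reference */
	return 0;
}

The important property is that the count can never go from 0 back to 1: once the final put has happened, every subsequent walker simply skips the object, which is exactly what the patched examine_bucket() relies on.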
Diffstat (limited to 'fs/gfs2')
-rw-r--r--   fs/gfs2/glock.c   12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 544a809819c3..ce7078d5aa97 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1488,7 +1488,7 @@ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
 	rcu_read_lock();
 	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
-		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
+		if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
 			examiner(gl);
 	}
 	rcu_read_unlock();
@@ -1508,18 +1508,17 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 /**
  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
  * @gl: The glock to thaw
  *
- * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
- * so this has to result in the ref count being dropped by one.
  */
 
 static void thaw_glock(struct gfs2_glock *gl)
 {
 	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
-		return;
+		goto out;
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	gfs2_glock_hold(gl);
-	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
+out:
 		gfs2_glock_put(gl);
+	}
 }
/**
@@ -1536,7 +1535,6 @@ static void clear_glock(struct gfs2_glock *gl)
 	if (gl->gl_state != LM_ST_UNLOCKED)
 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 	spin_unlock(&gl->gl_spin);
-	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put(gl);
 }