author     Steven Whitehouse <swhiteho@redhat.com>    2010-01-08 16:14:29 +0000
committer  Steven Whitehouse <swhiteho@redhat.com>    2010-03-01 14:07:53 +0000
commit     c1184f8ab7ea26681f3cab18284a870aad678b0f (patch)
tree       fec6b61ba9b1aa16aab0d1dfabf63a21bbe47ac7 /fs/gfs2/glock.c
parent     009d851837ab26cab18adda6169a813f70b0b21b (diff)
download   linux-c1184f8ab7ea26681f3cab18284a870aad678b0f.tar.bz2
GFS2: Remove loopy umount code
As a consequence of the previous patch, we can now remove the loop
which used to be required due to the circular dependency between the
inodes and glocks. Instead we can just invalidate the inodes, and then
clear up any glocks which are left. Also we no longer need the rwsem
since there is no longer any danger of the inode invalidation calling
back into the glock code (and from there back into the inode code).

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
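In outline, the patch replaces a retry-until-clean loop over the glock
hash with a single pass. The sketch below is a user-space analogue of
that change only: GL_HASH_SIZE, leftover[] and clear_bucket() are
hypothetical stand-ins for the GFS2 internals
(examine_bucket()/clear_glock()) that appear in the diff.

/* User-space analogue; names and the work model are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

#define GL_HASH_SIZE 4

static int leftover[GL_HASH_SIZE] = { 2, 0, 1, 0 };

/* Stand-in for examine_bucket(clear_glock, sdp, x): empties one
 * bucket and reports whether it held anything. */
static bool clear_bucket(unsigned int x)
{
	bool had_work = leftover[x] > 0;

	leftover[x] = 0;
	return had_work;
}

/* Old scheme: re-walk every bucket until a full pass finds nothing,
 * because invalidating the inodes could put glocks back in play. */
static void teardown_old(void)
{
	bool cont;
	unsigned int x;

	do {
		cont = false;
		for (x = 0; x < GL_HASH_SIZE; x++)
			if (clear_bucket(x))
				cont = true;
		/* the old kernel code called invalidate_inodes() here,
		 * under the now-removed gfs2_umount_flush_sem */
	} while (cont);
}

/* New scheme (this patch): the inodes are already gone, so one pass
 * over the hash buckets suffices. */
static void teardown_new(void)
{
	unsigned int x;

	for (x = 0; x < GL_HASH_SIZE; x++)
		clear_bucket(x);
}

int main(void)
{
	teardown_old();

	leftover[0] = 1;	/* pretend one glock remains at umount */
	teardown_new();

	printf("glock hash cleared\n");
	return 0;
}

In teardown_old(), a pass that finds work forces another full pass;
that was needed while invalidate_inodes() could feed glocks back into
the hash, and is what the removed gfs2_umount_flush_sem serialised.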
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c  33
1 file changed, 2 insertions, 31 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index dfb10a4d467e..4773f9098a41 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,7 +19,6 @@
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/module.h>
-#include <linux/rwsem.h>
 #include <asm/uaccess.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
@@ -60,7 +59,6 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
-static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
 static struct workqueue_struct *glock_workqueue;
 struct workqueue_struct *gfs2_delete_workqueue;
@@ -714,7 +712,6 @@ static void glock_work_func(struct work_struct *work)
 		finish_xmote(gl, gl->gl_reply);
 		drop_ref = 1;
 	}
-	down_read(&gfs2_umount_flush_sem);
 	spin_lock(&gl->gl_spin);
 	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 	    gl->gl_state != LM_ST_UNLOCKED &&
@@ -727,7 +724,6 @@ static void glock_work_func(struct work_struct *work)
 	}
 	run_queue(gl, 0);
 	spin_unlock(&gl->gl_spin);
-	up_read(&gfs2_umount_flush_sem);
 	if (!delay ||
 	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 		gfs2_glock_put(gl);
@@ -1512,35 +1508,10 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 {
-	unsigned long t;
 	unsigned int x;
-	int cont;
-	t = jiffies;
-
-	for (;;) {
-		cont = 0;
-		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-			if (examine_bucket(clear_glock, sdp, x))
-				cont = 1;
-		}
-
-		if (!cont)
-			break;
-
-		if (time_after_eq(jiffies,
-				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
-			fs_warn(sdp, "Unmount seems to be stalled. "
-				"Dumping lock state...\n");
-			gfs2_dump_lockstate(sdp);
-			t = jiffies;
-		}
-
-		down_write(&gfs2_umount_flush_sem);
-		invalidate_inodes(sdp->sd_vfs);
-		up_write(&gfs2_umount_flush_sem);
-		msleep(10);
-	}
+	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+		examine_bucket(clear_glock, sdp, x);
 	flush_workqueue(glock_workqueue);
 	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
 	gfs2_dump_lockstate(sdp);
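For reference, gfs2_gl_hash_clear() as it reads after this patch,
reassembled from the context and '+' lines of the hunk above (the
closing brace is assumed, since it falls outside the quoted context):

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(clear_glock, sdp, x);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	gfs2_dump_lockstate(sdp);
}

The flush of glock_workqueue and the wait on sd_glock_disposal then
let any queued glock work finish before the final lock-state dump.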