path: root/fs/gfs2/glock.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-20 13:28:45 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-20 13:28:45 -0700
commit	6c1b8d94bcc1882e451d0e7a28a4a5253f4970ab (patch)
tree	1e612ce2e7b5e2164ffa49ad6518ca2834a7f9bb /fs/gfs2/glock.c
parent	82aff107f8c9194586f68e86412246629d05670a (diff)
parent	f2741d9898269e565c220ec295a8f5c3756c7585 (diff)
download	linux-6c1b8d94bcc1882e451d0e7a28a4a5253f4970ab.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw
* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw: (32 commits)
  GFS2: Move all locking inside the inode creation function
  GFS2: Clean up symlink creation
  GFS2: Clean up mkdir
  GFS2: Use UUID field in generic superblock
  GFS2: Rename ops_inode.c to inode.c
  GFS2: Inode.c is empty now, remove it
  GFS2: Move final part of inode.c into super.c
  GFS2: Move most of the remaining inode.c into ops_inode.c
  GFS2: Move gfs2_refresh_inode() and friends into glops.c
  GFS2: Remove gfs2_dinode_print() function
  GFS2: When adding a new dir entry, inc link count if it is a subdir
  GFS2: Make gfs2_dir_del update link count when required
  GFS2: Don't use gfs2_change_nlink in link syscall
  GFS2: Don't use a try lock when promoting to a higher mode
  GFS2: Double check link count under glock
  GFS2: Improve bug trap code in ->releasepage()
  GFS2: Fix ail list traversal
  GFS2: make sure fallocate bytes is a multiple of blksize
  GFS2: Add an AIL writeback tracepoint
  GFS2: Make writeback more responsive to system conditions
  ...
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--	fs/gfs2/glock.c	94
1 file changed, 48 insertions(+), 46 deletions(-)
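
For orientation: the core of this glock.c change is a pair of new helpers, gfs2_glock_add_to_lru() and gfs2_glock_remove_from_lru(), plus a GLF_LRU flag that mirrors list membership so callers (e.g. gfs2_glock_nq() in the diff below) can test it without taking lru_lock. The following is a minimal, self-contained user-space sketch of that pattern, not the kernel code; the names (struct node, IN_LRU, add_to_lru, remove_from_lru) and the pthread locking are illustrative assumptions.

/*
 * Illustrative sketch only -- NOT the kernel code below.  A flag bit
 * (IN_LRU) mirrors LRU list membership, and add/remove helpers keep
 * the flag, the list and the element count consistent under one lock.
 */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;	/* links into a circular LRU list */
	unsigned int flags;
};

#define IN_LRU 0x1

static struct node lru = { &lru, &lru, 0 };	/* circular list head */
static int lru_count;
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

static int on_list(const struct node *n)
{
	return n->next != n;		/* self-linked means "not on a list" */
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = n;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void add_to_lru(struct node *n)
{
	pthread_mutex_lock(&lru_lock);
	if (on_list(n))
		list_del_init(n);	/* already present: just requeue at tail */
	else
		lru_count++;
	list_add_tail(n, &lru);
	n->flags |= IN_LRU;
	pthread_mutex_unlock(&lru_lock);
}

static void remove_from_lru(struct node *n)
{
	pthread_mutex_lock(&lru_lock);
	if (on_list(n)) {
		list_del_init(n);
		lru_count--;
		n->flags &= ~IN_LRU;
	}
	pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
	struct node a = { &a, &a, 0 };

	add_to_lru(&a);
	add_to_lru(&a);			/* second add only moves it to the tail */
	printf("count=%d flag=%u\n", lru_count, a.flags & IN_LRU);
	remove_from_lru(&a);
	printf("count=%d flag=%u\n", lru_count, a.flags & IN_LRU);
	return 0;
}

A repeated add_to_lru() call requeues the node at the tail without bumping the counter, mirroring what gfs2_glock_add_to_lru() does in the hunk below.
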
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 7a4fb630a320..a2a6abbccc07 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -143,14 +143,9 @@ static int demote_ok(const struct gfs2_glock *gl)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
- /* assert_spin_locked(&gl->gl_spin); */
-
if (gl->gl_state == LM_ST_UNLOCKED)
return 0;
- if (test_bit(GLF_LFLUSH, &gl->gl_flags))
- return 0;
- if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
- !list_empty(&gl->gl_holders))
+ if (!list_empty(&gl->gl_holders))
return 0;
if (glops->go_demote_ok)
return glops->go_demote_ok(gl);
@@ -158,6 +153,31 @@ static int demote_ok(const struct gfs2_glock *gl)
}
+void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
+{
+ spin_lock(&lru_lock);
+
+ if (!list_empty(&gl->gl_lru))
+ list_del_init(&gl->gl_lru);
+ else
+ atomic_inc(&lru_count);
+
+ list_add_tail(&gl->gl_lru, &lru_list);
+ set_bit(GLF_LRU, &gl->gl_flags);
+ spin_unlock(&lru_lock);
+}
+
+static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+{
+ spin_lock(&lru_lock);
+ if (!list_empty(&gl->gl_lru)) {
+ list_del_init(&gl->gl_lru);
+ atomic_dec(&lru_count);
+ clear_bit(GLF_LRU, &gl->gl_flags);
+ }
+ spin_unlock(&lru_lock);
+}
+
/**
* __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
* @gl: the glock
@@ -168,24 +188,8 @@ static int demote_ok(const struct gfs2_glock *gl)
static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
- if (demote_ok(gl)) {
- spin_lock(&lru_lock);
-
- if (!list_empty(&gl->gl_lru))
- list_del_init(&gl->gl_lru);
- else
- atomic_inc(&lru_count);
-
- list_add_tail(&gl->gl_lru, &lru_list);
- spin_unlock(&lru_lock);
- }
-}
-
-void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
-{
- spin_lock(&gl->gl_spin);
- __gfs2_glock_schedule_for_reclaim(gl);
- spin_unlock(&gl->gl_spin);
+ if (demote_ok(gl))
+ gfs2_glock_add_to_lru(gl);
}
/**
@@ -217,12 +221,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
spin_lock_bucket(gl->gl_hash);
hlist_bl_del_rcu(&gl->gl_list);
spin_unlock_bucket(gl->gl_hash);
- spin_lock(&lru_lock);
- if (!list_empty(&gl->gl_lru)) {
- list_del_init(&gl->gl_lru);
- atomic_dec(&lru_count);
- }
- spin_unlock(&lru_lock);
+ gfs2_glock_remove_from_lru(gl);
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
trace_gfs2_glock_put(gl);
@@ -542,11 +541,6 @@ __acquires(&gl->gl_spin)
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
gfs2_glock_hold(gl);
- if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
- gl->gl_state == LM_ST_DEFERRED) &&
- !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
- lck_flags |= LM_FLAG_TRY_1CB;
-
if (sdp->sd_lockstruct.ls_ops->lm_lock) {
/* lock_dlm */
ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
@@ -648,7 +642,7 @@ static void delete_work_func(struct work_struct *work)
/* Note: Unsafe to dereference ip as we don't hold right refs/locks */
if (ip)
- inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
+ inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
else
inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
if (inode && !IS_ERR(inode)) {
@@ -1025,6 +1019,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
return -EIO;
+ if (test_bit(GLF_LRU, &gl->gl_flags))
+ gfs2_glock_remove_from_lru(gl);
+
spin_lock(&gl->gl_spin);
add_to_queue(gh);
if ((LM_FLAG_NOEXP & gh->gh_flags) &&
@@ -1082,7 +1079,8 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
!test_bit(GLF_DEMOTE, &gl->gl_flags))
fast_path = 1;
}
- __gfs2_glock_schedule_for_reclaim(gl);
+ if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
+ __gfs2_glock_schedule_for_reclaim(gl);
trace_gfs2_glock_queue(gh, 0);
spin_unlock(&gl->gl_spin);
if (likely(fast_path))
@@ -1365,6 +1363,7 @@ static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_m
while(nr && !list_empty(&lru_list)) {
gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
list_del_init(&gl->gl_lru);
+ clear_bit(GLF_LRU, &gl->gl_flags);
atomic_dec(&lru_count);
/* Test for being demotable */
@@ -1387,6 +1386,7 @@ static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_m
}
nr_skipped++;
list_add(&gl->gl_lru, &skipped);
+ set_bit(GLF_LRU, &gl->gl_flags);
}
list_splice(&skipped, &lru_list);
atomic_add(nr_skipped, &lru_count);
@@ -1459,12 +1459,7 @@ static void thaw_glock(struct gfs2_glock *gl)
static void clear_glock(struct gfs2_glock *gl)
{
- spin_lock(&lru_lock);
- if (!list_empty(&gl->gl_lru)) {
- list_del_init(&gl->gl_lru);
- atomic_dec(&lru_count);
- }
- spin_unlock(&lru_lock);
+ gfs2_glock_remove_from_lru(gl);
spin_lock(&gl->gl_spin);
if (gl->gl_state != LM_ST_UNLOCKED)
@@ -1599,9 +1594,11 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
return 0;
}
-static const char *gflags2str(char *buf, const unsigned long *gflags)
+static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
+ const unsigned long *gflags = &gl->gl_flags;
char *p = buf;
+
if (test_bit(GLF_LOCK, gflags))
*p++ = 'l';
if (test_bit(GLF_DEMOTE, gflags))
@@ -1624,6 +1621,10 @@ static const char *gflags2str(char *buf, const unsigned long *gflags)
*p++ = 'F';
if (test_bit(GLF_QUEUED, gflags))
*p++ = 'q';
+ if (test_bit(GLF_LRU, gflags))
+ *p++ = 'L';
+ if (gl->gl_object)
+ *p++ = 'o';
*p = 0;
return buf;
}
@@ -1658,14 +1659,15 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
dtime *= 1000000/HZ; /* demote time in uSec */
if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
dtime = 0;
- gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
+ gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
state2str(gl->gl_state),
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number,
- gflags2str(gflags_buf, &gl->gl_flags),
+ gflags2str(gflags_buf, gl),
state2str(gl->gl_target),
state2str(gl->gl_demote_state), dtime,
atomic_read(&gl->gl_ail_count),
+ atomic_read(&gl->gl_revokes),
atomic_read(&gl->gl_ref));
list_for_each_entry(gh, &gl->gl_holders, gh_list) {