author    | Steven Whitehouse <swhiteho@redhat.com> | 2006-09-07 14:40:21 -0400
committer | Steven Whitehouse <swhiteho@redhat.com> | 2006-09-07 14:40:21 -0400
commit    | 85d1da67f7e1239afa3494d05be87da6fc3ecada (patch)
tree      | 01508570249764d8b0e38183e1ea7e9666b34b78 /fs
parent    | b8547856f9c158ff70effbcfd15969c908fbe1b3 (diff)
download  | linux-85d1da67f7e1239afa3494d05be87da6fc3ecada.tar.bz2
[GFS2] Move glock hash table out of superblock
There are several reasons why we want to do this:
- Firstly, it's large, so we'll scale better with multiple
  GFS2 filesystems mounted at the same time
- Secondly, it's easier to scale its size as required (that's a plan
  for later patches)
- Thirdly, we can use kzalloc rather than vmalloc when allocating
  the superblock (it's now only 4888 bytes)
- Fourthly, it's all part of my plan to eventually be able to use RCU
  with the glock hash.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
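To make the first and last points above concrete, here is a minimal userspace sketch of the layout the patch moves to: a single static bucket array shared by every mount, a reader/writer lock per bucket, and lookups/walkers that filter on the owning superblock (which is why the diff below adds the gl->gl_sbd != sdp checks). The names, the bucket count, and the use of pthreads in place of the kernel's rwlock_t are illustrative assumptions, not GFS2's actual code.

#include <pthread.h>
#include <stddef.h>

#define GL_HASH_SIZE 8192            /* illustrative size, not GFS2's constant */

struct glock {
	struct glock *next;
	const void *sbd;             /* which "superblock" owns this entry */
	unsigned long long number;
};

struct gl_bucket {
	pthread_rwlock_t lock;       /* per-bucket lock, playing the role of hb_lock */
	struct glock *head;          /* bucket chain, playing the role of hb_list */
};

/* One table for the whole module, not one copy per mounted filesystem. */
static struct gl_bucket gl_table[GL_HASH_SIZE];

static unsigned gl_hash(unsigned long long number)
{
	return (unsigned)(number % GL_HASH_SIZE);
}

/* One-time setup, the analogue of gfs2_glock_init() added by this patch. */
void gl_table_init(void)
{
	for (unsigned i = 0; i < GL_HASH_SIZE; i++) {
		pthread_rwlock_init(&gl_table[i].lock, NULL);
		gl_table[i].head = NULL;
	}
}

/*
 * Because the table is shared, a lookup (or any bucket walk) must match
 * the owning superblock as well as the lock number.
 */
struct glock *gl_find(const void *sbd, unsigned long long number)
{
	struct gl_bucket *b = &gl_table[gl_hash(number)];
	struct glock *gl, *found = NULL;

	pthread_rwlock_rdlock(&b->lock);
	for (gl = b->head; gl != NULL; gl = gl->next) {
		if (gl->sbd == sbd && gl->number == number) {
			found = gl;
			break;
		}
	}
	pthread_rwlock_unlock(&b->lock);
	return found;
}

Keeping the table out of struct gfs2_sbd in this way is also what leaves room to resize the bucket array or convert the read side to RCU later, as the commit message notes, without touching the superblock structure again.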
Diffstat (limited to 'fs')
-rw-r--r-- | fs/gfs2/glock.c      | 54
-rw-r--r-- | fs/gfs2/glock.h      |  2
-rw-r--r-- | fs/gfs2/incore.h     | 12
-rw-r--r-- | fs/gfs2/main.c       |  6
-rw-r--r-- | fs/gfs2/ops_fstype.c |  9
5 files changed, 47 insertions, 36 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 00769674f2ea..5759f52a1cf9 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -49,6 +49,8 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl);
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
 static int dump_glock(struct gfs2_glock *gl);
 
+static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
+
 /**
  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
  * @actual: the current state of the lock
@@ -231,10 +233,10 @@ static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
  * Returns: NULL, or the struct gfs2_glock with the requested number
  */
 
-static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
+static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
 					  const struct lm_lockname *name)
 {
-	struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(sdp, name)];
+	struct gfs2_gl_hash_bucket *bucket = &gl_hash_table[gl_hash(sdp, name)];
 	struct gfs2_glock *gl;
 
 	read_lock(&bucket->hb_lock);
@@ -268,7 +270,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	name.ln_number = number;
 	name.ln_type = glops->go_type;
 
-	bucket = &sdp->sd_gl_hash[gl_hash(sdp, &name)];
+	bucket = &gl_hash_table[gl_hash(sdp, &name)];
 
 	read_lock(&bucket->hb_lock);
 	gl = search_bucket(bucket, sdp, &name);
@@ -648,9 +650,9 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
 	set_bit(HIF_MUTEX, &gh.gh_iflags);
 
 	spin_lock(&gl->gl_spin);
-	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
-	else {
+	} else {
 		gl->gl_owner = current;
 		gl->gl_ip = (unsigned long)__builtin_return_address(0);
 		complete(&gh.gh_wait);
@@ -673,9 +675,9 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
 	int acquired = 1;
 
 	spin_lock(&gl->gl_spin);
-	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 		acquired = 0;
-	else {
+	} else {
 		gl->gl_owner = current;
 		gl->gl_ip = (unsigned long)__builtin_return_address(0);
 	}
@@ -830,9 +832,9 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 		spin_lock(&gl->gl_spin);
 		list_del_init(&gh->gh_list);
 		if (gl->gl_state == gh->gh_state ||
-		    gl->gl_state == LM_ST_UNLOCKED)
+		    gl->gl_state == LM_ST_UNLOCKED) {
 			gh->gh_error = 0;
-		else {
+		} else {
 			if (gfs2_assert_warn(sdp, gh->gh_flags &
 					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
 				fs_warn(sdp, "ret = 0x%.8X\n", ret);
@@ -1090,8 +1092,7 @@ static int glock_wait_internal(struct gfs2_holder *gh)
 		return gh->gh_error;
 
 	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
-	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
-						   gh->gh_state,
+	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
 						   gh->gh_flags));
 
 	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
@@ -1901,6 +1902,8 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 
 			if (test_bit(GLF_PLUG, &gl->gl_flags))
 				continue;
+			if (gl->gl_sbd != sdp)
+				continue;
 
 			/* examiner() must glock_put() */
 			gfs2_glock_hold(gl);
@@ -1953,7 +1956,7 @@ void gfs2_scand_internal(struct gfs2_sbd *sdp)
 	unsigned int x;
 
 	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-		examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
+		examine_bucket(scan_glock, sdp, &gl_hash_table[x]);
 		cond_resched();
 	}
 }
@@ -2012,7 +2015,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
 		cont = 0;
 
 		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-			if (examine_bucket(clear_glock, sdp, &sdp->sd_gl_hash[x]))
+			if (examine_bucket(clear_glock, sdp, &gl_hash_table[x]))
 				cont = 1;
 
 		if (!wait || !cont)
@@ -2114,14 +2117,13 @@ static int dump_glock(struct gfs2_glock *gl)
 
 	spin_lock(&gl->gl_spin);
 
-	printk(KERN_INFO "Glock 0x%p (%u, %llu)\n",
-	       gl,
-	       gl->gl_name.ln_type,
+	printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
 	       (unsigned long long)gl->gl_name.ln_number);
 	printk(KERN_INFO "  gl_flags =");
-	for (x = 0; x < 32; x++)
+	for (x = 0; x < 32; x++) {
 		if (test_bit(x, &gl->gl_flags))
 			printk(" %u", x);
+	}
 	printk(" \n");
 	printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
 	printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
@@ -2136,8 +2138,7 @@ static int dump_glock(struct gfs2_glock *gl)
 	printk(KERN_INFO "  reclaim = %s\n",
 		    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
 	if (gl->gl_aspace)
-		printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n",
-		       gl->gl_aspace,
+		printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
 		       gl->gl_aspace->i_mapping->nrpages);
 	else
 		printk(KERN_INFO "  aspace = no\n");
@@ -2203,13 +2204,15 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
 	int error = 0;
 
 	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-		bucket = &sdp->sd_gl_hash[x];
+		bucket = &gl_hash_table[x];
 
 		read_lock(&bucket->hb_lock);
 
 		list_for_each_entry(gl, &bucket->hb_list, gl_list) {
 			if (test_bit(GLF_PLUG, &gl->gl_flags))
 				continue;
+			if (gl->gl_sbd != sdp)
+				continue;
 
 			error = dump_glock(gl);
 			if (error)
@@ -2226,3 +2229,14 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
 
 	return error;
 }
+int __init gfs2_glock_init(void)
+{
+	unsigned i;
+	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
+		struct gfs2_gl_hash_bucket *hb = &gl_hash_table[i];
+		rwlock_init(&hb->hb_lock);
+		INIT_LIST_HEAD(&hb->hb_list);
+	}
+	return 0;
+}
+
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 2e1d32866321..0febca3d6d47 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -150,4 +150,6 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
 void gfs2_scand_internal(struct gfs2_sbd *sdp);
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
 
+int __init gfs2_glock_init(void);
+
 #endif /* __GLOCK_DOT_H__ */
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 225924ca6b3e..61849607211f 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -30,7 +30,6 @@ struct gfs2_quota_data;
 struct gfs2_trans;
 struct gfs2_ail;
 struct gfs2_jdesc;
-struct gfs2_gl_hash_bucket;
 struct gfs2_sbd;
 
 typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
@@ -107,6 +106,11 @@ struct gfs2_bufdata {
 	struct list_head bd_ail_gl_list;
 };
 
+struct gfs2_gl_hash_bucket {
+	rwlock_t hb_lock;
+	struct list_head hb_list;
+};
+
 struct gfs2_glock_operations {
 	void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state,
 			     int flags);
@@ -442,11 +446,6 @@ struct gfs2_tune {
 	unsigned int gt_statfs_slow;
 };
 
-struct gfs2_gl_hash_bucket {
-	rwlock_t hb_lock;
-	struct list_head hb_list;
-};
-
 enum {
 	SDF_JOURNAL_CHECKED = 0,
 	SDF_JOURNAL_LIVE = 1,
@@ -489,7 +488,6 @@ struct gfs2_sbd {
 	/* Lock Stuff */
 
 	struct lm_lockstruct sd_lockstruct;
-	struct gfs2_gl_hash_bucket sd_gl_hash[GFS2_GL_HASH_SIZE];
 	struct list_head sd_reclaim_list;
 	spinlock_t sd_reclaim_lock;
 	wait_queue_head_t sd_reclaim_wq;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 143fda727a9c..2bdf246436c7 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -23,6 +23,7 @@
 #include "ops_fstype.h"
 #include "sys.h"
 #include "util.h"
+#include "glock.h"
 
 static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
 {
@@ -69,8 +70,11 @@ static int __init init_gfs2_fs(void)
 	if (error)
 		return error;
 
-	error = -ENOMEM;
+	error = gfs2_glock_init();
+	if (error)
+		goto fail;
 
+	error = -ENOMEM;
 	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
 					      sizeof(struct gfs2_glock),
 					      0, 0,
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index c94422b30ceb..f5140bdc1027 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -45,23 +45,16 @@ extern struct dentry_operations gfs2_dops;
 static struct gfs2_sbd *init_sbd(struct super_block *sb)
 {
 	struct gfs2_sbd *sdp;
-	unsigned int x;
 
-	sdp = vmalloc(sizeof(struct gfs2_sbd));
+	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
 	if (!sdp)
 		return NULL;
 
-	memset(sdp, 0, sizeof(struct gfs2_sbd));
-
 	sb->s_fs_info = sdp;
 	sdp->sd_vfs = sb;
 
 	gfs2_tune_init(&sdp->sd_tune);
 
-	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-		rwlock_init(&sdp->sd_gl_hash[x].hb_lock);
-		INIT_LIST_HEAD(&sdp->sd_gl_hash[x].hb_list);
-	}
 	INIT_LIST_HEAD(&sdp->sd_reclaim_list);
 	spin_lock_init(&sdp->sd_reclaim_lock);
 	init_waitqueue_head(&sdp->sd_reclaim_wq);