commit    4ac1800f81eb52f776aca201e791eec2ef355259
tree      032e7be62091aa106e83307c805d1180db57ebd8
parent    a1bf4c7da62fcadea065f7c9a561d61c26ea4882
parent    3e7aafc39c59c639ebd5961f893743f076df9b4e
author    Linus Torvalds <torvalds@linux-foundation.org>  2018-04-12 13:00:44 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-04-12 13:00:44 -0700

Merge tag 'gfs2-4.17.fixes2' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2
Pull more gfs2 updates from Bob Peterson:
"We decided to request the latest three patches to be merged into this
merge window while it's still open.
- The first patch adds a new function to lockref:
lockref_put_not_zero
- The second patch fixes GFS2's glock dump code so it uses the new
lockref function. This fixes a problem whereby lock dumps could
miss glocks.
- I made a minor patch to update some comments and fix the lock
ordering text in our gfs2-glocks.txt Documentation file"
* tag 'gfs2-4.17.fixes2' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
GFS2: Minor improvements to comments and documentation
gfs2: Stop using rhashtable_walk_peek
lockref: Add lockref_put_not_zero
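
A note on what the new primitive does: lockref_put_not_zero() drops a reference only if the count stays above zero afterwards, so the caller is never left holding the last reference with no safe way to release it on the spot. The glock dump code relies on exactly that property: when the fast put fails, gfs2_glock_iter_next() falls back to gfs2_glock_queue_put() and lets the final put happen later. The following is a minimal user-space sketch of the semantics only, assuming a plain mutex in place of the kernel's cmpxchg fast path; the fake_lockref type and all names are invented for illustration and are not the lib/lockref.c implementation.

/* Illustrative sketch only: models the semantics of lockref_put_not_zero()
 * with a mutex. The real lib/lockref.c first tries a lockless cmpxchg loop
 * on the combined (lock, count) word and only then falls back to the lock. */
#include <pthread.h>
#include <stdio.h>

struct fake_lockref {                   /* stand-in for struct lockref */
        pthread_mutex_t lock;
        int count;
};

/* Return 1 if the count was decremented, 0 if that would have made it zero. */
static int fake_lockref_put_not_zero(struct fake_lockref *ref)
{
        int done = 0;

        pthread_mutex_lock(&ref->lock);
        if (ref->count > 1) {
                ref->count--;
                done = 1;
        }
        pthread_mutex_unlock(&ref->lock);
        return done;
}

int main(void)
{
        struct fake_lockref ref = { PTHREAD_MUTEX_INITIALIZER, 2 };

        printf("%d\n", fake_lockref_put_not_zero(&ref)); /* 1: count 2 -> 1 */
        printf("%d\n", fake_lockref_put_not_zero(&ref)); /* 0: refused, slow path needed */
        return 0;
}
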
 Documentation/filesystems/gfs2-glocks.txt |  5
 fs/gfs2/bmap.c                            |  2
 fs/gfs2/glock.c                           | 47
 fs/gfs2/ops_fstype.c                      |  2
 include/linux/lockref.h                   |  1
 lib/lockref.c                             | 28
 6 files changed, 62 insertions(+), 23 deletions(-)
diff --git a/Documentation/filesystems/gfs2-glocks.txt b/Documentation/filesystems/gfs2-glocks.txt
index 1fb12f9dfe48..7059623635b2 100644
--- a/Documentation/filesystems/gfs2-glocks.txt
+++ b/Documentation/filesystems/gfs2-glocks.txt
@@ -100,14 +100,15 @@ indicates that it is caching uptodate data.
 
 Glock locking order within GFS2:
 
- 1. i_mutex (if required)
+ 1. i_rwsem (if required)
  2. Rename glock (for rename only)
  3. Inode glock(s)
     (Parents before children, inodes at "same level" with same parent
      in lock number order)
  4. Rgrp glock(s) (for (de)allocation operations)
  5. Transaction glock (via gfs2_trans_begin) for non-read operations
- 6. Page lock (always last, very important!)
+ 6. i_rw_mutex (if required)
+ 7. Page lock (always last, very important!)
 
 There are two glocks per inode. One deals with access to the inode
 itself (locking order as above), and the other, known as the iopen
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 685c305cbeb6..278ed0869c3c 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1744,7 +1744,7 @@ do_grow_qunlock:
  * @newsize: the size to make the file
  *
  * The file size can grow, shrink, or stay the same size. This
- * is called holding i_mutex and an exclusive glock on the inode
+ * is called holding i_rwsem and an exclusive glock on the inode
  * in question.
  *
  * Returns: errno
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 82fb5583445c..097bd3c0f270 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1923,28 +1923,37 @@ void gfs2_glock_exit(void)
 
 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
 {
-        if (n == 0)
-                gi->gl = rhashtable_walk_peek(&gi->hti);
-        else {
-                gi->gl = rhashtable_walk_next(&gi->hti);
-                n--;
+        struct gfs2_glock *gl = gi->gl;
+
+        if (gl) {
+                if (n == 0)
+                        return;
+                if (!lockref_put_not_zero(&gl->gl_lockref))
+                        gfs2_glock_queue_put(gl);
         }
         for (;;) {
-                if (IS_ERR_OR_NULL(gi->gl)) {
-                        if (!gi->gl)
-                                return;
-                        if (PTR_ERR(gi->gl) != -EAGAIN) {
-                                gi->gl = NULL;
-                                return;
+                gl = rhashtable_walk_next(&gi->hti);
+                if (IS_ERR_OR_NULL(gl)) {
+                        if (gl == ERR_PTR(-EAGAIN)) {
+                                n = 1;
+                                continue;
                         }
-                        n = 0;
-                } else if (gi->sdp == gi->gl->gl_name.ln_sbd &&
-                           !__lockref_is_dead(&gi->gl->gl_lockref)) {
-                        if (!n--)
-                                break;
+                        gl = NULL;
+                        break;
+                }
+                if (gl->gl_name.ln_sbd != gi->sdp)
+                        continue;
+                if (n <= 1) {
+                        if (!lockref_get_not_dead(&gl->gl_lockref))
+                                continue;
+                        break;
+                } else {
+                        if (__lockref_is_dead(&gl->gl_lockref))
+                                continue;
+                        n--;
                 }
-                gi->gl = rhashtable_walk_next(&gi->hti);
         }
+        gi->gl = gl;
 }
 
 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
@@ -1988,7 +1997,6 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
 {
         struct gfs2_glock_iter *gi = seq->private;
 
-        gi->gl = NULL;
         rhashtable_walk_stop(&gi->hti);
 }
 
@@ -2076,7 +2084,8 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
         struct seq_file *seq = file->private_data;
         struct gfs2_glock_iter *gi = seq->private;
 
-        gi->gl = NULL;
+        if (gi->gl)
+                gfs2_glock_put(gi->gl);
         rhashtable_walk_exit(&gi->hti);
         return seq_release_private(inode, file);
 }
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index e6a0a8a89ea7..3ba3f167641c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -825,7 +825,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
                 goto fail_rindex;
         }
         /*
-         * i_mutex on quota files is special. Since this inode is hidden system
+         * i_rwsem on quota files is special. Since this inode is hidden system
          * file, we are safe to define locking ourselves.
          */
         lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 2eac32095113..99f17cc8e163 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -37,6 +37,7 @@ struct lockref {
 extern void lockref_get(struct lockref *);
 extern int lockref_put_return(struct lockref *);
 extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_put_not_zero(struct lockref *);
 extern int lockref_get_or_lock(struct lockref *);
 extern int lockref_put_or_lock(struct lockref *);
 
diff --git a/lib/lockref.c b/lib/lockref.c
index 47169ed7e964..3d468b53d4c9 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -81,6 +81,34 @@ int lockref_get_not_zero(struct lockref *lockref)
 EXPORT_SYMBOL(lockref_get_not_zero);
 
 /**
+ * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count would become zero
+ */
+int lockref_put_not_zero(struct lockref *lockref)
+{
+        int retval;
+
+        CMPXCHG_LOOP(
+                new.count--;
+                if (old.count <= 1)
+                        return 0;
+        ,
+                return 1;
+        );
+
+        spin_lock(&lockref->lock);
+        retval = 0;
+        if (lockref->count > 1) {
+                lockref->count--;
+                retval = 1;
+        }
+        spin_unlock(&lockref->lock);
+        return retval;
+}
+EXPORT_SYMBOL(lockref_put_not_zero);
+
+/**
  * lockref_get_or_lock - Increments count unless the count is 0 or dead
  * @lockref: pointer to lockref structure
  * Return: 1 if count updated successfully or 0 if count was zero
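
For context on the glock.c change above: the dump code previously used rhashtable_walk_peek() to re-find its place when the seq_file restarted the walk, and as the pull message notes, lock dumps could miss glocks. After this series, gfs2_glock_iter_next() instead keeps a counted reference to the last glock it returned (dropped with lockref_put_not_zero() or gfs2_glock_queue_put(), re-taken with lockref_get_not_dead()), so the walk can be stopped and restarted without losing its position. Below is a rough user-space sketch of that "iterator pins its current element" pattern, assuming a trivial array in place of the rhashtable; every type and function name here is invented for illustration.

/* Sketch of the "iterator holds a reference to the element it last returned"
 * pattern. The table, entry type, and helper names are made up for this
 * example; only the shape of the loop follows gfs2_glock_iter_next(). */
#include <stdio.h>

struct entry {
        int refcount;
        int value;
        int dead;               /* being torn down: skip it, never resurrect */
};

struct iter {
        struct entry *cur;      /* pinned across seq_file-style stop/start */
        size_t pos;
};

static struct entry table[] = {
        { 1, 10, 0 }, { 1, 20, 1 }, { 1, 30, 0 },
};

static struct entry *iter_next(struct iter *it)
{
        if (it->cur)
                it->cur->refcount--;        /* drop the previous element */

        while (it->pos < sizeof(table) / sizeof(table[0])) {
                struct entry *e = &table[it->pos++];

                if (e->dead)
                        continue;           /* like lockref_get_not_dead() failing */
                e->refcount++;              /* pin it so it stays valid after return */
                it->cur = e;
                return e;
        }
        it->cur = NULL;
        return NULL;
}

int main(void)
{
        struct iter it = { NULL, 0 };
        struct entry *e;

        while ((e = iter_next(&it)))
                printf("value %d (refcount now %d)\n", e->value, e->refcount);
        return 0;
}

The release path in gfs2_glocks_release() mirrors the same idea: if the iterator still pins a glock when the file is closed, that last reference is dropped with gfs2_glock_put().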