| field | value | date |
|---|---|---|
| author | Christoph Hellwig <hch@lst.de> | 2009-12-17 14:25:01 +0100 |
| committer | Al Viro <viro@zeniv.linux.org.uk> | 2009-12-17 11:03:25 -0500 |
| commit | eaff8079d4f1016a12e34ab323737314f24127dd (patch) | |
| tree | a3d9e00320c6195e55811d5247a521f99341a411 | |
| parent | 7a0ad10c367ab57c899d340372f37880cbe6ab52 (diff) | |
| download | linux-eaff8079d4f1016a12e34ab323737314f24127dd.tar.bz2 | |
kill I_LOCK
After I_SYNC was split out of I_LOCK, the leftover bit is always used together with
I_NEW and is thus superfluous.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
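
As context, here is a minimal userspace sketch (not kernel code) of what the patch collapses: before it, a freshly created inode carried both I_LOCK and I_NEW and waiters slept on the __I_LOCK bit; afterwards the single I_NEW bit plays both roles. The flag values are taken from the include/linux/fs.h hunk below; everything else is illustrative only.

```c
/*
 * Userspace sketch of the flag change in this commit.  Flag values come from
 * the include/linux/fs.h hunk; the OLD_/NEW_ names are made up for contrast.
 */
#include <assert.h>
#include <stdio.h>

/* Before the patch: two bits that were always set and cleared together. */
#define OLD_I_NEW    8
#define OLD___I_LOCK 7
#define OLD_I_LOCK   (1 << OLD___I_LOCK)   /* 128 */

/* After the patch: I_NEW is itself the waitable bit. */
#define NEW___I_NEW  3
#define NEW_I_NEW    (1 << NEW___I_NEW)    /* 8, same numeric value as before */

int main(void)
{
	unsigned long old_state = OLD_I_LOCK | OLD_I_NEW;  /* get_new_inode(), before */
	unsigned long new_state = NEW_I_NEW;               /* get_new_inode(), after  */

	/* The value of I_NEW is unchanged; only the redundant bit goes away. */
	assert(OLD_I_NEW == NEW_I_NEW);
	printf("old i_state: %#lx, new i_state: %#lx\n", old_state, new_state);
	return 0;
}
```

Since I_LOCK was only ever set and cleared together with I_NEW, dropping it loses no information: one bit both marks the inode as new and serves as the address waiters sleep on.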
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | fs/gfs2/inode.c | 2 |
| -rw-r--r-- | fs/inode.c | 26 |
| -rw-r--r-- | fs/jfs/jfs_txnmgr.c | 2 |
| -rw-r--r-- | fs/ntfs/inode.c | 6 |
| -rw-r--r-- | fs/ubifs/file.c | 2 |
| -rw-r--r-- | fs/xfs/linux-2.6/xfs_iops.c | 2 |
| -rw-r--r-- | fs/xfs/xfs_iget.c | 4 |
| -rw-r--r-- | include/linux/fs.h | 36 |
| -rw-r--r-- | include/linux/writeback.h | 3 |
9 files changed, 39 insertions, 44 deletions
```diff
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 3ff32fa793da..6e220f4eee7d 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -125,7 +125,7 @@ static struct inode *gfs2_iget_skip(struct super_block *sb,
  * directory entry when gfs2_inode_lookup() is invoked. Part of the code
  * segment inside gfs2_inode_lookup code needs to get moved around.
  *
- * Clean up I_LOCK and I_NEW as well.
+ * Clears I_NEW as well.
  **/
 
 void gfs2_set_iop(struct inode *inode)
diff --git a/fs/inode.c b/fs/inode.c
index 06c1f02de611..03dfeb2e3928 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -113,7 +113,7 @@ static void wake_up_inode(struct inode *inode)
 	 * Prevent speculative execution through spin_unlock(&inode_lock);
 	 */
 	smp_mb();
-	wake_up_bit(&inode->i_state, __I_LOCK);
+	wake_up_bit(&inode->i_state, __I_NEW);
 }
 
 /**
@@ -690,17 +690,17 @@ void unlock_new_inode(struct inode *inode)
 	}
 #endif
 	/*
-	 * This is special!  We do not need the spinlock when clearing I_LOCK,
+	 * This is special!  We do not need the spinlock when clearing I_NEW,
 	 * because we're guaranteed that nobody else tries to do anything about
 	 * the state of the inode when it is locked, as we just created it (so
-	 * there can be no old holders that haven't tested I_LOCK).
+	 * there can be no old holders that haven't tested I_NEW).
 	 * However we must emit the memory barrier so that other CPUs reliably
-	 * see the clearing of I_LOCK after the other inode initialisation has
+	 * see the clearing of I_NEW after the other inode initialisation has
 	 * completed.
 	 */
 	smp_mb();
-	WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW));
-	inode->i_state &= ~(I_LOCK|I_NEW);
+	WARN_ON(!(inode->i_state & I_NEW));
+	inode->i_state &= ~I_NEW;
 	wake_up_inode(inode);
 }
 EXPORT_SYMBOL(unlock_new_inode);
@@ -731,7 +731,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 			goto set_failed;
 
 		__inode_add_to_lists(sb, head, inode);
-		inode->i_state = I_LOCK|I_NEW;
+		inode->i_state = I_NEW;
 		spin_unlock(&inode_lock);
 
 		/* Return the locked inode with I_NEW set, the
@@ -778,7 +778,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
 		if (!old) {
 			inode->i_ino = ino;
 			__inode_add_to_lists(sb, head, inode);
-			inode->i_state = I_LOCK|I_NEW;
+			inode->i_state = I_NEW;
 			spin_unlock(&inode_lock);
 
 			/* Return the locked inode with I_NEW set, the
@@ -1083,7 +1083,7 @@ int insert_inode_locked(struct inode *inode)
 	ino_t ino = inode->i_ino;
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 
-	inode->i_state |= I_LOCK|I_NEW;
+	inode->i_state |= I_NEW;
 	while (1) {
 		struct hlist_node *node;
 		struct inode *old = NULL;
@@ -1120,7 +1120,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 	struct super_block *sb = inode->i_sb;
 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 
-	inode->i_state |= I_LOCK|I_NEW;
+	inode->i_state |= I_NEW;
 
 	while (1) {
 		struct hlist_node *node;
@@ -1510,7 +1510,7 @@ EXPORT_SYMBOL(inode_wait);
  * until the deletion _might_ have completed. Callers are responsible
  * to recheck inode state.
  *
- * It doesn't matter if I_LOCK is not set initially, a call to
+ * It doesn't matter if I_NEW is not set initially, a call to
  * wake_up_inode() after removing from the hash list will DTRT.
  *
  * This is called with inode_lock held.
@@ -1518,8 +1518,8 @@ EXPORT_SYMBOL(inode_wait);
 static void __wait_on_freeing_inode(struct inode *inode)
 {
 	wait_queue_head_t *wq;
-	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
-	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
+	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
+	wq = bit_waitqueue(&inode->i_state, __I_NEW);
 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
 	spin_unlock(&inode_lock);
 	schedule();
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index f26e4d03ada5..d945ea76b445 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -1292,7 +1292,7 @@ int txCommit(tid_t tid,	/* transaction identifier */
 		 */
 		/*
 		 * I believe this code is no longer needed.  Splitting I_LOCK
-		 * into two bits, I_LOCK and I_SYNC should prevent this
+		 * into two bits, I_NEW and I_SYNC should prevent this
 		 * deadlock as well.  But since I don't have a JFS testload
 		 * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
 		 * Joern
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 9938034762cc..dc2505abb6d7 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -530,7 +530,7 @@ err_corrupt_attr:
  *	the ntfs inode.
  *
  * Q: What locks are held when the function is called?
- * A: i_state has I_LOCK set, hence the inode is locked, also
+ * A: i_state has I_NEW set, hence the inode is locked, also
  *	i_count is set to 1, so it is not going to go away
  *	i_flags is set to 0 and we have no business touching it.  Only an ioctl()
  *	is allowed to write to them. We should of course be honouring them but
@@ -1207,7 +1207,7 @@ err_out:
  * necessary fields in @vi as well as initializing the ntfs inode.
  *
  * Q: What locks are held when the function is called?
- * A: i_state has I_LOCK set, hence the inode is locked, also
+ * A: i_state has I_NEW set, hence the inode is locked, also
  *	i_count is set to 1, so it is not going to go away
  *
 * Return 0 on success and -errno on error.  In the error case, the inode will
@@ -1474,7 +1474,7 @@ err_out:
  * normal directory inodes.
  *
  * Q: What locks are held when the function is called?
- * A: i_state has I_LOCK set, hence the inode is locked, also
+ * A: i_state has I_NEW set, hence the inode is locked, also
  *	i_count is set to 1, so it is not going to go away
  *
 * Return 0 on success and -errno on error.  In the error case, the inode will
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 39849f887e72..16a6444330ec 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -45,7 +45,7 @@
 *
 * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
- * ondemand_readahead -> readpage"). In case of readahead, @I_LOCK flag is not
+ * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not
 * set as well. However, UBIFS disables readahead.
 *
 */
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 1d5b298ba8b2..225946012d0b 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -794,7 +794,7 @@ xfs_setup_inode(
 	struct inode		*inode = &ip->i_vnode;
 
 	inode->i_ino = ip->i_ino;
-	inode->i_state = I_NEW|I_LOCK;
+	inode->i_state = I_NEW;
 	inode_add_to_lists(ip->i_mount->m_super, inode);
 
 	inode->i_mode	= ip->i_d.di_mode;
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 0de36c2a46f1..fa402a6bbbcf 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -91,7 +91,7 @@ xfs_inode_alloc(
 	ip->i_new_size = 0;
 
 	/* prevent anyone from using this yet */
-	VFS_I(ip)->i_state = I_NEW|I_LOCK;
+	VFS_I(ip)->i_state = I_NEW;
 
 	return ip;
 }
@@ -217,7 +217,7 @@ xfs_iget_cache_hit(
 			trace_xfs_iget_reclaim(ip);
 			goto out_error;
 		}
-		inode->i_state = I_LOCK|I_NEW;
+		inode->i_state = I_NEW;
 	} else {
 		/* If the VFS inode is being torn down, pause and try again. */
 		if (!igrab(inode)) {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 77a975089d9a..cca191933ff6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1587,7 +1587,7 @@ struct super_operations {
 * until that flag is cleared.  I_WILL_FREE, I_FREEING and I_CLEAR are set at
 * various stages of removing an inode.
 *
- * Two bits are used for locking and completion notification, I_LOCK and I_SYNC.
+ * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
 *
 * I_DIRTY_SYNC		Inode is dirty, but doesn't have to be written on
 *			fdatasync().  i_atime is the usual cause.
@@ -1596,8 +1596,14 @@ struct super_operations {
 *			don't have to write inode on fdatasync() when only
 *			mtime has changed in it.
 * I_DIRTY_PAGES	Inode has dirty pages.  Inode itself may be clean.
- * I_NEW		get_new_inode() sets i_state to I_LOCK|I_NEW.  Both
- *			are cleared by unlock_new_inode(), called from iget().
+ * I_NEW		Serves as both a mutex and completion notification.
+ *			New inodes set I_NEW.  If two processes both create
+ *			the same inode, one of them will release its inode and
+ *			wait for I_NEW to be released before returning.
+ *			Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
+ *			also cause waiting on I_NEW, without I_NEW actually
+ *			being set.  find_inode() uses this to prevent returning
+ *			nearly-dead inodes.
 * I_WILL_FREE		Must be set when calling write_inode_now() if i_count
 *			is zero.  I_FREEING must be set when I_WILL_FREE is
 *			cleared.
@@ -1611,20 +1617,11 @@ struct super_operations {
 *			prohibited for many purposes.  iget() must wait for
 *			the inode to be completely released, then create it
 *			anew.  Other functions will just ignore such inodes,
- *			if appropriate.  I_LOCK is used for waiting.
+ *			if appropriate.  I_NEW is used for waiting.
 *
- * I_LOCK		Serves as both a mutex and completion notification.
- *			New inodes set I_LOCK.  If two processes both create
- *			the same inode, one of them will release its inode and
- *			wait for I_LOCK to be released before returning.
- *			Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
- *			also cause waiting on I_LOCK, without I_LOCK actually
- *			being set.  find_inode() uses this to prevent returning
- *			nearly-dead inodes.
- * I_SYNC		Similar to I_LOCK, but limited in scope to writeback
- *			of inode dirty data.  Having a separate lock for this
- *			purpose reduces latency and prevents some filesystem-
- *			specific deadlocks.
+ * I_SYNC		Synchonized write of dirty inode data.  The bits is
+ *			set during data writeback, and cleared with a wakeup
+ *			on the bit address once it is done.
 *
 * Q: What is the difference between I_WILL_FREE and I_FREEING?
 * Q: igrab() only checks on (I_FREEING|I_WILL_FREE).  Should it also check on
@@ -1633,13 +1630,12 @@ struct super_operations {
 #define I_DIRTY_SYNC		1
 #define I_DIRTY_DATASYNC	2
 #define I_DIRTY_PAGES		4
-#define I_NEW			8
+#define __I_NEW			3
+#define I_NEW			(1 << __I_NEW)
 #define I_WILL_FREE		16
 #define I_FREEING		32
 #define I_CLEAR			64
-#define __I_LOCK		7
-#define I_LOCK			(1 << __I_LOCK)
-#define __I_SYNC		8
+#define __I_SYNC		7
 #define I_SYNC			(1 << __I_SYNC)
 
 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 705f01fe413a..c18c008f4bbf 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -79,8 +79,7 @@ void wakeup_flusher_threads(long nr_pages);
 static inline void wait_on_inode(struct inode *inode)
 {
 	might_sleep();
-	wait_on_bit(&inode->i_state, __I_LOCK, inode_wait,
-							TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
 }
 
 static inline void inode_sync_wait(struct inode *inode)
 {
```
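
The patch keeps the existing publish-then-wake protocol and only retargets it from __I_LOCK to __I_NEW (wake_up_bit() in wake_up_inode(), wait_on_bit() in wait_on_inode()). The sketch below is a rough userspace analogue using C11 atomics and a busy-wait instead of the kernel's bit-waitqueues; struct fake_inode and all other names are made up for illustration.

```c
/*
 * Userspace analogue (not kernel code) of the I_NEW publish/wait protocol:
 * the creator initialises the inode, then clears I_NEW with release
 * semantics (the kernel issues smp_mb() and wake_up_bit(&i_state, __I_NEW));
 * lookers wait for I_NEW to clear before touching the inode.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

#define I_NEW (1 << 3)                 /* value from the fs.h hunk above */

struct fake_inode {
	atomic_ulong i_state;
	int payload;                   /* stands in for the rest of inode init */
};

static struct fake_inode ino;

static void *creator(void *arg)
{
	(void)arg;
	ino.payload = 42;              /* initialise the inode */
	/* release ordering plays the role of smp_mb() before clearing I_NEW */
	atomic_fetch_and_explicit(&ino.i_state, ~(unsigned long)I_NEW,
				  memory_order_release);
	return NULL;                   /* the kernel would wake_up_bit() here */
}

static void *waiter(void *arg)
{
	(void)arg;
	/* the kernel sleeps in wait_on_bit(); we simply spin for brevity */
	while (atomic_load_explicit(&ino.i_state, memory_order_acquire) & I_NEW)
		;
	printf("inode visible, payload=%d\n", ino.payload);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	atomic_init(&ino.i_state, I_NEW);   /* a new inode starts with I_NEW set */
	pthread_create(&a, NULL, waiter, NULL);
	pthread_create(&b, NULL, creator, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
```

The release/acquire pair stands in for the memory barrier unlock_new_inode() issues before clearing I_NEW: a CPU that observes the bit cleared is guaranteed to also observe the completed inode initialisation.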