Diffstat (limited to 'fs/xfs/xfs_inode_item.c')
-rw-r--r--	fs/xfs/xfs_inode_item.c | 176
1 files changed, 53 insertions, 123 deletions
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 05d924efceaf..6cdbf90c6f7b 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -18,9 +18,7 @@
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_types.h"
-#include "xfs_bit.h"
 #include "xfs_log.h"
-#include "xfs_inum.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
@@ -480,25 +478,16 @@ xfs_inode_item_unpin(
 	wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
 }
 
-/*
- * This is called to attempt to lock the inode associated with this
- * inode log item, in preparation for the push routine which does the actual
- * iflush. Don't sleep on the inode lock or the flush lock.
- *
- * If the flush lock is already held, indicating that the inode has
- * been or is in the process of being flushed, then (ideally) we'd like to
- * see if the inode's buffer is still incore, and if so give it a nudge.
- * We delay doing so until the pushbuf routine, though, to avoid holding
- * the AIL lock across a call to the blackhole which is the buffer cache.
- * Also we don't want to sleep in any device strategy routines, which can happen
- * if we do the subsequent bawrite in here.
- */
 STATIC uint
-xfs_inode_item_trylock(
-	struct xfs_log_item	*lip)
+xfs_inode_item_push(
+	struct xfs_log_item	*lip,
+	struct list_head	*buffer_list)
 {
 	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
 	struct xfs_inode	*ip = iip->ili_inode;
+	struct xfs_buf		*bp = NULL;
+	uint			rval = XFS_ITEM_SUCCESS;
+	int			error;
 
 	if (xfs_ipincount(ip) > 0)
 		return XFS_ITEM_PINNED;
@@ -506,30 +495,50 @@ xfs_inode_item_trylock(
 	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 		return XFS_ITEM_LOCKED;
 
+	/*
+	 * Re-check the pincount now that we stabilized the value by
+	 * taking the ilock.
+	 */
+	if (xfs_ipincount(ip) > 0) {
+		rval = XFS_ITEM_PINNED;
+		goto out_unlock;
+	}
+
+	/*
+	 * Someone else is already flushing the inode. Nothing we can do
+	 * here but wait for the flush to finish and remove the item from
+	 * the AIL.
+	 */
 	if (!xfs_iflock_nowait(ip)) {
-		/*
-		 * inode has already been flushed to the backing buffer,
-		 * leave it locked in shared mode, pushbuf routine will
-		 * unlock it.
-		 */
-		return XFS_ITEM_PUSHBUF;
+		rval = XFS_ITEM_FLUSHING;
+		goto out_unlock;
 	}
 
-	/* Stale items should force out the iclog */
+	/*
+	 * Stale inode items should force out the iclog.
+	 */
 	if (ip->i_flags & XFS_ISTALE) {
 		xfs_ifunlock(ip);
 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
 		return XFS_ITEM_PINNED;
 	}
 
-#ifdef DEBUG
-	if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-		ASSERT(iip->ili_fields != 0);
-		ASSERT(iip->ili_logged == 0);
-		ASSERT(lip->li_flags & XFS_LI_IN_AIL);
+	ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
+	ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
+
+	spin_unlock(&lip->li_ailp->xa_lock);
+
+	error = xfs_iflush(ip, &bp);
+	if (!error) {
+		if (!xfs_buf_delwri_queue(bp, buffer_list))
+			rval = XFS_ITEM_FLUSHING;
+		xfs_buf_relse(bp);
 	}
-#endif
-	return XFS_ITEM_SUCCESS;
+
+	spin_lock(&lip->li_ailp->xa_lock);
+out_unlock:
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	return rval;
 }
 
 /*
@@ -614,86 +623,6 @@ xfs_inode_item_committed(
 }
 
 /*
- * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
- * failed to get the inode flush lock but did get the inode locked SHARED.
- * Here we're trying to see if the inode buffer is incore, and if so whether it's
- * marked delayed write. If that's the case, we'll promote it and that will
- * allow the caller to write the buffer by triggering the xfsbufd to run.
- */
-STATIC bool
-xfs_inode_item_pushbuf(
-	struct xfs_log_item	*lip)
-{
-	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
-	struct xfs_inode	*ip = iip->ili_inode;
-	struct xfs_buf		*bp;
-	bool			ret = true;
-
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
-
-	/*
-	 * If a flush is not in progress anymore, chances are that the
-	 * inode was taken off the AIL. So, just get out.
-	 */
-	if (!xfs_isiflocked(ip) ||
-	    !(lip->li_flags & XFS_LI_IN_AIL)) {
-		xfs_iunlock(ip, XFS_ILOCK_SHARED);
-		return true;
-	}
-
-	bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
-			iip->ili_format.ilf_len, XBF_TRYLOCK);
-
-	xfs_iunlock(ip, XFS_ILOCK_SHARED);
-	if (!bp)
-		return true;
-	if (XFS_BUF_ISDELAYWRITE(bp))
-		xfs_buf_delwri_promote(bp);
-	if (xfs_buf_ispinned(bp))
-		ret = false;
-	xfs_buf_relse(bp);
-	return ret;
-}
-
-/*
- * This is called to asynchronously write the inode associated with this
- * inode log item out to disk. The inode will already have been locked by
- * a successful call to xfs_inode_item_trylock().
- */
-STATIC void
-xfs_inode_item_push(
-	struct xfs_log_item	*lip)
-{
-	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
-	struct xfs_inode	*ip = iip->ili_inode;
-
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
-	ASSERT(xfs_isiflocked(ip));
-
-	/*
-	 * Since we were able to lock the inode's flush lock and
-	 * we found it on the AIL, the inode must be dirty. This
-	 * is because the inode is removed from the AIL while still
-	 * holding the flush lock in xfs_iflush_done(). Thus, if
-	 * we found it in the AIL and were able to obtain the flush
-	 * lock without sleeping, then there must not have been
-	 * anyone in the process of flushing the inode.
-	 */
-	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || iip->ili_fields != 0);
-
-	/*
-	 * Push the inode to it's backing buffer. This will not remove the
-	 * inode from the AIL - a further push will be required to trigger a
-	 * buffer push. However, this allows all the dirty inodes to be pushed
-	 * to the buffer before it is pushed to disk. The buffer IO completion
-	 * will pull the inode from the AIL, mark it clean and unlock the flush
-	 * lock.
-	 */
-	(void) xfs_iflush(ip, SYNC_TRYLOCK);
-	xfs_iunlock(ip, XFS_ILOCK_SHARED);
-}
-
-/*
  * XXX rcc - this one really has to do something. Probably needs
  * to stamp in a new field in the incore inode.
  */
@@ -713,11 +642,9 @@ static const struct xfs_item_ops xfs_inode_item_ops = {
 	.iop_format	= xfs_inode_item_format,
 	.iop_pin	= xfs_inode_item_pin,
 	.iop_unpin	= xfs_inode_item_unpin,
-	.iop_trylock	= xfs_inode_item_trylock,
 	.iop_unlock	= xfs_inode_item_unlock,
 	.iop_committed	= xfs_inode_item_committed,
 	.iop_push	= xfs_inode_item_push,
-	.iop_pushbuf	= xfs_inode_item_pushbuf,
 	.iop_committing = xfs_inode_item_committing
 };
 
@@ -848,7 +775,8 @@ xfs_iflush_done(
 			ASSERT(i <= need_ail);
 		}
 		/* xfs_trans_ail_delete_bulk() drops the AIL lock. */
-		xfs_trans_ail_delete_bulk(ailp, log_items, i);
+		xfs_trans_ail_delete_bulk(ailp, log_items, i,
+					  SHUTDOWN_CORRUPT_INCORE);
 	}
 
 
@@ -869,16 +797,15 @@ xfs_iflush_done(
 }
 
 /*
- * This is the inode flushing abort routine. It is called
- * from xfs_iflush when the filesystem is shutting down to clean
- * up the inode state.
- * It is responsible for removing the inode item
- * from the AIL if it has not been re-logged, and unlocking the inode's
- * flush lock.
+ * This is the inode flushing abort routine. It is called from xfs_iflush when
+ * the filesystem is shutting down to clean up the inode state. It is
+ * responsible for removing the inode item from the AIL if it has not been
+ * re-logged, and unlocking the inode's flush lock.
  */
 void
 xfs_iflush_abort(
-	xfs_inode_t		*ip)
+	xfs_inode_t		*ip,
+	bool			stale)
 {
 	xfs_inode_log_item_t	*iip = ip->i_itemp;
 
@@ -888,7 +815,10 @@ xfs_iflush_abort(
 		spin_lock(&ailp->xa_lock);
 		if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
 			/* xfs_trans_ail_delete() drops the AIL lock. */
-			xfs_trans_ail_delete(ailp, (xfs_log_item_t *)iip);
+			xfs_trans_ail_delete(ailp, &iip->ili_item,
+					     stale ?
					     SHUTDOWN_LOG_IO_ERROR :
+					     SHUTDOWN_CORRUPT_INCORE);
 		} else
 			spin_unlock(&ailp->xa_lock);
 	}
@@ -915,7 +845,7 @@ xfs_istale_done(
 	struct xfs_buf		*bp,
 	struct xfs_log_item	*lip)
 {
-	xfs_iflush_abort(INODE_ITEM(lip)->ili_inode);
+	xfs_iflush_abort(INODE_ITEM(lip)->ili_inode, true);
 }
 
 /*
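The return-code ladder introduced by the new xfs_inode_item_push() above is easiest to see in isolation. The following is a minimal user-space C sketch of that control flow only; enum push_result, struct fake_inode and push_decision() are hypothetical stand-ins that do not exist in XFS. It models the ordering of the checks in the patched push routine (pin count, ILOCK trylock, pin re-check under the ILOCK, flush-lock trylock, stale check, then flush and delwri queueing), not the actual locking or I/O.

#include <stdbool.h>
#include <stdio.h>

enum push_result { ITEM_SUCCESS, ITEM_PINNED, ITEM_LOCKED, ITEM_FLUSHING };

/* Hypothetical stand-in for the inode state, not struct xfs_inode. */
struct fake_inode {
	int	pincount;	/* log pin count (xfs_ipincount)             */
	bool	ilock_free;	/* would xfs_ilock_nowait() succeed?         */
	bool	iflock_free;	/* would xfs_iflock_nowait() succeed?        */
	bool	stale;		/* XFS_ISTALE equivalent                     */
	bool	flush_fails;	/* models an xfs_iflush() error              */
	bool	newly_queued;	/* models the xfs_buf_delwri_queue() return  */
};

/*
 * Mirrors the ordering in the new xfs_inode_item_push(): pin check, ILOCK
 * trylock, pin re-check under the ILOCK, flush-lock trylock, stale check,
 * then flush and queue the backing buffer on the caller's delwri list.
 */
static enum push_result push_decision(const struct fake_inode *ip)
{
	enum push_result rval = ITEM_SUCCESS;

	if (ip->pincount > 0)
		return ITEM_PINNED;
	if (!ip->ilock_free)
		return ITEM_LOCKED;

	/* The pin count can change until the ILOCK stabilizes it. */
	if (ip->pincount > 0) {
		rval = ITEM_PINNED;
		goto out_unlock;
	}
	/* Someone else holds the flush lock: wait for that flush to finish. */
	if (!ip->iflock_free) {
		rval = ITEM_FLUSHING;
		goto out_unlock;
	}
	/* Stale inode items should force out the iclog. */
	if (ip->stale)
		return ITEM_PINNED;

	/* Flushed; if the buffer was already queued elsewhere, report FLUSHING. */
	if (!ip->flush_fails && !ip->newly_queued)
		rval = ITEM_FLUSHING;
out_unlock:
	return rval;
}

int main(void)
{
	struct fake_inode ip = { .ilock_free = true, .iflock_free = false };

	printf("push result = %d (expect %d, ITEM_FLUSHING)\n",
	       push_decision(&ip), ITEM_FLUSHING);
	return 0;
}

The design point visible in the diff is that the AIL pusher now owns the delayed-write list passed in as buffer_list, so a single iop_push method can flush the inode and queue its backing buffer directly; the old iop_trylock/iop_pushbuf split and the XFS_ITEM_PUSHBUF state are replaced by XFS_ITEM_FLUSHING.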