Diffstat (limited to 'fs/xfs/xfs_inode_item.c')
-rw-r--r--  fs/xfs/xfs_inode_item.c | 308
1 file changed, 154 insertions(+), 154 deletions(-)
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index ba47bf65b772..895f61b2b4f0 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -439,6 +439,7 @@ xfs_inode_item_pin(
struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ ASSERT(lip->li_buf);
trace_xfs_inode_pin(ip, _RET_IP_);
atomic_inc(&ip->i_pincount);
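
This assert encodes an invariant established by this patch series: a dirty
inode log item always holds a reference to its backing cluster buffer in
li_buf. A minimal sketch of the attach side, assuming it follows the shape of
xfs_trans_log_inode() in this series (buffer lookup and error handling
omitted):

	/* First logging of the inode: pin the cluster buffer to the item. */
	xfs_buf_hold(bp);
	spin_lock(&iip->ili_lock);
	bp->b_flags |= _XBF_INODES;
	iip->ili_item.li_buf = bp;
	list_add_tail(&iip->ili_item.li_bio_list, &bp->b_li_list);
	spin_unlock(&iip->ili_lock);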
@@ -450,6 +451,12 @@ xfs_inode_item_pin(
* item which was previously pinned with a call to xfs_inode_item_pin().
*
* Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
+ *
+ * Note that unpin can race with freeing of the inode cluster buffer, which
+ * marks the buffer stale. In that case, flush completions are run from the
+ * buffer unpin call, which may happen before the inode is unpinned. If we
+ * lose the race, there will be no buffer attached to the log item, but the
+ * inode will be marked XFS_ISTALE.
*/
STATIC void
xfs_inode_item_unpin(
@@ -459,28 +466,12 @@ xfs_inode_item_unpin(
struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode;
trace_xfs_inode_unpin(ip, _RET_IP_);
+ ASSERT(lip->li_buf || xfs_iflags_test(ip, XFS_ISTALE));
ASSERT(atomic_read(&ip->i_pincount) > 0);
if (atomic_dec_and_test(&ip->i_pincount))
wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
}
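
For reference, the wakeup above pairs with a waiter that re-checks the pin
count rather than an actual flag bit; a sketch simplified from
__xfs_iunpin_wait() (the real code also forces the log so the pin count can
drop; the wrapper name here is hypothetical):

	static void
	iunpin_wait_sketch(
		struct xfs_inode	*ip)
	{
		wait_queue_head_t	*wq = bit_waitqueue(&ip->i_flags,
							__XFS_IPINNED_BIT);
		DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

		do {
			prepare_to_wait(wq, &wait.wq_entry,
					TASK_UNINTERRUPTIBLE);
			if (xfs_ipincount(ip))
				schedule();
		} while (xfs_ipincount(ip));
		finish_wait(wq, &wait.wq_entry);
	}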
-/*
- * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
- * have been failed during writeback
- *
- * This informs the AIL that the inode is already flush locked on the next push,
- * and acquires a hold on the buffer to ensure that it isn't reclaimed before
- * dirty data makes it to disk.
- */
-STATIC void
-xfs_inode_item_error(
- struct xfs_log_item *lip,
- struct xfs_buf *bp)
-{
- ASSERT(xfs_isiflocked(INODE_ITEM(lip)->ili_inode));
- xfs_set_li_failed(lip, bp);
-}
-
STATIC uint
xfs_inode_item_push(
struct xfs_log_item *lip,
@@ -494,55 +485,44 @@ xfs_inode_item_push(
uint rval = XFS_ITEM_SUCCESS;
int error;
- if (xfs_ipincount(ip) > 0)
+ ASSERT(iip->ili_item.li_buf);
+
+ if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) ||
+ (ip->i_flags & XFS_ISTALE))
return XFS_ITEM_PINNED;
- if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
- return XFS_ITEM_LOCKED;
+ /* If the inode is already flush locked, we're already flushing. */
+ if (xfs_isiflocked(ip))
+ return XFS_ITEM_FLUSHING;
- /*
- * Re-check the pincount now that we stabilized the value by
- * taking the ilock.
- */
- if (xfs_ipincount(ip) > 0) {
- rval = XFS_ITEM_PINNED;
- goto out_unlock;
- }
+ if (!xfs_buf_trylock(bp))
+ return XFS_ITEM_LOCKED;
- /*
- * Stale inode items should force out the iclog.
- */
- if (ip->i_flags & XFS_ISTALE) {
- rval = XFS_ITEM_PINNED;
- goto out_unlock;
- }
+ spin_unlock(&lip->li_ailp->ail_lock);
/*
- * Someone else is already flushing the inode. Nothing we can do
- * here but wait for the flush to finish and remove the item from
- * the AIL.
+ * We need to hold a reference for flushing the cluster buffer, as it may
+ * fail the buffer without submitting IO. In that case we need a reference
+ * for the completion that is run at failure time, because otherwise we
+ * don't take a reference for IO until the buffer is queued for delwri
+ * submission.
*/
- if (!xfs_iflock_nowait(ip)) {
- rval = XFS_ITEM_FLUSHING;
- goto out_unlock;
- }
-
- ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
- ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
-
- spin_unlock(&lip->li_ailp->ail_lock);
-
- error = xfs_iflush(ip, &bp);
+ xfs_buf_hold(bp);
+ error = xfs_iflush_cluster(bp);
if (!error) {
if (!xfs_buf_delwri_queue(bp, buffer_list))
rval = XFS_ITEM_FLUSHING;
xfs_buf_relse(bp);
- } else if (error == -EAGAIN)
+ } else {
+ /*
+ * Release the buffer if we were unable to flush anything. On
+ * any other error, the buffer has already been released.
+ */
+ if (error == -EAGAIN)
+ xfs_buf_relse(bp);
rval = XFS_ITEM_LOCKED;
+ }
spin_lock(&lip->li_ailp->ail_lock);
-out_unlock:
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
return rval;
}
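
For context, a sketch of how the AIL push loop consumes these return codes,
heavily simplified from xfsaild_push() (backoff counters and stuck-item
accounting omitted; the wrapper name is hypothetical):

	static void
	ail_push_one_sketch(
		struct xfs_ail		*ailp,
		struct xfs_log_item	*lip)
	{
		switch (lip->li_ops->iop_push(lip, &ailp->ail_buf_list)) {
		case XFS_ITEM_SUCCESS:
			/* flushed and queued to ail_buf_list for delwri */
			break;
		case XFS_ITEM_PINNED:
			/* needs a log force before it can be written back */
			ailp->ail_log_flush++;
			break;
		case XFS_ITEM_FLUSHING:
			/* flush already in progress; wait for completion */
			break;
		case XFS_ITEM_LOCKED:
			/* couldn't get the buffer lock; retry next push */
			break;
		}
	}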
@@ -621,7 +601,6 @@ static const struct xfs_item_ops xfs_inode_item_ops = {
.iop_committed = xfs_inode_item_committed,
.iop_push = xfs_inode_item_push,
.iop_committing = xfs_inode_item_committing,
- .iop_error = xfs_inode_item_error
};
@@ -636,9 +615,11 @@ xfs_inode_item_init(
struct xfs_inode_log_item *iip;
ASSERT(ip->i_itemp == NULL);
- iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, 0);
+ iip = ip->i_itemp = kmem_cache_zalloc(xfs_ili_zone,
+ GFP_KERNEL | __GFP_NOFAIL);
iip->ili_inode = ip;
+ spin_lock_init(&iip->ili_lock);
xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
&xfs_inode_item_ops);
}
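
The __GFP_NOFAIL flag means the allocation retries indefinitely instead of
returning NULL, which is why there is no error check here. A sketch of the
slab cache this allocates from, assuming the usual setup in xfs_init_zones()
(flags illustrative):

	xfs_ili_zone = kmem_cache_create("xfs_ili",
			sizeof(struct xfs_inode_log_item), 0, 0, NULL);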
@@ -648,110 +629,129 @@ xfs_inode_item_init(
*/
void
xfs_inode_item_destroy(
- xfs_inode_t *ip)
+ struct xfs_inode *ip)
{
- kmem_free(ip->i_itemp->ili_item.li_lv_shadow);
- kmem_cache_free(xfs_ili_zone, ip->i_itemp);
+ struct xfs_inode_log_item *iip = ip->i_itemp;
+
+ ASSERT(iip->ili_item.li_buf == NULL);
+
+ ip->i_itemp = NULL;
+ kmem_free(iip->ili_item.li_lv_shadow);
+ kmem_cache_free(xfs_ili_zone, iip);
}
/*
- * This is the inode flushing I/O completion routine. It is called
- * from interrupt level when the buffer containing the inode is
- * flushed to disk. It is responsible for removing the inode item
- * from the AIL if it has not been re-logged, and unlocking the inode's
- * flush lock.
- *
- * To reduce AIL lock traffic as much as possible, we scan the buffer log item
- * list for other inodes that will run this function. We remove them from the
- * buffer list so we can process all the inode IO completions in one AIL lock
- * traversal.
+ * We only want to pull the item from the AIL if it is actually there
+ * and its location in the log has not changed since we started the
+ * flush. Thus, we only bother if the inode's lsn has not changed.
*/
-void
-xfs_iflush_done(
- struct xfs_buf *bp,
- struct xfs_log_item *lip)
+static void
+xfs_iflush_ail_updates(
+ struct xfs_ail *ailp,
+ struct list_head *list)
{
- struct xfs_inode_log_item *iip;
- struct xfs_log_item *blip, *n;
- struct xfs_ail *ailp = lip->li_ailp;
- int need_ail = 0;
- LIST_HEAD(tmp);
+ struct xfs_log_item *lip;
+ xfs_lsn_t tail_lsn = 0;
- /*
- * Scan the buffer IO completions for other inodes being completed and
- * attach them to the current inode log item.
- */
+ /* this is an opencoded batch version of xfs_trans_ail_delete */
+ spin_lock(&ailp->ail_lock);
+ list_for_each_entry(lip, list, li_bio_list) {
+ xfs_lsn_t lsn;
- list_add_tail(&lip->li_bio_list, &tmp);
-
- list_for_each_entry_safe(blip, n, &bp->b_li_list, li_bio_list) {
- if (lip->li_cb != xfs_iflush_done)
+ clear_bit(XFS_LI_FAILED, &lip->li_flags);
+ if (INODE_ITEM(lip)->ili_flush_lsn != lip->li_lsn)
continue;
- list_move_tail(&blip->li_bio_list, &tmp);
+ lsn = xfs_ail_delete_one(ailp, lip);
+ if (!tail_lsn && lsn)
+ tail_lsn = lsn;
+ }
+ xfs_ail_update_finish(ailp, tail_lsn);
+}
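
Only the lsn of the first item removed matters: xfs_ail_update_finish() uses
it to detect a log tail move, and later items in the batch are at the same or
higher lsn. The single-item path being opencoded looks roughly like this,
assuming the xfs_trans_ail_delete() of this series (shutdown checks omitted):

	xfs_lsn_t	tail_lsn;

	spin_lock(&ailp->ail_lock);
	xfs_clear_li_failed(lip);		/* also releases li_buf */
	tail_lsn = xfs_ail_delete_one(ailp, lip);
	xfs_ail_update_finish(ailp, tail_lsn);	/* drops ail_lock */

The batch version deliberately uses a bare clear_bit() instead of
xfs_clear_li_failed(), because the buffer reference is now owned by the inode
log item and must not be dropped during AIL removal.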
+
+/*
+ * Walk the list of inodes that have completed their IOs. If they are clean,
+ * remove them from the list and dissociate them from the buffer. Inodes that
+ * are still dirty remain attached to the buffer and on the list; the caller
+ * must handle them appropriately.
+ */
+static void
+xfs_iflush_finish(
+ struct xfs_buf *bp,
+ struct list_head *list)
+{
+ struct xfs_log_item *lip, *n;
+
+ list_for_each_entry_safe(lip, n, list, li_bio_list) {
+ struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+ bool drop_buffer = false;
+
+ spin_lock(&iip->ili_lock);
+
/*
- * while we have the item, do the unlocked check for needing
- * the AIL lock.
+ * Detach the cluster buffer from the log item if the inode is
+ * clean in memory; the buffer reference itself is dropped once
+ * we have released the locks we hold.
*/
- iip = INODE_ITEM(blip);
- if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
- test_bit(XFS_LI_FAILED, &blip->li_flags))
- need_ail++;
+ ASSERT(iip->ili_item.li_buf == bp);
+ if (!iip->ili_fields) {
+ iip->ili_item.li_buf = NULL;
+ list_del_init(&lip->li_bio_list);
+ drop_buffer = true;
+ }
+ iip->ili_last_fields = 0;
+ iip->ili_flush_lsn = 0;
+ spin_unlock(&iip->ili_lock);
+ xfs_ifunlock(iip->ili_inode);
+ if (drop_buffer)
+ xfs_buf_rele(bp);
}
+}
- /* make sure we capture the state of the initial inode. */
- iip = INODE_ITEM(lip);
- if ((iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) ||
- test_bit(XFS_LI_FAILED, &lip->li_flags))
- need_ail++;
+/*
+ * Inode buffer IO completion routine. It is responsible for removing inodes
+ * attached to the buffer from the AIL if they have not been re-logged, as
+ * well as completing the flush and unlocking the inodes.
+ */
+void
+xfs_iflush_done(
+ struct xfs_buf *bp)
+{
+ struct xfs_log_item *lip, *n;
+ LIST_HEAD(flushed_inodes);
+ LIST_HEAD(ail_updates);
/*
- * We only want to pull the item from the AIL if it is
- * actually there and its location in the log has not
- * changed since we started the flush. Thus, we only bother
- * if the ili_logged flag is set and the inode's lsn has not
- * changed. First we check the lsn outside
- * the lock since it's cheaper, and then we recheck while
- * holding the lock before removing the inode from the AIL.
+ * Pull the attached inodes from the buffer one at a time and take the
+ * appropriate action on them.
*/
- if (need_ail) {
- xfs_lsn_t tail_lsn = 0;
-
- /* this is an opencoded batch version of xfs_trans_ail_delete */
- spin_lock(&ailp->ail_lock);
- list_for_each_entry(blip, &tmp, li_bio_list) {
- if (INODE_ITEM(blip)->ili_logged &&
- blip->li_lsn == INODE_ITEM(blip)->ili_flush_lsn) {
- /*
- * xfs_ail_update_finish() only cares about the
- * lsn of the first tail item removed, any
- * others will be at the same or higher lsn so
- * we just ignore them.
- */
- xfs_lsn_t lsn = xfs_ail_delete_one(ailp, blip);
- if (!tail_lsn && lsn)
- tail_lsn = lsn;
- } else {
- xfs_clear_li_failed(blip);
- }
+ list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
+ struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+
+ if (xfs_iflags_test(iip->ili_inode, XFS_ISTALE)) {
+ xfs_iflush_abort(iip->ili_inode);
+ continue;
}
- xfs_ail_update_finish(ailp, tail_lsn);
+ if (!iip->ili_last_fields)
+ continue;
+
+ /* Do an unlocked check for needing the AIL lock. */
+ if (iip->ili_flush_lsn == lip->li_lsn ||
+ test_bit(XFS_LI_FAILED, &lip->li_flags))
+ list_move_tail(&lip->li_bio_list, &ail_updates);
+ else
+ list_move_tail(&lip->li_bio_list, &flushed_inodes);
}
- /*
- * clean up and unlock the flush lock now we are done. We can clear the
- * ili_last_fields bits now that we know that the data corresponding to
- * them is safely on disk.
- */
- list_for_each_entry_safe(blip, n, &tmp, li_bio_list) {
- list_del_init(&blip->li_bio_list);
- iip = INODE_ITEM(blip);
- iip->ili_logged = 0;
- iip->ili_last_fields = 0;
- xfs_ifunlock(iip->ili_inode);
+ if (!list_empty(&ail_updates)) {
+ xfs_iflush_ail_updates(bp->b_mount->m_ail, &ail_updates);
+ list_splice_tail(&ail_updates, &flushed_inodes);
}
- list_del(&tmp);
+
+ xfs_iflush_finish(bp, &flushed_inodes);
+ if (!list_empty(&flushed_inodes))
+ list_splice_tail(&flushed_inodes, &bp->b_li_list);
}
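
Nothing reaches this through per-item callbacks any more; buffer IO completion
dispatches on the buffer type flag set when the inode was attached, roughly as
follows (a sketch assuming the _XBF_INODES tagging and xfs_buf_inode_iodone()
helper from earlier in this series):

	/* In xfs_buf_ioend(), on the write completion side: */
	if (bp->b_flags & _XBF_INODES) {
		xfs_buf_inode_iodone(bp);	/* ends up in xfs_iflush_done() */
		return;
	}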
/*
@@ -762,37 +762,37 @@ xfs_iflush_done(
*/
void
xfs_iflush_abort(
- struct xfs_inode *ip)
+ struct xfs_inode *ip)
{
- struct xfs_inode_log_item *iip = ip->i_itemp;
+ struct xfs_inode_log_item *iip = ip->i_itemp;
+ struct xfs_buf *bp = NULL;
if (iip) {
- xfs_trans_ail_delete(&iip->ili_item, 0);
- iip->ili_logged = 0;
/*
- * Clear the ili_last_fields bits now that we know that the
- * data corresponding to them is safely on disk.
+ * Clear the failed bit before removing the item from the AIL so
+ * xfs_trans_ail_delete() doesn't try to clear and release the
+ * buffer attached to the log item before we are done with it.
*/
- iip->ili_last_fields = 0;
+ clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
+ xfs_trans_ail_delete(&iip->ili_item, 0);
+
/*
* Clear the inode logging fields so no more flushes are
* attempted.
*/
+ spin_lock(&iip->ili_lock);
+ iip->ili_last_fields = 0;
iip->ili_fields = 0;
iip->ili_fsync_fields = 0;
+ iip->ili_flush_lsn = 0;
+ bp = iip->ili_item.li_buf;
+ iip->ili_item.li_buf = NULL;
+ list_del_init(&iip->ili_item.li_bio_list);
+ spin_unlock(&iip->ili_lock);
}
- /*
- * Release the inode's flush lock since we're done with it.
- */
xfs_ifunlock(ip);
-}
-
-void
-xfs_istale_done(
- struct xfs_buf *bp,
- struct xfs_log_item *lip)
-{
- xfs_iflush_abort(INODE_ITEM(lip)->ili_inode);
+ if (bp)
+ xfs_buf_rele(bp);
}
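
The ordering constraint in the comment above follows from what AIL removal
does with failed items; xfs_clear_li_failed(), called from
xfs_trans_ail_delete() at this point in the series, is roughly:

	static inline void
	xfs_clear_li_failed(
		struct xfs_log_item	*lip)
	{
		struct xfs_buf	*bp = lip->li_buf;

		if (test_and_clear_bit(XFS_LI_FAILED, &lip->li_flags)) {
			lip->li_buf = NULL;
			xfs_buf_rele(bp);
		}
	}

With XFS_LI_FAILED cleared first, xfs_trans_ail_delete() leaves li_buf alone,
and xfs_iflush_abort() can detach and release the buffer itself under
ili_lock.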
/*