author     Dave Jiang <dave.jiang@intel.com>          2018-08-10 08:48:18 -0700
committer  Darrick J. Wong <darrick.wong@oracle.com>  2018-08-12 08:37:31 -0700
commit     e25ff835af89a80aa6a4de58f413e494b2b96bd1
tree       0fc0f6bd0cc21537f79509bba4761bb0f74696b3 /fs
parent     13942aa94a8b5df662d93c42c307b2f50cbe88b0
xfs: Close race between direct IO and xfs_break_layouts()
This patch is the xfs counterpart of Ross's fix for ext4.
If the refcount of a page is lowered between the time it is returned by
dax_layout_busy_page() and the time the refcount is re-checked in
xfs_break_layouts() => ___wait_var_event(), the waiting function
xfs_wait_dax_page() will never be called. This means that
xfs_break_layouts() will still have 'retry' set to false, so we stop
looping and never check the refcount of the other pages in this inode.
Instead, always continue looping as long as dax_layout_busy_page() returns
a page that it found with an elevated refcount.
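For context, a simplified sketch of the caller-side retry loop follows. It is illustrative only: the real xfs_break_layouts() in this tree also breaks lease layouts and takes additional arguments that are omitted here.

/*
 * Illustrative sketch (not part of this patch): the caller-side retry
 * loop, reduced to the DAX path.  The real xfs_break_layouts() also
 * breaks lease layouts and takes additional arguments omitted here.
 */
int
xfs_break_layouts(
	struct inode		*inode)
{
	bool			retry;
	int			error;

	do {
		retry = false;
		error = xfs_break_dax_layouts(inode, &retry);
		/*
		 * With this patch, xfs_break_dax_layouts() sets retry before
		 * it sleeps, so a page whose refcount drops between
		 * dax_layout_busy_page() and the wait still triggers another
		 * pass over the inode's pages instead of ending the loop.
		 */
	} while (error == 0 && retry);

	return error;
}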
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Diffstat (limited to 'fs')
-rw-r--r--   fs/xfs/xfs_file.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 6b31f41eafa2..181e9084519b 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -721,12 +721,10 @@ xfs_file_write_iter(
 
 static void
 xfs_wait_dax_page(
-	struct inode		*inode,
-	bool			*did_unlock)
+	struct inode		*inode)
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 
-	*did_unlock = true;
 	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 	schedule();
 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
@@ -735,7 +733,7 @@ xfs_wait_dax_page(
 static int
 xfs_break_dax_layouts(
 	struct inode		*inode,
-	bool			*did_unlock)
+	bool			*retry)
 {
 	struct page		*page;
 
@@ -745,9 +743,10 @@ xfs_break_dax_layouts(
 	if (!page)
 		return 0;
 
+	*retry = true;
 	return ___wait_var_event(&page->_refcount,
 			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
-			0, 0, xfs_wait_dax_page(inode, did_unlock));
+			0, 0, xfs_wait_dax_page(inode));
 }
 
 int
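For readability, here are the two functions as they stand with this patch applied, reconstructed from the hunks above. The unchanged lines elided between hunks (the dax_layout_busy_page() lookup) are paraphrased from the surrounding source, and the comments are added here for explanation.

static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/* Drop the mmap lock, give the page's holder a chance to run, relock. */
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	/* Context elided in the diff: find a page with an elevated refcount. */
	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	/*
	 * Request another pass before sleeping, so the retry is recorded
	 * even if the refcount drops before the wait condition is checked.
	 */
	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}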