author		Ben Myers <bpm@sgi.com>		2012-05-25 15:45:36 -0500
committer	Ben Myers <bpm@sgi.com>		2012-06-21 14:20:48 -0500
commit		8866fc6fa55e31b2bce931b7963ff16641b39dc7 (patch)
tree		f86eb7377bc637f1cc4f007826ecda945525af48
parent		59c84ed0ddc11f1823b4a33ace4fbcc948261bb2 (diff)
download	linux-8866fc6fa55e31b2bce931b7963ff16641b39dc7.tar.bz2
xfs: shutdown xfs_sync_worker before the log
Revert commit 1307bbd, which uses the s_umount semaphore to provide
exclusion between xfs_sync_worker and unmount, in favor of shutting down
the sync worker before freeing the log in xfs_log_unmount. This is a
cleaner way of resolving the race between xfs_sync_worker and unmount
than using s_umount.

Signed-off-by: Ben Myers <bpm@sgi.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
-rw-r--r--	fs/xfs/xfs_log.c	 1
-rw-r--r--	fs/xfs/xfs_sync.c	32
2 files changed, 17 insertions, 16 deletions
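
The ordering the commit message relies on is the generic workqueue teardown pattern: a self-rearming delayed work item must be cancelled synchronously before the data it operates on is freed. Below is a minimal sketch of that pattern, not the XFS code itself; the demo_* names are made up for illustration, and only cancel_delayed_work_sync(), INIT_DELAYED_WORK() and schedule_delayed_work() are the real workqueue API.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_ctx {
	struct delayed_work	work;
	int			resource;	/* stands in for the AIL and the log */
};

static struct demo_ctx *demo;

static void demo_worker(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(to_delayed_work(work),
					    struct demo_ctx, work);

	pr_info("demo: touching resource %d\n", ctx->resource);
	/* the worker rearms itself, as xfs_sync_worker() does */
	schedule_delayed_work(&ctx->work, msecs_to_jiffies(1000));
}

static int __init demo_init(void)
{
	demo = kzalloc(sizeof(*demo), GFP_KERNEL);
	if (!demo)
		return -ENOMEM;
	INIT_DELAYED_WORK(&demo->work, demo_worker);
	schedule_delayed_work(&demo->work, msecs_to_jiffies(1000));
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * cancel_delayed_work_sync() waits for a running instance and
	 * prevents further self-requeueing, so after it returns the
	 * worker can no longer touch the context and it is safe to free.
	 * This mirrors calling it in xfs_log_unmount() before
	 * xfs_trans_ail_destroy() and xlog_dealloc_log().
	 */
	cancel_delayed_work_sync(&demo->work);
	kfree(demo);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
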
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index f30d9807dc48..0e1a64f0439c 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -810,6 +810,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 void
 xfs_log_unmount(xfs_mount_t *mp)
 {
+	cancel_delayed_work_sync(&mp->m_sync_work);
 	xfs_trans_ail_destroy(mp);
 	xlog_dealloc_log(mp->m_log);
 }
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index c9d3409c5ca3..1e9ee064dbb2 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -386,23 +386,23 @@ xfs_sync_worker(
 	 * We shouldn't write/force the log if we are in the mount/unmount
 	 * process or on a read only filesystem. The workqueue still needs to be
 	 * active in both cases, however, because it is used for inode reclaim
-	 * during these times. Use the s_umount semaphore to provide exclusion
-	 * with unmount.
+	 * during these times. Use the MS_ACTIVE flag to avoid doing anything
+	 * during mount. Doing work during unmount is avoided by calling
+	 * cancel_delayed_work_sync on this work queue before tearing down
+	 * the ail and the log in xfs_log_unmount.
 	 */
-	if (down_read_trylock(&mp->m_super->s_umount)) {
-		if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
-			/* dgc: errors ignored here */
-			if (mp->m_super->s_frozen == SB_UNFROZEN &&
-			    xfs_log_need_covered(mp))
-				error = xfs_fs_log_dummy(mp);
-			else
-				xfs_log_force(mp, 0);
-
-			/* start pushing all the metadata that is currently
-			 * dirty */
-			xfs_ail_push_all(mp->m_ail);
-		}
-		up_read(&mp->m_super->s_umount);
+	if (!(mp->m_super->s_flags & MS_ACTIVE) &&
+	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+		/* dgc: errors ignored here */
+		if (mp->m_super->s_frozen == SB_UNFROZEN &&
+		    xfs_log_need_covered(mp))
+			error = xfs_fs_log_dummy(mp);
+		else
+			xfs_log_force(mp, 0);
+
+		/* start pushing all the metadata that is currently
+		 * dirty */
+		xfs_ail_push_all(mp->m_ail);
 	}
 
 	/* queue us up again */
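
For illustration, a minimal sketch of the worker shape this hunk and its comment describe: the log work sits behind a filesystem-state check, while the requeue stays unconditional so the periodic work remains available for inode reclaim during mount and unmount, and shutdown is handled by the cancel_delayed_work_sync() call added in xfs_log_unmount() above. The demo_* names and fields are hypothetical stand-ins, not the XFS structures.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct demo_mount {
	struct delayed_work	sync_work;
	bool			active;		/* stand-in for the superblock state check */
	bool			readonly;	/* stand-in for XFS_MOUNT_RDONLY */
};

/* stand-in for covering/forcing the log and pushing the AIL */
static void demo_do_log_work(struct demo_mount *dmp)
{
	pr_info("demo: log work, readonly=%d\n", dmp->readonly);
}

static void demo_sync_worker(struct work_struct *work)
{
	struct demo_mount *dmp = container_of(to_delayed_work(work),
					      struct demo_mount, sync_work);

	/* only do the expensive log work on a writable, fully mounted fs */
	if (dmp->active && !dmp->readonly)
		demo_do_log_work(dmp);

	/*
	 * Requeue unconditionally; the worker is stopped at unmount by
	 * cancel_delayed_work_sync(), not by a check in here.
	 */
	schedule_delayed_work(&dmp->sync_work, msecs_to_jiffies(30 * 1000));
}
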