author     Jan Kara <jack@suse.cz>  2017-04-05 14:09:48 +0200
committer  Jan Kara <jack@suse.cz>  2017-04-05 14:24:16 +0200
commit     71b0576bdb862e964a82c73327cdd1a249c53e67 (patch)
tree       172df641c17c0d88efb755bbc7e8ff35d619ec40 /fs/reiserfs/super.c
parent     65547661500885dde26ca4a75d4d7f7df587c88e (diff)
download   linux-71b0576bdb862e964a82c73327cdd1a249c53e67.tar.bz2
reiserfs: Make cancel_old_flush() reliable
Currently, canceling the delayed work that flushes old data using
cancel_old_flush() does not prevent the work from being requeued. Thus,
in theory, new work can be queued after cancel_old_flush() from
reiserfs_freeze() has run. This will become a larger problem once
flush_old_commits() can requeue the work itself.

Fix the problem by recording in sbi->work_queued that the flushing work
has been canceled and must not be requeued.
Signed-off-by: Jan Kara <jack@suse.cz>
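
As a rough illustration of the tri-state work_queued protocol this patch
introduces (0 = idle, 1 = flush queued, 2 = canceled), the sketch below models
the state transitions in plain userspace C. It is not the kernel code: a
pthread mutex stands in for sbi->old_work_lock, the enum names are invented
for readability, and queue_flush()/sync_fs() are hypothetical stubs standing
in for queue_delayed_work() and reiserfs_sync_fs().

/*
 * Illustration only: a userspace model of the work_queued protocol.
 * The function names mirror the reiserfs ones but this is not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

enum { FLUSH_IDLE = 0, FLUSH_QUEUED = 1, FLUSH_CANCELED = 2 };

static pthread_mutex_t old_work_lock = PTHREAD_MUTEX_INITIALIZER;
static int work_queued = FLUSH_IDLE;

static void queue_flush(void) { puts("flush queued"); }   /* stand-in */
static void sync_fs(void)     { puts("fs synced"); }      /* stand-in */

/* Mirrors reiserfs_schedule_old_flush(): only queue work when truly idle. */
static void schedule_old_flush(void)
{
	pthread_mutex_lock(&old_work_lock);
	if (work_queued == FLUSH_IDLE) {
		queue_flush();
		work_queued = FLUSH_QUEUED;
	}
	pthread_mutex_unlock(&old_work_lock);
}

/* Mirrors flush_old_commits(): clear QUEUED but never clobber CANCELED. */
static void flush_old_commits(void)
{
	pthread_mutex_lock(&old_work_lock);
	if (work_queued == FLUSH_QUEUED)
		work_queued = FLUSH_IDLE;
	pthread_mutex_unlock(&old_work_lock);
	sync_fs();
}

/* Mirrors reiserfs_cancel_old_flush(): mark CANCELED first, then wait. */
static void cancel_old_flush(void)
{
	pthread_mutex_lock(&old_work_lock);
	work_queued = FLUSH_CANCELED;
	pthread_mutex_unlock(&old_work_lock);
	/* In the kernel, cancel_delayed_work_sync() would run here. */
}

int main(void)
{
	schedule_old_flush();
	cancel_old_flush();
	schedule_old_flush();  /* no-op: state is CANCELED, as after freeze */
	flush_old_commits();   /* CANCELED state survives the flush */
	printf("final state: %d\n", work_queued);
	return 0;
}

The ordering in the patched reiserfs_cancel_old_flush() is the point: the flag
is moved to the canceled state under the lock before cancel_delayed_work_sync()
runs, so nothing can requeue the flush once the cancel completes, and
reiserfs_unfreeze() resets the flag so periodic flushing can resume after thaw.
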
Diffstat (limited to 'fs/reiserfs/super.c')
-rw-r--r--	fs/reiserfs/super.c	21
1 file changed, 15 insertions, 6 deletions
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index feabcde0290d..91cf5cbd6332 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -90,7 +90,9 @@ static void flush_old_commits(struct work_struct *work)
 	s = sbi->s_journal->j_work_sb;
 
 	spin_lock(&sbi->old_work_lock);
-	sbi->work_queued = 0;
+	/* Avoid clobbering the cancel state... */
+	if (sbi->work_queued == 1)
+		sbi->work_queued = 0;
 	spin_unlock(&sbi->old_work_lock);
 
 	reiserfs_sync_fs(s, 1);
@@ -117,21 +119,22 @@ void reiserfs_schedule_old_flush(struct super_block *s)
 	spin_unlock(&sbi->old_work_lock);
 }
 
-static void cancel_old_flush(struct super_block *s)
+void reiserfs_cancel_old_flush(struct super_block *s)
 {
 	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
 
-	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
 	spin_lock(&sbi->old_work_lock);
-	sbi->work_queued = 0;
+	/* Make sure no new flushes will be queued */
+	sbi->work_queued = 2;
 	spin_unlock(&sbi->old_work_lock);
+	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
 }
 
 static int reiserfs_freeze(struct super_block *s)
 {
 	struct reiserfs_transaction_handle th;
 
-	cancel_old_flush(s);
+	reiserfs_cancel_old_flush(s);
 
 	reiserfs_write_lock(s);
 	if (!(s->s_flags & MS_RDONLY)) {
@@ -152,7 +155,13 @@ static int reiserfs_freeze(struct super_block *s)
 
 static int reiserfs_unfreeze(struct super_block *s)
 {
+	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+
 	reiserfs_allow_writes(s);
+	spin_lock(&sbi->old_work_lock);
+	/* Allow old_work to run again */
+	sbi->work_queued = 0;
+	spin_unlock(&sbi->old_work_lock);
 	return 0;
 }
 
@@ -2194,7 +2203,7 @@ error_unlocked:
 	if (sbi->commit_wq)
 		destroy_workqueue(sbi->commit_wq);
 
-	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+	reiserfs_cancel_old_flush(s);
 
 	reiserfs_free_bitmap_cache(s);
 	if (SB_BUFFER_WITH_SB(s))