 fs/btrfs/scrub.c | 39 +++++++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6d6e155c8c8b..db21f17df996 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -193,6 +193,15 @@ struct scrub_ctx {
*/
struct btrfs_scrub_progress stat;
spinlock_t stat_lock;
+
+ /*
+ * Use a ref counter to avoid use-after-free issues. Scrub workers
+ * decrement bios_in_flight and workers_pending and then do a wakeup
+ * on the list_wait wait queue. We must ensure the main scrub task
+ * doesn't free the scrub context before or while the workers are
+ * doing the wakeup() call.
+ */
+ atomic_t refs;
};

struct scrub_fixup_nodatasum {
@@ -297,26 +306,20 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
+static void scrub_put_ctx(struct scrub_ctx *sctx);

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
+ atomic_inc(&sctx->refs);
atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
- struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
-
- /*
- * Hold the scrub_lock while doing the wakeup to ensure the
- * sctx (and its wait queue list_wait) isn't destroyed/freed
- * during the wakeup.
- */
- mutex_lock(&fs_info->scrub_lock);
atomic_dec(&sctx->bios_in_flight);
wake_up(&sctx->list_wait);
- mutex_unlock(&fs_info->scrub_lock);
+ scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
@@ -350,6 +353,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

+ atomic_inc(&sctx->refs);
/*
* increment scrubs_running to prevent cancel requests from
* completing as long as a worker is running. we must also
@@ -388,15 +392,11 @@ static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
mutex_lock(&fs_info->scrub_lock);
atomic_dec(&fs_info->scrubs_running);
atomic_dec(&fs_info->scrubs_paused);
+ mutex_unlock(&fs_info->scrub_lock);
atomic_dec(&sctx->workers_pending);
wake_up(&fs_info->scrub_pause_wait);
- /*
- * Hold the scrub_lock while doing the wakeup to ensure the
- * sctx (and its wait queue list_wait) isn't destroyed/freed
- * during the wakeup.
- */
wake_up(&sctx->list_wait);
- mutex_unlock(&fs_info->scrub_lock);
+ scrub_put_ctx(sctx);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
@@ -442,6 +442,12 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
kfree(sctx);
}

+static void scrub_put_ctx(struct scrub_ctx *sctx)
+{
+ if (atomic_dec_and_test(&sctx->refs))
+ scrub_free_ctx(sctx);
+}
+
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
@@ -466,6 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
if (!sctx)
goto nomem;
+ atomic_set(&sctx->refs, 1);
sctx->is_dev_replace = is_dev_replace;
sctx->pages_per_rd_bio = pages_per_rd_bio;
sctx->curr = -1;
@@ -3739,7 +3746,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
scrub_workers_put(fs_info);
mutex_unlock(&fs_info->scrub_lock);

- scrub_free_ctx(sctx);
+ scrub_put_ctx(sctx);

return ret;
}
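
The fix applies the classic get/put reference counting pattern: the scrub context starts with one reference owned by the main scrub task (atomic_set(&sctx->refs, 1)), each in-flight bio or pending worker takes an extra reference before it can later touch the wait queue, and whoever drops the count to zero frees the structure. Below is a minimal userspace sketch of that pattern; struct ctx, ctx_get/ctx_put and pending_bio_inc are illustrative names (not btrfs code), and C11 atomics plus pthreads stand in for the kernel's atomic_t and work queues.

/*
 * Minimal userspace sketch of the get/put refcount pattern used by
 * this patch. Illustrative only: the names are made up, and C11
 * atomics stand in for the kernel's atomic_t.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_int refs;           /* one ref per owner: main task + each worker */
	atomic_int bios_in_flight; /* what the main task waits on */
};

static struct ctx *ctx_alloc(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	atomic_init(&c->refs, 1);  /* initial ref owned by the main task */
	atomic_init(&c->bios_in_flight, 0);
	return c;
}

static void ctx_get(struct ctx *c)
{
	atomic_fetch_add(&c->refs, 1);
}

static void ctx_put(struct ctx *c)
{
	/* fetch_sub returns the old value; 1 means we dropped the last ref */
	if (atomic_fetch_sub(&c->refs, 1) == 1) {
		printf("last ref dropped, freeing ctx\n");
		free(c);
	}
}

static void pending_bio_inc(struct ctx *c)
{
	ctx_get(c);                /* keep ctx alive until the matching dec */
	atomic_fetch_add(&c->bios_in_flight, 1);
}

static void *worker(void *arg)
{
	struct ctx *c = arg;

	/* ... do I/O ... */
	atomic_fetch_sub(&c->bios_in_flight, 1);
	/*
	 * In the kernel a wake_up(&sctx->list_wait) happens here; the main
	 * task may observe bios_in_flight == 0 and drop its own reference
	 * at any point after this line, which is exactly why the worker
	 * holds its own ref and calls ctx_put() last.
	 */
	ctx_put(c);
	return NULL;
}

int main(void)
{
	struct ctx *c = ctx_alloc();
	pthread_t t;

	pending_bio_inc(c);
	pthread_create(&t, NULL, worker, c);
	pthread_join(t, NULL);

	ctx_put(c);                /* main task drops its initial ref */
	return 0;
}

The test-and-free step in ctx_put() is what the patch gets from atomic_dec_and_test() in scrub_put_ctx(); the kernel's kref helpers wrap the same idiom behind a release callback.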