Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--	fs/btrfs/scrub.c	154
1 file changed, 152 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 5f4f88a4d2c8..c2900ebf767a 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -166,6 +166,7 @@ struct scrub_ctx {
int pages_per_rd_bio;
int is_dev_replace;
+ u64 write_pointer;
struct scrub_bio *wr_curr_bio;
struct mutex wr_lock;
@@ -856,6 +857,9 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
have_csum = sblock_to_check->pagev[0]->have_csum;
dev = sblock_to_check->pagev[0]->dev;
+ if (btrfs_is_zoned(fs_info) && !sctx->is_dev_replace)
+ return btrfs_repair_one_zone(fs_info, logical);
+
/*
* We must use GFP_NOFS because the scrub task might be waiting for a
* worker task executing this function and in turn a transaction commit
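The early return added above exists because a sequential zone cannot be overwritten in place; on a zoned filesystem scrub repairs the whole affected zone through btrfs_repair_one_zone() (relocation) rather than rewriting the bad sectors. A minimal userspace sketch of the underlying constraint, with made-up names (seq_zone, try_overwrite) that are not kernel API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of why in-place repair is impossible on a zoned device: a
 * sequential-required zone only accepts writes at its write pointer, so a
 * good copy cannot be rewritten over an already-written, corrupted offset.
 * Illustrative only; not kernel code.
 */
struct seq_zone {
	uint64_t write_pointer;		/* next byte offset the zone accepts */
};

static int try_overwrite(const struct seq_zone *z, uint64_t offset)
{
	if (offset != z->write_pointer)
		return -EIO;		/* device rejects unaligned writes */
	return 0;
}

int main(void)
{
	struct seq_zone z = { .write_pointer = 262144 };

	/* rewriting a block at 64 KiB, below the write pointer, cannot work */
	printf("overwrite at 65536 -> %d\n", try_overwrite(&z, 65536));
	/* appending at the write pointer is the only write the zone allows */
	printf("append at 262144   -> %d\n", try_overwrite(&z, 262144));
	return 0;
}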
@@ -1619,6 +1623,28 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}
+static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
+{
+ int ret = 0;
+ u64 length;
+
+ if (!btrfs_is_zoned(sctx->fs_info))
+ return 0;
+
+ if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
+ return 0;
+
+ if (sctx->write_pointer < physical) {
+ length = physical - sctx->write_pointer;
+
+ ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
+ sctx->write_pointer, length);
+ if (!ret)
+ sctx->write_pointer = physical;
+ }
+ return ret;
+}
+
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
struct scrub_page *spage)
{
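fill_writer_pointer_gap() above covers the case where the next extent to copy starts beyond the target zone's cached write pointer: the hole is zero-filled via btrfs_zoned_issue_zeroout() so the device's write pointer lands exactly on the extent's physical start. A self-contained toy model of that bookkeeping, assuming made-up helpers (zone_model, fake_zeroout, fill_gap_then_write) rather than kernel API:

#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of fill_writer_pointer_gap(): a sequential zone only accepts
 * writes at its current write pointer, so copying an extent that starts
 * beyond the pointer first requires zero-filling the gap. Names are
 * illustrative, not kernel API.
 */
struct zone_model {
	uint64_t write_pointer;		/* next byte offset the zone accepts */
};

/* stand-in for btrfs_zoned_issue_zeroout(): pretend to write zeroes */
static int fake_zeroout(struct zone_model *z, uint64_t start, uint64_t len)
{
	printf("zeroout [%llu, %llu)\n",
	       (unsigned long long)start, (unsigned long long)(start + len));
	z->write_pointer = start + len;
	return 0;
}

static int fill_gap_then_write(struct zone_model *z, uint64_t physical,
			       uint64_t len)
{
	int ret = 0;

	if (z->write_pointer < physical)
		ret = fake_zeroout(z, z->write_pointer,
				   physical - z->write_pointer);
	if (ret)
		return ret;

	printf("write   [%llu, %llu)\n",
	       (unsigned long long)physical,
	       (unsigned long long)(physical + len));
	z->write_pointer = physical + len;	/* mirrors scrub_wr_submit() */
	return 0;
}

int main(void)
{
	struct zone_model z = { .write_pointer = 0 };

	/* two extents copied in physical order, with a 64 KiB hole between */
	fill_gap_then_write(&z, 0, 65536);
	fill_gap_then_write(&z, 131072, 65536);	/* triggers the zero-fill */
	return 0;
}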
@@ -1641,6 +1667,13 @@ again:
if (sbio->page_count == 0) {
struct bio *bio;
+ ret = fill_writer_pointer_gap(sctx,
+ spage->physical_for_dev_replace);
+ if (ret) {
+ mutex_unlock(&sctx->wr_lock);
+ return ret;
+ }
+
sbio->physical = spage->physical_for_dev_replace;
sbio->logical = spage->logical;
sbio->dev = sctx->wr_tgtdev;
@@ -1695,13 +1728,16 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
sbio = sctx->wr_curr_bio;
sctx->wr_curr_bio = NULL;
- WARN_ON(!sbio->bio->bi_disk);
+ WARN_ON(!sbio->bio->bi_bdev);
scrub_pending_bio_inc(sctx);
/* process all writes in a single worker thread. Then the block layer
* orders the requests before sending them to the driver which
* doubled the write performance on spinning disks when measured
* with Linux 3.5 */
btrfsic_submit_bio(sbio->bio);
+
+ if (btrfs_is_zoned(sctx->fs_info))
+ sctx->write_pointer = sbio->physical + sbio->page_count * PAGE_SIZE;
}
static void scrub_wr_bio_end_io(struct bio *bio)
@@ -3025,6 +3061,46 @@ out:
return ret < 0 ? ret : 0;
}
+static void sync_replace_for_zoned(struct scrub_ctx *sctx)
+{
+ if (!btrfs_is_zoned(sctx->fs_info))
+ return;
+
+ sctx->flush_all_writes = true;
+ scrub_submit(sctx);
+ mutex_lock(&sctx->wr_lock);
+ scrub_wr_submit(sctx);
+ mutex_unlock(&sctx->wr_lock);
+
+ wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
+}
+
+static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
+ u64 physical, u64 physical_end)
+{
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ int ret = 0;
+
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
+
+ mutex_lock(&sctx->wr_lock);
+ if (sctx->write_pointer < physical_end) {
+ ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
+ physical,
+ sctx->write_pointer);
+ if (ret)
+ btrfs_err(fs_info,
+ "zoned: failed to recover write pointer");
+ }
+ mutex_unlock(&sctx->wr_lock);
+ btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
+
+ return ret;
+}
+
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
struct map_lookup *map,
struct btrfs_device *scrub_dev,
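Of the two helpers added above, sync_write_pointer_for_zoned() runs once a stripe has been copied: if the target zone's cached write pointer still trails the corresponding position on the source device, the difference is zero-filled so both zones end with their write pointers at the same physical offset. A rough arithmetic sketch of that catch-up step, using an invented name (sync_target_wp):

#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the catch-up done by sync_write_pointer_for_zoned(): the
 * copied zone may hold fewer written bytes than the source zone (its tail
 * carried no extents), so the target is zero-filled until both write
 * pointers match and the devices stay physically identical. Illustrative
 * only; not kernel API.
 */
static uint64_t sync_target_wp(uint64_t src_wp, uint64_t tgt_wp)
{
	if (tgt_wp < src_wp) {
		printf("zero-fill target [%llu, %llu)\n",
		       (unsigned long long)tgt_wp, (unsigned long long)src_wp);
		tgt_wp = src_wp;
	}
	return tgt_wp;
}

int main(void)
{
	/* source zone written up to 1 MiB, the copy stopped at 768 KiB */
	uint64_t wp = sync_target_wp(1048576, 786432);

	printf("target write pointer now at %llu\n", (unsigned long long)wp);
	return 0;
}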
@@ -3165,6 +3241,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
*/
blk_start_plug(&plug);
+ if (sctx->is_dev_replace &&
+ btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
+ mutex_lock(&sctx->wr_lock);
+ sctx->write_pointer = physical;
+ mutex_unlock(&sctx->wr_lock);
+ sctx->flush_all_writes = true;
+ }
+
/*
* now find all extents for each stripe and scrub them
*/
@@ -3353,6 +3437,9 @@ again:
if (ret)
goto out;
+ if (sctx->is_dev_replace)
+ sync_replace_for_zoned(sctx);
+
if (extent_logical + extent_len <
key.objectid + bytes) {
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
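sync_replace_for_zoned() is invoked here after each extent during a zoned dev-replace: it submits everything queued and waits until no bios are in flight, so writes reach the sequential target zone strictly in order. A userspace analogue of that flush-and-wait barrier, using a pthread condition variable where the kernel uses wait_event() on sctx->bios_in_flight (all names below are illustrative):

#include <pthread.h>
#include <stdio.h>

/*
 * Userspace analogue of the barrier in sync_replace_for_zoned(): after
 * submitting the queued bios, sleep until the in-flight count drops to
 * zero so the next extent's writes cannot overtake the current ones on
 * the sequential target zone. Illustrative only; not kernel code.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int bios_in_flight;

/* completion handler for one fake bio */
static void *fake_bio_endio(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (--bios_in_flight == 0)
		pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	int i;

	/* "submit" four bios for the current extent */
	bios_in_flight = 4;
	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, fake_bio_endio, NULL);

	/* the barrier: do not touch the next extent until all have completed */
	pthread_mutex_lock(&lock);
	while (bios_in_flight > 0)
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
	printf("all writes flushed; next extent can be copied\n");

	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}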
@@ -3420,6 +3507,17 @@ out:
blk_finish_plug(&plug);
btrfs_free_path(path);
btrfs_free_path(ppath);
+
+ if (sctx->is_dev_replace && ret >= 0) {
+ int ret2;
+
+ ret2 = sync_write_pointer_for_zoned(sctx, base + offset,
+ map->stripes[num].physical,
+ physical_end);
+ if (ret2)
+ ret = ret2;
+ }
+
return ret < 0 ? ret : 0;
}
@@ -3475,6 +3573,25 @@ out:
return ret;
}
+static int finish_extent_writes_for_zoned(struct btrfs_root *root,
+ struct btrfs_block_group *cache)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_trans_handle *trans;
+
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ btrfs_wait_block_group_reservations(cache);
+ btrfs_wait_nocow_writers(cache);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ return btrfs_commit_transaction(trans);
+}
+
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
struct btrfs_device *scrub_dev, u64 start, u64 end)
@@ -3561,6 +3678,16 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
if (!cache)
goto skip;
+ if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
+ spin_lock(&cache->lock);
+ if (!cache->to_copy) {
+ spin_unlock(&cache->lock);
+ ro_set = 0;
+ goto done;
+ }
+ spin_unlock(&cache->lock);
+ }
+
/*
* Make sure that while we are scrubbing the corresponding block
* group doesn't get its logical address and its device extents
@@ -3619,6 +3746,16 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
* group is not RO.
*/
ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
+ if (!ret && sctx->is_dev_replace) {
+ ret = finish_extent_writes_for_zoned(root, cache);
+ if (ret) {
+ btrfs_dec_block_group_ro(cache);
+ scrub_pause_off(fs_info);
+ btrfs_put_block_group(cache);
+ break;
+ }
+ }
+
if (ret == 0) {
ro_set = 1;
} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
@@ -3630,6 +3767,13 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
* commit_transactions.
*/
ro_set = 0;
+ } else if (ret == -ETXTBSY) {
+ btrfs_warn(fs_info,
+ "skipping scrub of block group %llu due to active swapfile",
+ cache->start);
+ scrub_pause_off(fs_info);
+ ret = 0;
+ goto skip_unfreeze;
} else {
btrfs_warn(fs_info,
"failed setting block group ro: %d", ret);
@@ -3692,6 +3836,12 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
scrub_pause_off(fs_info);
+ if (sctx->is_dev_replace &&
+ !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
+ cache, found_key.offset))
+ ro_set = 0;
+
+done:
down_write(&dev_replace->rwsem);
dev_replace->cursor_left = dev_replace->cursor_right;
dev_replace->item_needs_writeback = 1;
@@ -3719,7 +3869,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
} else {
spin_unlock(&cache->lock);
}
-
+skip_unfreeze:
btrfs_unfreeze_block_group(cache);
btrfs_put_block_group(cache);
if (ret)