author    Zhao Lei <zhaolei@cn.fujitsu.com>    2015-01-20 15:11:42 +0800
committer Chris Mason <clm@fb.com>    2015-01-21 18:06:49 -0800
commit    be50a8ddaae1d07135fd7e1c7017c1611075a560 (patch)
tree      2867a34bbb15f23f9fa8a659bc3147d6fba0f8cc /fs/btrfs
parent    b968fed1c3810a0a0d575cab8a72431fbc807615 (diff)
download  linux-be50a8ddaae1d07135fd7e1c7017c1611075a560.tar.bz2
Btrfs: Simplify scrub_setup_recheck_block()'s argument
scrub_setup_recheck_block() has many arguments, but most of them can be derived from one of them, so we can remove them to make the code cleaner. Some other cleanups for that function are also included in this patch.

Signed-off-by: Zhao Lei <zhaolei@cn.fujitsu.com>
Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
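For quick reference, this is the prototype change the patch makes (copied from the hunks below); the function now derives sctx, fs_info, length and logical from original_sblock itself instead of taking them as parameters:

    /* Before: five parameters, four of which are derivable from original_sblock */
    static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
                                         struct btrfs_fs_info *fs_info,
                                         struct scrub_block *original_sblock,
                                         u64 length, u64 logical,
                                         struct scrub_block *sblocks_for_recheck);

    /* After: the function looks up sctx, fs_info, length and logical internally */
    static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
                                         struct scrub_block *sblocks_for_recheck);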
Diffstat (limited to 'fs/btrfs')
-rw-r--r--    fs/btrfs/scrub.c    25
1 file changed, 9 insertions, 16 deletions
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index aac40aefa552..c3a98931980f 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -235,10 +235,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
-static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
- struct btrfs_fs_info *fs_info,
- struct scrub_block *original_sblock,
- u64 length, u64 logical,
+static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
struct scrub_block *sblock, int is_metadata,
@@ -960,8 +957,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
}
/* setup the context, map the logical blocks and alloc the pages */
- ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
- logical, sblocks_for_recheck);
+ ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
if (ret) {
spin_lock(&sctx->stat_lock);
sctx->stat.read_errors++;
@@ -1301,19 +1297,20 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
}
}
-static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
- struct btrfs_fs_info *fs_info,
- struct scrub_block *original_sblock,
- u64 length, u64 logical,
+static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
struct scrub_block *sblocks_for_recheck)
{
+ struct scrub_ctx *sctx = original_sblock->sctx;
+ struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+ u64 length = original_sblock->page_count * PAGE_SIZE;
+ u64 logical = original_sblock->pagev[0]->logical;
struct scrub_recover *recover;
struct btrfs_bio *bbio;
u64 sublen;
u64 mapped_length;
u64 stripe_offset;
int stripe_index;
- int page_index;
+ int page_index = 0;
int mirror_index;
int nmirrors;
int ret;
@@ -1324,7 +1321,6 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
* the recheck procedure
*/
- page_index = 0;
while (length > 0) {
sublen = min_t(u64, length, PAGE_SIZE);
mapped_length = sublen;
@@ -1353,15 +1349,12 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
- nmirrors = scrub_nr_raid_mirrors(bbio);
+ nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
for (mirror_index = 0; mirror_index < nmirrors;
mirror_index++) {
struct scrub_block *sblock;
struct scrub_page *page;
- if (mirror_index >= BTRFS_MAX_MIRRORS)
- break;
-
sblock = sblocks_for_recheck + mirror_index;
sblock->sctx = sctx;
page = kzalloc(sizeof(*page), GFP_NOFS);