author     Alasdair G Kergon <agk@redhat.com>    2013-03-01 22:45:47 +0000
committer  Alasdair G Kergon <agk@redhat.com>    2013-03-01 22:45:47 +0000
commit     14fe594d679c9ba8c8e3d6ad1a3ed9c0ba336df0 (patch)
tree       9b0da04f6ddf9024f69f2196853b79c9ba6a440e /drivers/md/dm.c
parent     55a62eef8d1b50ceff3b7bf46851103bdcc7e5b0 (diff)
download   linux-14fe594d679c9ba8c8e3d6ad1a3ed9c0ba336df0.tar.bz2
dm: rename bio cloning functions
Rename functions involved in splitting and cloning bios.
The sequence of functions is now as follows (a simplified sketch of the call chain appears after the list):
(1) __split_and_process* - entry point that selects the processing strategy
(2) __send* - prepares the details for each bio needed and loops through them
(3) __clone_and_map* - creates a clone and maps it
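To make the layering concrete, here is a small, hypothetical userspace sketch of the renamed call chain. The function names mirror the kernel ones above, but the bodies, the is_flush flag, the printf output, and the num_flush_bios value are stand-ins for illustration only; this is not the drivers/md/dm.c code.

#include <stdio.h>

/* (3) __clone_and_map*: create a clone and map it (stub) */
static void __clone_and_map_simple_bio(unsigned target_bio_nr)
{
	printf("  clone bio %u and map it\n", target_bio_nr);
}

static void __clone_and_map_data_bio(void)
{
	printf("  clone data bio and map it\n");
}

/* (2) __send*: prepare the details for each bio needed and loop through them */
static void __send_duplicate_bios(unsigned num_bios)
{
	unsigned target_bio_nr;

	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
		__clone_and_map_simple_bio(target_bio_nr);
}

static void __send_empty_flush(unsigned num_flush_bios)
{
	printf("flush path:\n");
	__send_duplicate_bios(num_flush_bios);
}

/* (1) __split_and_process*: entry points that select the processing strategy */
static void __split_and_process_non_flush(void)
{
	printf("data path:\n");
	__clone_and_map_data_bio();
}

static void __split_and_process_bio(int is_flush)
{
	if (is_flush)
		__send_empty_flush(2);	/* pretend the target wants two flush bios */
	else
		__split_and_process_non_flush();
}

int main(void)
{
	__split_and_process_bio(1);
	__split_and_process_bio(0);
	return 0;
}

Run as a normal C program, the sketch prints the flush path (one clone per requested flush bio) followed by the data path (a single data clone), which is the split of responsibilities the new names are meant to convey.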
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--   drivers/md/dm.c   63
1 file changed, 31 insertions(+), 32 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index caef71befc43..02079cfccaf5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1061,9 +1061,9 @@ static void clone_bio_integrity(struct bio *bio, struct bio *clone,
 /*
  * Creates a little bio that just does part of a bvec.
  */
-static void split_bvec(struct dm_target_io *tio, struct bio *bio,
-		       sector_t sector, unsigned short idx, unsigned int offset,
-		       unsigned int len)
+static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
+			    sector_t sector, unsigned short idx,
+			    unsigned offset, unsigned len)
 {
 	struct bio *clone = &tio->clone;
 	struct bio_vec *bv = bio->bi_io_vec + idx;
@@ -1119,8 +1119,9 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
 	return tio;
 }
 
-static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
-				   unsigned target_bio_nr, sector_t len)
+static void __clone_and_map_simple_bio(struct clone_info *ci,
+				       struct dm_target *ti,
+				       unsigned target_bio_nr, sector_t len)
 {
 	struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
 	struct bio *clone = &tio->clone;
@@ -1137,31 +1138,29 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 	__map_bio(tio);
 }
 
-static void __issue_target_bios(struct clone_info *ci, struct dm_target *ti,
-				unsigned num_bios, sector_t len)
+static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+				  unsigned num_bios, sector_t len)
 {
 	unsigned target_bio_nr;
 
 	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
-		__issue_target_request(ci, ti, target_bio_nr, len);
+		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
 }
 
-static int __clone_and_map_empty_flush(struct clone_info *ci)
+static int __send_empty_flush(struct clone_info *ci)
 {
 	unsigned target_nr = 0;
 	struct dm_target *ti;
 
 	BUG_ON(bio_has_data(ci->bio));
 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
-		__issue_target_bios(ci, ti, ti->num_flush_bios, 0);
+		__send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
 
 	return 0;
 }
 
-/*
- * Perform all io with a single clone.
- */
-static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+static void __clone_and_map_data_bio(struct clone_info *ci,
+				     struct dm_target *ti)
 {
 	struct bio *bio = ci->bio;
 	struct dm_target_io *tio;
@@ -1192,9 +1191,9 @@ static bool is_split_required_for_discard(struct dm_target *ti)
 	return ti->split_discard_bios;
 }
 
-static int __clone_and_map_changing_extent_only(struct clone_info *ci,
-						get_num_bios_fn get_num_bios,
-						is_split_required_fn is_split_required)
+static int __send_changing_extent_only(struct clone_info *ci,
+				       get_num_bios_fn get_num_bios,
+				       is_split_required_fn is_split_required)
 {
 	struct dm_target *ti;
 	sector_t len;
@@ -1220,7 +1219,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
 		else
 			len = min(ci->sector_count, max_io_len(ci->sector, ti));
 
-		__issue_target_bios(ci, ti, num_bios, len);
+		__send_duplicate_bios(ci, ti, num_bios, len);
 
 		ci->sector += len;
 	} while (ci->sector_count -= len);
@@ -1228,18 +1227,18 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
 	return 0;
 }
 
-static int __clone_and_map_discard(struct clone_info *ci)
+static int __send_discard(struct clone_info *ci)
 {
-	return __clone_and_map_changing_extent_only(ci, get_num_discard_bios,
-						    is_split_required_for_discard);
+	return __send_changing_extent_only(ci, get_num_discard_bios,
+					   is_split_required_for_discard);
 }
 
-static int __clone_and_map_write_same(struct clone_info *ci)
+static int __send_write_same(struct clone_info *ci)
 {
-	return __clone_and_map_changing_extent_only(ci, get_num_write_same_bios, NULL);
+	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
 }
 
-static int __clone_and_map(struct clone_info *ci)
+static int __split_and_process_non_flush(struct clone_info *ci)
 {
 	struct bio *bio = ci->bio;
 	struct dm_target *ti;
@@ -1247,9 +1246,9 @@ static int __clone_and_map(struct clone_info *ci)
 	struct dm_target_io *tio;
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD))
-		return __clone_and_map_discard(ci);
+		return __send_discard(ci);
 	else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
-		return __clone_and_map_write_same(ci);
+		return __send_write_same(ci);
 
 	ti = dm_table_find_target(ci->map, ci->sector);
 	if (!dm_target_is_valid(ti))
@@ -1262,7 +1261,7 @@ static int __clone_and_map(struct clone_info *ci)
 		 * Optimise for the simple case where we can do all of
 		 * the remaining io with a single clone.
 		 */
-		__clone_and_map_simple(ci, ti);
+		__clone_and_map_data_bio(ci, ti);
 
 	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
 		/*
@@ -1311,8 +1310,8 @@ static int __clone_and_map(struct clone_info *ci)
 			len = min(remaining, max);
 
 			tio = alloc_tio(ci, ti, 1, 0);
-			split_bvec(tio, bio, ci->sector, ci->idx,
-				   bv->bv_offset + offset, len);
+			clone_split_bio(tio, bio, ci->sector, ci->idx,
+					bv->bv_offset + offset, len);
 
 			__map_bio(tio);
 
@@ -1328,7 +1327,7 @@ static int __clone_and_map(struct clone_info *ci)
 }
 
 /*
- * Split the bio into several clones and submit it to targets.
+ * Entry point to split a bio into clones and submit them to the targets.
  */
 static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 {
@@ -1356,13 +1355,13 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 	if (bio->bi_rw & REQ_FLUSH) {
 		ci.bio = &ci.md->flush_bio;
 		ci.sector_count = 0;
-		error = __clone_and_map_empty_flush(&ci);
+		error = __send_empty_flush(&ci);
 		/* dec_pending submits any data associated with flush */
 	} else {
 		ci.bio = bio;
 		ci.sector_count = bio_sectors(bio);
 		while (ci.sector_count && !error)
-			error = __clone_and_map(&ci);
+			error = __split_and_process_non_flush(&ci);
 	}
 
 	/* drop the extra reference count */