author     Christoph Hellwig <hch@infradead.org>   2011-09-12 12:12:01 +0200
committer  Jens Axboe <jaxboe@fusionio.com>        2011-09-12 12:12:01 +0200
commit     5a7bbad27a410350e64a2d7f5ec18fc73836c14f (patch)
tree       3447cd62dbcbd77b4071e2eb7576f1d7632ef2d3 /drivers/md
parent     c20e8de27fef9f59869c81c288ad6cf28200e00c (diff)
download   linux-5a7bbad27a410350e64a2d7f5ec18fc73836c14f.tar.bz2
block: remove support for bio remapping from ->make_request
There is very little benefit in letting a ->make_request instance
update the bio's device and sector and then looping around it in
__generic_make_request, when we can achieve the same by calling
generic_make_request from the driver and letting the loop in
generic_make_request handle it.
Note that various drivers had been returning non-zero values from
->make_request to signal errors.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: NeilBrown <neilb@suse.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
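
To make the calling-convention change concrete, here is an illustrative, driver-agnostic sketch (not taken from this patch; pick_lower_device() and lower_device_offset() are hypothetical helpers) of the old remap-and-return-1 pattern versus the new submit-it-yourself pattern:

/*
 * Illustrative sketch only -- not part of the patch.  The helpers
 * pick_lower_device() and lower_device_offset() are hypothetical.
 */

/* Old convention: remap the bio and return 1 so that
 * __generic_make_request loops around and resubmits it. */
static int old_style_make_request(struct request_queue *q, struct bio *bio)
{
	bio->bi_bdev = pick_lower_device(q);       /* hypothetical */
	bio->bi_sector += lower_device_offset(q);  /* hypothetical */
	return 1;	/* ask the block layer to resubmit the remapped bio */
}

/* New convention: return void and submit the remapped bio directly;
 * generic_make_request already deals with recursion. */
static void new_style_make_request(struct request_queue *q, struct bio *bio)
{
	bio->bi_bdev = pick_lower_device(q);       /* hypothetical */
	bio->bi_sector += lower_device_offset(q);  /* hypothetical */
	generic_make_request(bio);
}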
Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm.c        | 14
-rw-r--r--   drivers/md/faulty.c    | 14
-rw-r--r--   drivers/md/linear.c    | 17
-rw-r--r--   drivers/md/md.c        | 12
-rw-r--r--   drivers/md/md.h        |  2
-rw-r--r--   drivers/md/multipath.c |  8
-rw-r--r--   drivers/md/raid0.c     | 22
-rw-r--r--   drivers/md/raid1.c     |  8
-rw-r--r--   drivers/md/raid10.c    | 19
-rw-r--r--   drivers/md/raid5.c     |  8
10 files changed, 53 insertions, 71 deletions
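
The "loop in generic_make_request" that the commit message relies on is the per-task, on-stack bio list that flattens recursive submissions. A heavily condensed, from-memory sketch of that mechanism (not the literal block/blk-core.c code of this series) looks roughly like this:

/* Condensed sketch of the recursion-flattening loop; details differ
 * from the real generic_make_request() in block/blk-core.c. */
void generic_make_request(struct bio *bio)
{
	struct bio_list bio_list_on_stack;

	if (current->bio_list) {
		/* Already inside a ->make_request call on this task:
		 * queue the bio and let the outer loop submit it. */
		bio_list_add(current->bio_list, bio);
		return;
	}

	bio_list_init(&bio_list_on_stack);
	current->bio_list = &bio_list_on_stack;
	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		q->make_request_fn(q, bio);	/* may queue further bios */
		bio = bio_list_pop(current->bio_list);
	} while (bio);
	current->bio_list = NULL;
}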
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 78b20868bcbc..7b986e77b75e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1388,7 +1388,7 @@ out:
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static int _dm_request(struct request_queue *q, struct bio *bio)
+static void _dm_request(struct request_queue *q, struct bio *bio)
 {
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
@@ -1409,12 +1409,12 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
-		return 0;
+		return;
 	}
 
 	__split_and_process_bio(md, bio);
 	up_read(&md->io_lock);
-	return 0;
+	return;
 }
 
 static int dm_request_based(struct mapped_device *md)
@@ -1422,14 +1422,14 @@ static int dm_request_based(struct mapped_device *md)
 	return blk_queue_stackable(md->queue);
 }
 
-static int dm_request(struct request_queue *q, struct bio *bio)
+static void dm_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
 
 	if (dm_request_based(md))
-		return blk_queue_bio(q, bio);
-
-	return _dm_request(q, bio);
+		blk_queue_bio(q, bio);
+	else
+		_dm_request(q, bio);
 }
 
 void dm_dispatch_request(struct request *rq)
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 23078dabb6df..5ef304d4341c 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -169,7 +169,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
 	conf->nfaults = n+1;
 }
 
-static int make_request(mddev_t *mddev, struct bio *bio)
+static void make_request(mddev_t *mddev, struct bio *bio)
 {
 	conf_t *conf = mddev->private;
 	int failit = 0;
@@ -181,7 +181,7 @@ static int make_request(mddev_t *mddev, struct bio *bio)
 			 * just fail immediately
 			 */
 			bio_endio(bio, -EIO);
-			return 0;
+			return;
 		}
 
 		if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
@@ -211,15 +211,15 @@ static int make_request(mddev_t *mddev, struct bio *bio)
 	}
 	if (failit) {
 		struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);
+
 		b->bi_bdev = conf->rdev->bdev;
 		b->bi_private = bio;
 		b->bi_end_io = faulty_fail;
-		generic_make_request(b);
-		return 0;
-	} else {
+		bio = b;
+	} else
 		bio->bi_bdev = conf->rdev->bdev;
-		return 1;
-	}
+
+	generic_make_request(bio);
 }
 
 static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 6cd2c313e800..c6ee491d98e7 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -264,14 +264,14 @@ static int linear_stop (mddev_t *mddev)
 	return 0;
 }
 
-static int linear_make_request (mddev_t *mddev, struct bio *bio)
+static void linear_make_request (mddev_t *mddev, struct bio *bio)
 {
 	dev_info_t *tmp_dev;
 	sector_t start_sector;
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	rcu_read_lock();
@@ -293,7 +293,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 		       (unsigned long long)start_sector);
 		rcu_read_unlock();
 		bio_io_error(bio);
-		return 0;
+		return;
 	}
 	if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
 		     tmp_dev->end_sector)) {
@@ -307,20 +307,17 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 
 		bp = bio_split(bio, end_sector - bio->bi_sector);
 
-		if (linear_make_request(mddev, &bp->bio1))
-			generic_make_request(&bp->bio1);
-		if (linear_make_request(mddev, &bp->bio2))
-			generic_make_request(&bp->bio2);
+		linear_make_request(mddev, &bp->bio1);
+		linear_make_request(mddev, &bp->bio2);
 		bio_pair_release(bp);
-		return 0;
+		return;
 	}
 
 	bio->bi_bdev = tmp_dev->rdev->bdev;
 	bio->bi_sector = bio->bi_sector - start_sector
 		+ tmp_dev->rdev->data_offset;
 	rcu_read_unlock();
-
-	return 1;
+	generic_make_request(bio);
 }
 
 static void linear_status (struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8e221a20f5d9..5c2178562c96 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -330,18 +330,17 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
-static int md_make_request(struct request_queue *q, struct bio *bio)
+static void md_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	mddev_t *mddev = q->queuedata;
-	int rv;
 	int cpu;
 	unsigned int sectors;
 
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
 		bio_io_error(bio);
-		return 0;
+		return;
 	}
 	smp_rmb(); /* Ensure implications of 'active' are visible */
 	rcu_read_lock();
@@ -366,7 +365,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 	 * go away inside make_request
 	 */
 	sectors = bio_sectors(bio);
-	rv = mddev->pers->make_request(mddev, bio);
+	mddev->pers->make_request(mddev, bio);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
@@ -375,8 +374,6 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
 		wake_up(&mddev->sb_wait);
-
-	return rv;
 }
 
 /* mddev_suspend makes sure no new requests are submitted
@@ -475,8 +472,7 @@ static void md_submit_flush_data(struct work_struct *ws)
 		bio_endio(bio, 0);
 	else {
 		bio->bi_rw &= ~REQ_FLUSH;
-		if (mddev->pers->make_request(mddev, bio))
-			generic_make_request(bio);
+		mddev->pers->make_request(mddev, bio);
 	}
 
 	mddev->flush_bio = NULL;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1e586bb4452e..bd47847cf7ca 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -424,7 +424,7 @@ struct mdk_personality
 	int level;
 	struct list_head list;
 	struct module *owner;
-	int (*make_request)(mddev_t *mddev, struct bio *bio);
+	void (*make_request)(mddev_t *mddev, struct bio *bio);
 	int (*run)(mddev_t *mddev);
 	int (*stop)(mddev_t *mddev);
 	void (*status)(struct seq_file *seq, mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3535c23af288..407cb5691425 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,7 +106,7 @@ static void multipath_end_request(struct bio *bio, int error)
 	rdev_dec_pending(rdev, conf->mddev);
 }
 
-static int multipath_make_request(mddev_t *mddev, struct bio * bio)
+static void multipath_make_request(mddev_t *mddev, struct bio * bio)
 {
 	multipath_conf_t *conf = mddev->private;
 	struct multipath_bh * mp_bh;
@@ -114,7 +114,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
@@ -126,7 +126,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	if (mp_bh->path < 0) {
 		bio_endio(bio, -EIO);
 		mempool_free(mp_bh, conf->pool);
-		return 0;
+		return;
 	}
 	multipath = conf->multipaths + mp_bh->path;
 
@@ -137,7 +137,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
-	return 0;
+	return;
 }
 
 static void multipath_status (struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e86bf3682e1e..4066615d61af 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -466,7 +466,7 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
 	}
 }
 
-static int raid0_make_request(mddev_t *mddev, struct bio *bio)
+static void raid0_make_request(mddev_t *mddev, struct bio *bio)
 {
 	unsigned int chunk_sects;
 	sector_t sector_offset;
@@ -475,7 +475,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	chunk_sects = mddev->chunk_sectors;
@@ -495,13 +495,10 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 		else
 			bp = bio_split(bio, chunk_sects -
				       sector_div(sector, chunk_sects));
-		if (raid0_make_request(mddev, &bp->bio1))
-			generic_make_request(&bp->bio1);
-		if (raid0_make_request(mddev, &bp->bio2))
-			generic_make_request(&bp->bio2);
-
+		raid0_make_request(mddev, &bp->bio1);
+		raid0_make_request(mddev, &bp->bio2);
 		bio_pair_release(bp);
-		return 0;
+		return;
 	}
 
 	sector_offset = bio->bi_sector;
@@ -511,10 +508,9 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 	bio->bi_bdev = tmp_dev->bdev;
 	bio->bi_sector = sector_offset + zone->dev_start +
 		tmp_dev->data_offset;
-	/*
-	 * Let the main block layer submit the IO and resolve recursion:
-	 */
-	return 1;
+
+	generic_make_request(bio);
+	return;
 
 bad_map:
 	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
@@ -523,7 +519,7 @@ bad_map:
 	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
 	bio_io_error(bio);
-	return 0;
+	return;
 }
 
 static void raid0_status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 32323f0afd89..97f2a5f977b1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -785,7 +785,7 @@ do_sync_io:
 	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 }
 
-static int make_request(mddev_t *mddev, struct bio * bio)
+static void make_request(mddev_t *mddev, struct bio * bio)
 {
 	conf_t *conf = mddev->private;
 	mirror_info_t *mirror;
@@ -870,7 +870,7 @@ read_again:
 
 		if (rdisk < 0) {
 			/* couldn't find anywhere to read from */
 			raid_end_bio_io(r1_bio);
-			return 0;
+			return;
 		}
 		mirror = conf->mirrors + rdisk;
@@ -928,7 +928,7 @@ read_again:
 			goto read_again;
 		} else
 			generic_make_request(read_bio);
-		return 0;
+		return;
 	}
 
 	/*
@@ -1119,8 +1119,6 @@ read_again:
 
 	if (do_sync || !bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
-
-	return 0;
 }
 
 static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8b29cd4f01c8..04b625e1cb60 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -825,7 +825,7 @@ static void unfreeze_array(conf_t *conf)
 	spin_unlock_irq(&conf->resync_lock);
 }
 
-static int make_request(mddev_t *mddev, struct bio * bio)
+static void make_request(mddev_t *mddev, struct bio * bio)
 {
 	conf_t *conf = mddev->private;
 	mirror_info_t *mirror;
@@ -844,7 +844,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	/* If this request crosses a chunk boundary, we need to
@@ -876,10 +876,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		conf->nr_waiting++;
 		spin_unlock_irq(&conf->resync_lock);
 
-		if (make_request(mddev, &bp->bio1))
-			generic_make_request(&bp->bio1);
-		if (make_request(mddev, &bp->bio2))
-			generic_make_request(&bp->bio2);
+		make_request(mddev, &bp->bio1);
+		make_request(mddev, &bp->bio2);
 
 		spin_lock_irq(&conf->resync_lock);
 		conf->nr_waiting--;
@@ -887,14 +885,14 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		spin_unlock_irq(&conf->resync_lock);
 
 		bio_pair_release(bp);
-		return 0;
+		return;
 	bad_map:
 		printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 		       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
 		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
 		bio_io_error(bio);
-		return 0;
+		return;
 	}
 
 	md_write_start(mddev, bio);
@@ -937,7 +935,7 @@ read_again:
 		slot = r10_bio->read_slot;
 		if (disk < 0) {
 			raid_end_bio_io(r10_bio);
-			return 0;
+			return;
 		}
 		mirror = conf->mirrors + disk;
 
@@ -985,7 +983,7 @@ read_again:
 			goto read_again;
 		} else
 			generic_make_request(read_bio);
-		return 0;
+		return;
 	}
 
 	/*
@@ -1157,7 +1155,6 @@ retry_write:
 
 	if (do_sync || !mddev->bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
-	return 0;
 }
 
 static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dbae459fb02d..96b7f6a1b6f2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3695,7 +3695,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
 	return sh;
 }
 
-static int make_request(mddev_t *mddev, struct bio * bi)
+static void make_request(mddev_t *mddev, struct bio * bi)
 {
 	raid5_conf_t *conf = mddev->private;
 	int dd_idx;
@@ -3708,7 +3708,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 
 	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bi);
-		return 0;
+		return;
 	}
 
 	md_write_start(mddev, bi);
@@ -3716,7 +3716,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 	if (rw == READ &&
 	     mddev->reshape_position == MaxSector &&
 	     chunk_aligned_read(mddev,bi))
-		return 0;
+		return;
 
 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -3851,8 +3851,6 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 
 		bio_endio(bi, 0);
 	}
-
-	return 0;
 }
 
 static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);