author	Christoph Hellwig <hch@lst.de>	2017-08-23 19:10:32 +0200
committer	Jens Axboe <axboe@kernel.dk>	2017-08-23 12:49:55 -0600
commit	74d46992e0d9dee7f1f376de0d56d31614c8a17a (patch)
tree	399c316bb960541135e557bc1765caeec6d00084 /drivers
parent	c2ee070fb00365d7841f6661dcdc7fbe6620bdf8 (diff)
download	linux-74d46992e0d9dee7f1f376de0d56d31614c8a17a.tar.bz2
block: replace bi_bdev with a gendisk pointer and partitions index
This way we don't need a block_device structure to submit I/O. The block_device has different life time rules from the gendisk and request_queue, and is usually only available when the block device node is open. Other callers need to explicitly create one (e.g. the lightnvm passthrough code, or the new nvme multipathing code).

For the actual I/O path all that we need is the gendisk, which exists once per block device. But given that the block layer also does partition remapping we additionally need a partition index, which is used for said remapping in generic_make_request.

Note that all the block drivers generally want request_queue or sometimes the gendisk, so this removes a layer of indirection all over the stack.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
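[Editor's note] The bio_set_dev(), bio_copy_dev(), bio_dev() and bio_devname() helpers that the driver hunks below switch to are defined on the block-layer side of this change (include/linux/bio.h), which is outside the drivers-only diffstat shown here. As a rough sketch only, assuming the new bi_disk/bi_partno bio fields and a bd_partno member on struct block_device introduced by the core part of the series, the helpers amount to:

	/* sketch of the block-layer helpers; not part of this drivers diff */
	#define bio_set_dev(bio, bdev)				\
	do {							\
		(bio)->bi_disk = (bdev)->bd_disk;		\
		(bio)->bi_partno = (bdev)->bd_partno;		\
	} while (0)

	#define bio_copy_dev(dst, src)				\
	do {							\
		(dst)->bi_disk = (src)->bi_disk;		\
		(dst)->bi_partno = (src)->bi_partno;		\
	} while (0)

	#define bio_dev(bio)		disk_devt((bio)->bi_disk)
	#define bio_devname(bio, buf)	__bdevname(bio_dev(bio), (buf))

In practice, open-coded bio->bi_bdev assignments become bio_set_dev() calls so the partition index is captured along with the gendisk and generic_make_request() can still perform partition remapping, while callers that only need the queue use bio->bi_disk->queue instead of bdev_get_queue(bio->bi_bdev).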
Diffstat (limited to 'drivers')
-rw-r--r--drivers/block/brd.c5
-rw-r--r--drivers/block/drbd/drbd_actlog.c2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_receiver.c4
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/pktcdvd.c11
-rw-r--r--drivers/block/xen-blkback/blkback.c4
-rw-r--r--drivers/md/bcache/debug.c2
-rw-r--r--drivers/md/bcache/io.c2
-rw-r--r--drivers/md/bcache/journal.c6
-rw-r--r--drivers/md/bcache/request.c16
-rw-r--r--drivers/md/bcache/super.c6
-rw-r--r--drivers/md/bcache/writeback.c5
-rw-r--r--drivers/md/dm-bio-record.h9
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-cache-target.c4
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-delay.c4
-rw-r--r--drivers/md/dm-era-target.c2
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-integrity.c11
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-linear.c2
-rw-r--r--drivers/md/dm-log-writes.c8
-rw-r--r--drivers/md/dm-mpath.c2
-rw-r--r--drivers/md/dm-raid1.c12
-rw-r--r--drivers/md/dm-snap.c16
-rw-r--r--drivers/md/dm-stripe.c10
-rw-r--r--drivers/md/dm-switch.c2
-rw-r--r--drivers/md/dm-thin.c6
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm-zoned-metadata.c6
-rw-r--r--drivers/md/dm-zoned-target.c4
-rw-r--r--drivers/md/dm.c10
-rw-r--r--drivers/md/faulty.c4
-rw-r--r--drivers/md/linear.c6
-rw-r--r--drivers/md/md.c10
-rw-r--r--drivers/md/md.h9
-rw-r--r--drivers/md/multipath.c8
-rw-r--r--drivers/md/raid0.c7
-rw-r--r--drivers/md/raid1.c34
-rw-r--r--drivers/md/raid10.c50
-rw-r--r--drivers/md/raid5-cache.c6
-rw-r--r--drivers/md/raid5-ppl.c6
-rw-r--r--drivers/md/raid5.c12
-rw-r--r--drivers/nvdimm/nd.h4
-rw-r--r--drivers/nvme/host/core.c11
-rw-r--r--drivers/nvme/host/lightnvm.c15
-rw-r--r--drivers/nvme/target/io-cmd.c6
-rw-r--r--drivers/s390/block/dcssblk.c4
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/target/target_core_iblock.c4
55 files changed, 188 insertions(+), 205 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 104b71c0490d..006e1cb7e6f0 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -294,14 +294,13 @@ out:
static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
- struct block_device *bdev = bio->bi_bdev;
- struct brd_device *brd = bdev->bd_disk->private_data;
+ struct brd_device *brd = bio->bi_disk->private_data;
struct bio_vec bvec;
sector_t sector;
struct bvec_iter iter;
sector = bio->bi_iter.bi_sector;
- if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
+ if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
goto io_error;
bio_for_each_segment(bvec, bio, iter) {
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index e02c45cd3c5a..5f0eaee8c8a7 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -151,7 +151,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
op_flags |= REQ_SYNC;
bio = bio_alloc_drbd(GFP_NOIO);
- bio->bi_bdev = bdev->md_bdev;
+ bio_set_dev(bio, bdev->md_bdev);
bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, device->md_io.page, size, 0) != size)
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 809fd245c3dc..bd97908c766f 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1019,7 +1019,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
- bio->bi_bdev = device->ldev->md_bdev;
+ bio_set_dev(bio, device->ldev->md_bdev);
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d17b6e6393c7..819f9d0bc875 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1628,8 +1628,8 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
- if (!bio->bi_bdev) {
- drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
+ if (!bio->bi_disk) {
+ drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c7e95e6380fb..ece6e5d7dc3f 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1265,7 +1265,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
octx->device = device;
octx->ctx = ctx;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
@@ -1548,7 +1548,7 @@ next_bio:
}
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
bio_set_op_attrs(bio, op, op_flags);
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 8d6b5d137b5e..447c975f5481 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1179,7 +1179,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
else
type = DRBD_FAULT_DT_RD;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
/* State may have changed since we grabbed our reference on the
* ->ldev member. Double check, and short-circuit to endio.
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1d8726a8df34..c268d886c4f0 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1513,7 +1513,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
drbd_al_begin_io(device, &req->i);
drbd_req_make_private_bio(req, req->master_bio);
- req->private_bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(req->private_bio, device->ldev->backing_bdev);
generic_make_request(req->private_bio);
return 0;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9c00f29e40c1..60c086a53609 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4134,7 +4134,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive;
bio_init(&bio, &bio_vec, 1);
- bio.bi_bdev = bdev;
+ bio_set_dev(&bio, bdev);
bio_add_page(&bio, page, size, 0);
bio.bi_iter.bi_sector = 0;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 6b8b097abbb9..67974796c350 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1028,7 +1028,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
bio = pkt->r_bios[f];
bio_reset(bio);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio->bi_bdev = pd->bdev;
+ bio_set_dev(bio, pd->bdev);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
pkt->sector = new_sector;
bio_reset(pkt->bio);
- pkt->bio->bi_bdev = pd->bdev;
+ bio_set_dev(pkt->bio, pd->bdev);
bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
pkt->bio->bi_iter.bi_sector = new_sector;
pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
@@ -1267,7 +1267,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
bio_reset(pkt->w_bio);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- pkt->w_bio->bi_bdev = pd->bdev;
+ bio_set_dev(pkt->w_bio, pd->bdev);
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -2314,7 +2314,7 @@ static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
psd->pd = pd;
psd->bio = bio;
- cloned_bio->bi_bdev = pd->bdev;
+ bio_set_dev(cloned_bio, pd->bdev);
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio_sectors(bio);
@@ -2415,8 +2415,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
pd = q->queuedata;
if (!pd) {
- pr_err("%s incorrect request queue\n",
- bdevname(bio->bi_bdev, b));
+ pr_err("%s incorrect request queue\n", bio_devname(bio, b));
goto end_io;
}
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 5f3a813e7ae0..987d665e82de 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1363,7 +1363,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
goto fail_put_bio;
biolist[nbio++] = bio;
- bio->bi_bdev = preq.bdev;
+ bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
@@ -1382,7 +1382,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
goto fail_put_bio;
biolist[nbio++] = bio;
- bio->bi_bdev = preq.bdev;
+ bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio_set_op_attrs(bio, operation, operation_flags);
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 35a5a7210e51..61076eda2e6d 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -49,7 +49,7 @@ void bch_btree_verify(struct btree *b)
v->keys.ops = b->keys.ops;
bio = bch_bbio_alloc(b->c);
- bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
+ bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
bio->bi_opf = REQ_OP_READ | REQ_META;
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 6a9b85095e7b..7e871bdc0097 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -34,7 +34,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
struct bbio *b = container_of(bio, struct bbio, bio);
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
- bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
+ bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
b->submit_time_us = local_clock_us();
closure_bio_submit(bio, bio->bi_private);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 0352d05e495c..7e1d1c3ba33a 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -53,7 +53,7 @@ reread: left = ca->sb.bucket_size - offset;
bio_reset(bio);
bio->bi_iter.bi_sector = bucket + offset;
- bio->bi_bdev = ca->bdev;
+ bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
@@ -452,7 +452,7 @@ static void do_journal_discard(struct cache *ca)
bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
- bio->bi_bdev = ca->bdev;
+ bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
@@ -623,7 +623,7 @@ static void journal_write_unlocked(struct closure *cl)
bio_reset(bio);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
- bio->bi_bdev = ca->bdev;
+ bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 72eb97176403..0e1463d0c334 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -607,7 +607,7 @@ static void request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- struct request_queue *q = bdev_get_queue(s->orig_bio->bi_bdev);
+ struct request_queue *q = s->orig_bio->bi_disk->queue;
generic_end_io_acct(q, bio_data_dir(s->orig_bio),
&s->d->disk->part0, s->start_time);
@@ -735,7 +735,7 @@ static void cached_dev_read_done(struct closure *cl)
if (s->iop.bio) {
bio_reset(s->iop.bio);
s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
- s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
+ bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bch_bio_map(s->iop.bio, NULL);
@@ -794,7 +794,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
!(bio->bi_opf & REQ_META) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9,
- bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
+ get_capacity(bio->bi_disk) - bio_end_sector(bio));
s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
@@ -820,7 +820,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_submit;
cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
- cache_bio->bi_bdev = miss->bi_bdev;
+ bio_copy_dev(cache_bio, miss);
cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = request_endio;
@@ -919,7 +919,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
dc->disk.bio_split);
- flush->bi_bdev = bio->bi_bdev;
+ bio_copy_dev(flush, bio);
flush->bi_end_io = request_endio;
flush->bi_private = cl;
flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
@@ -956,13 +956,13 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
struct bio *bio)
{
struct search *s;
- struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+ struct bcache_device *d = bio->bi_disk->private_data;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
int rw = bio_data_dir(bio);
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
- bio->bi_bdev = dc->bdev;
+ bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) {
@@ -1072,7 +1072,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
{
struct search *s;
struct closure *cl;
- struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+ struct bcache_device *d = bio->bi_disk->private_data;
int rw = bio_data_dir(bio);
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 8352fad765f6..974d832e54a6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -257,7 +257,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
closure_init(cl, parent);
bio_reset(bio);
- bio->bi_bdev = dc->bdev;
+ bio_set_dev(bio, dc->bdev);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;
@@ -303,7 +303,7 @@ void bcache_write_super(struct cache_set *c)
SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
bio_reset(bio);
- bio->bi_bdev = ca->bdev;
+ bio_set_dev(bio, ca->bdev);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
@@ -508,7 +508,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
closure_init_stack(cl);
bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
- bio->bi_bdev = ca->bdev;
+ bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = prio_endio;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 42c66e76f05e..c49022a8dc9d 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -181,7 +181,7 @@ static void write_dirty(struct closure *cl)
dirty_init(w);
bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
io->bio.bi_iter.bi_sector = KEY_START(&w->key);
- io->bio.bi_bdev = io->dc->bdev;
+ bio_set_dev(&io->bio, io->dc->bdev);
io->bio.bi_end_io = dirty_endio;
closure_bio_submit(&io->bio, cl);
@@ -250,8 +250,7 @@ static void read_dirty(struct cached_dev *dc)
dirty_init(w);
bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
- io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
- &w->key, 0)->bdev;
+ bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
io->bio.bi_end_io = read_dirty_endio;
if (bio_alloc_pages(&io->bio, GFP_KERNEL))
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index dd3646111561..c82578af56a5 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -18,21 +18,24 @@
*/
struct dm_bio_details {
- struct block_device *bi_bdev;
+ struct gendisk *bi_disk;
+ u8 bi_partno;
unsigned long bi_flags;
struct bvec_iter bi_iter;
};
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
- bd->bi_bdev = bio->bi_bdev;
+ bd->bi_disk = bio->bi_disk;
+ bd->bi_partno = bio->bi_partno;
bd->bi_flags = bio->bi_flags;
bd->bi_iter = bio->bi_iter;
}
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
- bio->bi_bdev = bd->bi_bdev;
+ bio->bi_disk = bd->bi_disk;
+ bio->bi_partno = bd->bi_partno;
bio->bi_flags = bd->bi_flags;
bio->bi_iter = bd->bi_iter;
}
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 44f4a8ac95bd..9601225e0ae9 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -616,7 +616,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
b->bio.bi_iter.bi_sector = sector;
- b->bio.bi_bdev = b->c->bdev;
+ bio_set_dev(&b->bio, b->c->bdev);
b->bio.bi_end_io = inline_endio;
/*
* Use of .bi_private isn't a problem here because
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index c5ea03fc7ee1..dcac25c2be7a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -833,7 +833,7 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
*--------------------------------------------------------------*/
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
- bio->bi_bdev = cache->origin_dev->bdev;
+ bio_set_dev(bio, cache->origin_dev->bdev);
}
static void remap_to_cache(struct cache *cache, struct bio *bio,
@@ -842,7 +842,7 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
sector_t bi_sector = bio->bi_iter.bi_sector;
sector_t block = from_cblock(cblock);
- bio->bi_bdev = cache->cache_dev->bdev;
+ bio_set_dev(bio, cache->cache_dev->bdev);
if (!block_size_is_power_of_two(cache))
bio->bi_iter.bi_sector =
(block * cache->sectors_per_block) +
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 73c2e270cda6..ca99147208a9 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1544,7 +1544,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
- clone->bi_bdev = cc->dev->bdev;
+ bio_set_dev(clone, cc->dev->bdev);
clone->bi_opf = io->base_bio->bi_opf;
}
@@ -2793,7 +2793,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
*/
if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
bio_op(bio) == REQ_OP_DISCARD)) {
- bio->bi_bdev = cc->dev->bdev;
+ bio_set_dev(bio, cc->dev->bdev);
if (bio_sectors(bio))
bio->bi_iter.bi_sector = cc->start +
dm_target_offset(ti, bio->bi_iter.bi_sector);
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index ae3158795d26..2209a9700acd 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -282,7 +282,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
struct delay_c *dc = ti->private;
if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
- bio->bi_bdev = dc->dev_write->bdev;
+ bio_set_dev(bio, dc->dev_write->bdev);
if (bio_sectors(bio))
bio->bi_iter.bi_sector = dc->start_write +
dm_target_offset(ti, bio->bi_iter.bi_sector);
@@ -290,7 +290,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
return delay_bio(dc, dc->write_delay, bio);
}
- bio->bi_bdev = dc->dev_read->bdev;
+ bio_set_dev(bio, dc->dev_read->bdev);
bio->bi_iter.bi_sector = dc->start_read +
dm_target_offset(ti, bio->bi_iter.bi_sector);
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index e7ba89f98d8d..ba84b8d62cd0 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1192,7 +1192,7 @@ static dm_block_t get_block(struct era *era, struct bio *bio)
static void remap_to_origin(struct era *era, struct bio *bio)
{
- bio->bi_bdev = era->origin_dev->bdev;
+ bio_set_dev(bio, era->origin_dev->bdev);
}
/*----------------------------------------------------------------
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index e2c7234931bc..7146c2d9762d 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -274,7 +274,7 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
struct flakey_c *fc = ti->private;
- bio->bi_bdev = fc->dev->bdev;
+ bio_set_dev(bio, fc->dev->bdev);
if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio->bi_iter.bi_sector =
flakey_map_sector(ti, bio->bi_iter.bi_sector);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 3acce09bba35..27c0f223f8ea 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -250,7 +250,8 @@ struct dm_integrity_io {
struct completion *completion;
- struct block_device *orig_bi_bdev;
+ struct gendisk *orig_bi_disk;
+ u8 orig_bi_partno;
bio_end_io_t *orig_bi_end_io;
struct bio_integrity_payload *orig_bi_integrity;
struct bvec_iter orig_bi_iter;
@@ -1164,7 +1165,8 @@ static void integrity_end_io(struct bio *bio)
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
bio->bi_iter = dio->orig_bi_iter;
- bio->bi_bdev = dio->orig_bi_bdev;
+ bio->bi_disk = dio->orig_bi_disk;
+ bio->bi_partno = dio->orig_bi_partno;
if (dio->orig_bi_integrity) {
bio->bi_integrity = dio->orig_bi_integrity;
bio->bi_opf |= REQ_INTEGRITY;
@@ -1681,8 +1683,9 @@ sleep:
dio->orig_bi_iter = bio->bi_iter;
- dio->orig_bi_bdev = bio->bi_bdev;
- bio->bi_bdev = ic->dev->bdev;
+ dio->orig_bi_disk = bio->bi_disk;
+ dio->orig_bi_partno = bio->bi_partno;
+ bio_set_dev(bio, ic->dev->bdev);
dio->orig_bi_integrity = bio_integrity(bio);
bio->bi_integrity = NULL;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 25039607f3cb..b4357ed4d541 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -347,7 +347,7 @@ static void do_region(int op, int op_flags, unsigned region,
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
- bio->bi_bdev = where->bdev;
+ bio_set_dev(bio, where->bdev);
bio->bi_end_io = endio;
bio_set_op_attrs(bio, op, op_flags);
store_io_and_region_in_bio(bio, io, region);
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 41971a090e34..405eca206d67 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -88,7 +88,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
struct linear_c *lc = ti->private;
- bio->bi_bdev = lc->dev->bdev;
+ bio_set_dev(bio, lc->dev->bdev);
if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio->bi_iter.bi_sector =
linear_map_sector(ti, bio->bi_iter.bi_sector);
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index a1da0eb58a93..534a254eb977 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -198,7 +198,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
}
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = lc->logdev->bdev;
+ bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -263,7 +263,7 @@ static int log_one_block(struct log_writes_c *lc,
}
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = lc->logdev->bdev;
+ bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -285,7 +285,7 @@ static int log_one_block(struct log_writes_c *lc,
}
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = lc->logdev->bdev;
+ bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -539,7 +539,7 @@ static void normal_map_bio(struct dm_target *ti, struct bio *bio)
{
struct log_writes_c *lc = ti->private;
- bio->bi_bdev = lc->dev->bdev;
+ bio_set_dev(bio, lc->dev->bdev);
}
static int log_writes_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 0e8ab5bb3575..573046bd5c46 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -566,7 +566,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
mpio->nr_bytes = nr_bytes;
bio->bi_status = 0;
- bio->bi_bdev = pgpath->path.dev->bdev;
+ bio_set_dev(bio, pgpath->path.dev->bdev);
bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
if (pgpath->pg->ps.type->start_io)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index a4fbd911d566..c0b82136b2d1 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,7 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record {
struct mirror *m;
- /* if details->bi_bdev == NULL, details were not saved */
+ /* if details->bi_disk == NULL, details were not saved */
struct dm_bio_details details;
region_t write_region;
};
@@ -464,7 +464,7 @@ static sector_t map_sector(struct mirror *m, struct bio *bio)
static void map_bio(struct mirror *m, struct bio *bio)
{
- bio->bi_bdev = m->dev->bdev;
+ bio_set_dev(bio, m->dev->bdev);
bio->bi_iter.bi_sector = map_sector(m, bio);
}
@@ -1199,7 +1199,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
- bio_record->details.bi_bdev = NULL;
+ bio_record->details.bi_disk = NULL;
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
@@ -1266,7 +1266,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
goto out;
if (unlikely(*error)) {
- if (!bio_record->details.bi_bdev) {
+ if (!bio_record->details.bi_disk) {
/*
* There wasn't enough memory to record necessary
* information for a retry or there was no other
@@ -1291,7 +1291,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
bd = &bio_record->details;
dm_bio_restore(bd, bio);
- bio_record->details.bi_bdev = NULL;
+ bio_record->details.bi_disk = NULL;
bio->bi_status = 0;
queue_bio(ms, bio, rw);
@@ -1301,7 +1301,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
}
out:
- bio_record->details.bi_bdev = NULL;
+ bio_record->details.bi_disk = NULL;
return DM_ENDIO_DONE;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1ba41048b438..1113b42e1eda 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1663,7 +1663,7 @@ __find_pending_exception(struct dm_snapshot *s,
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
- bio->bi_bdev = s->cow->bdev;
+ bio_set_dev(bio, s->cow->bdev);
bio->bi_iter.bi_sector =
chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
(chunk - e->old_chunk)) +
@@ -1681,7 +1681,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
if (bio->bi_opf & REQ_PREFLUSH) {
- bio->bi_bdev = s->cow->bdev;
+ bio_set_dev(bio, s->cow->bdev);
return DM_MAPIO_REMAPPED;
}
@@ -1769,7 +1769,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
goto out;
}
} else {
- bio->bi_bdev = s->origin->bdev;
+ bio_set_dev(bio, s->origin->bdev);
track_chunk(s, bio, chunk);
}
@@ -1802,9 +1802,9 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
if (bio->bi_opf & REQ_PREFLUSH) {
if (!dm_bio_get_target_bio_nr(bio))
- bio->bi_bdev = s->origin->bdev;
+ bio_set_dev(bio, s->origin->bdev);
else
- bio->bi_bdev = s->cow->bdev;
+ bio_set_dev(bio, s->cow->bdev);
return DM_MAPIO_REMAPPED;
}
@@ -1824,7 +1824,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk >= s->first_merging_chunk &&
chunk < (s->first_merging_chunk +
s->num_merging_chunks)) {
- bio->bi_bdev = s->origin->bdev;
+ bio_set_dev(bio, s->origin->bdev);
bio_list_add(&s->bios_queued_during_merge, bio);
r = DM_MAPIO_SUBMITTED;
goto out_unlock;
@@ -1838,7 +1838,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
}
redirect_to_origin:
- bio->bi_bdev = s->origin->bdev;
+ bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) {
up_write(&s->lock);
@@ -2285,7 +2285,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
struct dm_origin *o = ti->private;
unsigned available_sectors;
- bio->bi_bdev = o->dev->bdev;
+ bio_set_dev(bio, o->dev->bdev);
if (unlikely(bio->bi_opf & REQ_PREFLUSH))
return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a0375530b07f..ab50d7c4377f 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -270,7 +270,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
stripe_map_range_sector(sc, bio_end_sector(bio),
target_stripe, &end);
if (begin < end) {
- bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
+ bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev);
bio->bi_iter.bi_sector = begin +
sc->stripe[target_stripe].physical_start;
bio->bi_iter.bi_size = to_bytes(end - begin);
@@ -291,7 +291,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
if (bio->bi_opf & REQ_PREFLUSH) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
- bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
+ bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev);
return DM_MAPIO_REMAPPED;
}
if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
@@ -306,7 +306,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
&stripe, &bio->bi_iter.bi_sector);
bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
- bio->bi_bdev = sc->stripe[stripe].dev->bdev;
+ bio_set_dev(bio, sc->stripe[stripe].dev->bdev);
return DM_MAPIO_REMAPPED;
}
@@ -430,9 +430,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
return DM_ENDIO_DONE;
memset(major_minor, 0, sizeof(major_minor));
- sprintf(major_minor, "%d:%d",
- MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
- MINOR(disk_devt(bio->bi_bdev->bd_disk)));
+ sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
/*
* Test to see which stripe drive triggered the event
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 871c18fe000d..2dcea4c56f37 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -322,7 +322,7 @@ static int switch_map(struct dm_target *ti, struct bio *bio)
sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
unsigned path_nr = switch_get_path_nr(sctx, offset);
- bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
+ bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 9dec2f8cc739..69d88aee3055 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -679,7 +679,7 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
struct pool *pool = tc->pool;
sector_t bi_sector = bio->bi_iter.bi_sector;
- bio->bi_bdev = tc->pool_dev->bdev;
+ bio_set_dev(bio, tc->pool_dev->bdev);
if (block_size_is_power_of_two(pool))
bio->bi_iter.bi_sector =
(block << pool->sectors_per_block_shift) |
@@ -691,7 +691,7 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
- bio->bi_bdev = tc->origin_dev->bdev;
+ bio_set_dev(bio, tc->origin_dev->bdev);
}
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
@@ -3313,7 +3313,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
* As this is a singleton target, ti->begin is always zero.
*/
spin_lock_irqsave(&pool->lock, flags);
- bio->bi_bdev = pt->data_dev->bdev;
+ bio_set_dev(bio, pt->data_dev->bdev);
r = DM_MAPIO_REMAPPED;
spin_unlock_irqrestore(&pool->lock, flags);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index b46705ebf01f..1c5b6185c79d 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -637,7 +637,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
struct dm_verity *v = ti->private;
struct dm_verity_io *io;
- bio->bi_bdev = v->data_dev->bdev;
+ bio_set_dev(bio, v->data_dev->bdev);
bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index a4fa2ada6883..70485de37b66 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -409,7 +409,7 @@ static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
}
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio->bi_bdev = zmd->dev->bdev;
+ bio_set_dev(bio, zmd->dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
@@ -564,7 +564,7 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
set_bit(DMZ_META_WRITING, &mblk->state);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio->bi_bdev = zmd->dev->bdev;
+ bio_set_dev(bio, zmd->dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
@@ -586,7 +586,7 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
return -ENOMEM;
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio->bi_bdev = zmd->dev->bdev;
+ bio_set_dev(bio, zmd->dev->bdev);
bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio);
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index b08bbbd4d902..b87c1741da4b 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -238,7 +238,7 @@ static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
/* Setup and submit the BIO */
- bio->bi_bdev = dmz->dev->bdev;
+ bio_set_dev(bio, dmz->dev->bdev);
bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
atomic_inc(&bioctx->ref);
generic_make_request(bio);
@@ -586,7 +586,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
(unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
(unsigned int)dmz_bio_blocks(bio));
- bio->bi_bdev = dev->bdev;
+ bio_set_dev(bio, dev->bdev);
if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8612a2d1ccd9..b28b9ce8f4ff 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -851,10 +851,10 @@ static void clone_endio(struct bio *bio)
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
+ !bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md);
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
+ !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
}
@@ -1215,8 +1215,8 @@ static void __map_bio(struct dm_target_io *tio)
break;
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
- trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
- tio->io->bio->bi_bdev->bd_dev, sector);
+ trace_block_bio_remap(clone->bi_disk->queue, clone,
+ bio_dev(tio->io->bio), sector);
generic_make_request(clone);
break;
case DM_MAPIO_KILL:
@@ -1796,7 +1796,7 @@ static struct mapped_device *alloc_dev(int minor)
goto bad;
bio_init(&md->flush_bio, NULL, 0);
- md->flush_bio.bi_bdev = md->bdev;
+ bio_set_dev(&md->flush_bio, md->bdev);
md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
dm_stats_init(&md->stats);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 06a64d5d8c6c..38264b38420f 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -216,12 +216,12 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
if (failit) {
struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
- b->bi_bdev = conf->rdev->bdev;
+ bio_set_dev(b, conf->rdev->bdev);
b->bi_private = bio;
b->bi_end_io = faulty_fail;
bio = b;
} else
- bio->bi_bdev = conf->rdev->bdev;
+ bio_set_dev(bio, conf->rdev->bdev);
generic_make_request(bio);
return true;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5f1eb9189542..c464fb48039a 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -275,17 +275,17 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
bio = split;
}
- bio->bi_bdev = tmp_dev->rdev->bdev;
+ bio_set_dev(bio, tmp_dev->rdev->bdev);
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
start_sector + data_offset;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+ !blk_queue_discard(bio->bi_disk->queue))) {
/* Just ignore it */
bio_endio(bio);
} else {
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ trace_block_bio_remap(bio->bi_disk->queue,
bio, disk_devt(mddev->gendisk),
bio_sector);
mddev_check_writesame(mddev, bio);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c99634612fc4..0afdc1bfd7cb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -422,7 +422,7 @@ static void submit_flushes(struct work_struct *ws)
bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
- bi->bi_bdev = rdev->bdev;
+ bio_set_dev(bi, rdev->bdev);
bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
atomic_inc(&mddev->flush_pending);
submit_bio(bi);
@@ -772,7 +772,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
atomic_inc(&rdev->nr_pending);
- bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
+ bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
@@ -803,8 +803,10 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct bio *bio = md_bio_alloc_sync(rdev->mddev);
int ret;
- bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
- rdev->meta_bdev : rdev->bdev;
+ if (metadata_op && rdev->meta_bdev)
+ bio_set_dev(bio, rdev->meta_bdev);
+ else
+ bio_set_dev(bio, rdev->bdev);
bio_set_op_attrs(bio, op, op_flags);
if (metadata_op)
bio->bi_iter.bi_sector = sector + rdev->sb_start;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 09db03455801..c0d436fb88f0 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -509,6 +509,11 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
+static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
+{
+ atomic_add(nr_sectors, &bio->bi_disk->sync_io);
+}
+
struct md_personality
{
char *name;
@@ -721,14 +726,14 @@ static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
+ !bio->bi_disk->queue->limits.max_write_same_sectors)
mddev->queue->limits.max_write_same_sectors = 0;
}
static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
+ !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
mddev->queue->limits.max_write_zeroes_sectors = 0;
}
#endif /* _MD_MD_H */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 23a162ba6c56..b68e0666b9b0 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -134,7 +134,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
__bio_clone_fast(&mp_bh->bio, bio);
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
- mp_bh->bio.bi_bdev = multipath->rdev->bdev;
+ bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
@@ -345,17 +345,17 @@ static void multipathd(struct md_thread *thread)
if ((mp_bh->path = multipath_map (conf))<0) {
pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
- bdevname(bio->bi_bdev,b),
+ bio_devname(bio, b),
(unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
} else {
pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
- bdevname(bio->bi_bdev,b),
+ bio_devname(bio, b),
(unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
bio->bi_iter.bi_sector +=
conf->multipaths[mp_bh->path].rdev->data_offset;
- bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
+ bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 94d9ae9b0fd0..05a4521b832f 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -588,14 +588,13 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
zone = find_zone(mddev->private, &sector);
tmp_dev = map_sector(mddev, zone, sector, &sector);
- bio->bi_bdev = tmp_dev->bdev;
+ bio_set_dev(bio, tmp_dev->bdev);
bio->bi_iter.bi_sector = sector + zone->dev_start +
tmp_dev->data_offset;
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
- bio, disk_devt(mddev->gendisk),
- bio_sector);
+ trace_block_bio_remap(bio->bi_disk->queue, bio,
+ disk_devt(mddev->gendisk), bio_sector);
mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio);
generic_make_request(bio);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f50958ded9f0..baf5e358d22a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -786,13 +786,13 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_bdev;
+ struct md_rdev *rdev = (void *)bio->bi_disk;
bio->bi_next = NULL;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ !blk_queue_discard(bio->bi_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1273,7 +1273,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_iter.bi_sector = r1_bio->sector +
mirror->rdev->data_offset;
- read_bio->bi_bdev = mirror->rdev->bdev;
+ bio_set_dev(read_bio, mirror->rdev->bdev);
read_bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &mirror->rdev->flags) &&
@@ -1282,9 +1282,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_private = r1_bio;
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
- read_bio, disk_devt(mddev->gendisk),
- r1_bio->sector);
+ trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
+ disk_devt(mddev->gendisk), r1_bio->sector);
generic_make_request(read_bio);
}
@@ -1496,7 +1495,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio->bi_iter.bi_sector = (r1_bio->sector +
conf->mirrors[i].rdev->data_offset);
- mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+ bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
mbio->bi_end_io = raid1_end_write_request;
mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
@@ -1508,11 +1507,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
atomic_inc(&r1_bio->remaining);
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ trace_block_bio_remap(mbio->bi_disk->queue,
mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
+ mbio->bi_disk = (void *)conf->mirrors[i].rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
@@ -1990,8 +1989,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
* Don't fail devices as that won't really help.
*/
pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
- mdname(mddev),
- bdevname(bio->bi_bdev, b),
+ mdname(mddev), bio_devname(bio, b),
(unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
@@ -2082,7 +2080,7 @@ static void process_checks(struct r1bio *r1_bio)
b->bi_status = status;
b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
- b->bi_bdev = conf->mirrors[i].rdev->bdev;
+ bio_set_dev(b, conf->mirrors[i].rdev->bdev);
b->bi_end_io = end_sync_read;
rp->raid_bio = r1_bio;
b->bi_private = rp;
@@ -2350,7 +2348,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_iter.bi_sector += rdev->data_offset;
- wbio->bi_bdev = rdev->bdev;
+ bio_set_dev(wbio, rdev->bdev);
if (submit_bio_wait(wbio) < 0)
/* failure! */
@@ -2440,7 +2438,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
struct mddev *mddev = conf->mddev;
struct bio *bio;
struct md_rdev *rdev;
- dev_t bio_dev;
sector_t bio_sector;
clear_bit(R1BIO_ReadError, &r1_bio->state);
@@ -2454,7 +2451,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
*/
bio = r1_bio->bios[r1_bio->read_disk];
- bio_dev = bio->bi_bdev->bd_dev;
bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
bio_put(bio);
r1_bio->bios[r1_bio->read_disk] = NULL;
@@ -2727,7 +2723,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io) {
atomic_inc(&rdev->nr_pending);
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST;
}
@@ -2853,7 +2849,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
read_targets--;
- md_sync_acct(bio->bi_bdev, nr_sectors);
+ md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
@@ -2862,7 +2858,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
} else {
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
- md_sync_acct(bio->bi_bdev, nr_sectors);
+ md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f55d4cc085f6..d1f948e371e0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -901,13 +901,13 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_bdev;
+ struct md_rdev *rdev = (void*)bio->bi_disk;
bio->bi_next = NULL;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ !blk_queue_discard(bio->bi_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1085,13 +1085,13 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_bdev;
+ struct md_rdev *rdev = (void*)bio->bi_disk;
bio->bi_next = NULL;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ !blk_queue_discard(bio->bi_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1200,7 +1200,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev);
- read_bio->bi_bdev = rdev->bdev;
+ bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_end_io = raid10_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &rdev->flags) &&
@@ -1209,7 +1209,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_private = r10_bio;
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ trace_block_bio_remap(read_bio->bi_disk->queue,
read_bio, disk_devt(mddev->gendisk),
r10_bio->sector);
generic_make_request(read_bio);
@@ -1249,7 +1249,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
choose_data_offset(r10_bio, rdev));
- mbio->bi_bdev = rdev->bdev;
+ bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
if (!replacement && test_bit(FailFast,
@@ -1259,11 +1259,11 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_private = r10_bio;
if (conf->mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ trace_block_bio_remap(mbio->bi_disk->queue,
mbio, disk_devt(conf->mddev->gendisk),
r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_bdev = (void *)rdev;
+ mbio->bi_disk = (void *)rdev;
atomic_inc(&r10_bio->remaining);
@@ -2094,7 +2094,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
- tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
+ bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
generic_make_request(tbio);
}
@@ -2552,7 +2552,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
wbio->bi_iter.bi_sector = wsector +
choose_data_offset(r10_bio, rdev);
- wbio->bi_bdev = rdev->bdev;
+ bio_set_dev(wbio, rdev->bdev);
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
if (submit_bio_wait(wbio) < 0)
@@ -2575,7 +2575,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
struct bio *bio;
struct r10conf *conf = mddev->private;
struct md_rdev *rdev = r10_bio->devs[slot].rdev;
- dev_t bio_dev;
sector_t bio_last_sector;
/* we got a read error. Maybe the drive is bad. Maybe just
@@ -2587,7 +2586,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
* frozen.
*/
bio = r10_bio->devs[slot].bio;
- bio_dev = bio->bi_bdev->bd_dev;
bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
bio_put(bio);
r10_bio->devs[slot].bio = NULL;
@@ -2950,7 +2948,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
- * have bi_end_io, bi_sector, bi_bdev set,
+ * have bi_end_io, bi_sector, bi_disk set,
* and bi_private set to the r10bio.
* For recovery, we may actually create several r10bios
* with 2 bios in each, that correspond to the bios in the main one.
@@ -3095,7 +3093,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
from_addr = r10_bio->devs[j].addr;
bio->bi_iter.bi_sector = from_addr +
rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
atomic_inc(&rdev->nr_pending);
/* and we write to 'i' (if not in_sync) */
@@ -3117,7 +3115,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_iter.bi_sector = to_addr
+ mrdev->data_offset;
- bio->bi_bdev = mrdev->bdev;
+ bio_set_dev(bio, mrdev->bdev);
atomic_inc(&r10_bio->remaining);
} else
r10_bio->devs[1].bio->bi_end_io = NULL;
@@ -3143,7 +3141,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_iter.bi_sector = to_addr +
mreplace->data_offset;
- bio->bi_bdev = mreplace->bdev;
+ bio_set_dev(bio, mreplace->bdev);
atomic_inc(&r10_bio->remaining);
break;
}
@@ -3289,7 +3287,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
count++;
rdev = rcu_dereference(conf->mirrors[d].replacement);
@@ -3311,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
count++;
rcu_read_unlock();
}
@@ -3367,7 +3365,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sectors = nr_sectors;
if (bio->bi_end_io == end_sync_read) {
- md_sync_acct(bio->bi_bdev, nr_sectors);
+ md_sync_acct_bio(bio, nr_sectors);
bio->bi_status = 0;
generic_make_request(bio);
}
@@ -4383,7 +4381,7 @@ read_more:
read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
- read_bio->bi_bdev = rdev->bdev;
+ bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
@@ -4417,7 +4415,7 @@ read_more:
if (!rdev2 || test_bit(Faulty, &rdev2->flags))
continue;
- b->bi_bdev = rdev2->bdev;
+ bio_set_dev(b, rdev2->bdev);
b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
rdev2->new_data_offset;
b->bi_end_io = end_reshape_write;
@@ -4449,7 +4447,7 @@ read_more:
r10_bio->sectors = nr_sectors;
/* Now submit the read */
- md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
+ md_sync_acct_bio(read_bio, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
generic_make_request(read_bio);
@@ -4511,7 +4509,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
}
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- md_sync_acct(b->bi_bdev, r10_bio->sectors);
+ md_sync_acct_bio(b, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
b->bi_next = NULL;
generic_make_request(b);
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index bfa1e907c472..f253a9c583c1 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -728,7 +728,7 @@ static struct bio *r5l_bio_alloc(struct r5l_log *log)
struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio->bi_bdev = log->rdev->bdev;
+ bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
return bio;
@@ -1291,7 +1291,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
if (!do_flush)
return;
bio_reset(&log->flush_bio);
- log->flush_bio.bi_bdev = log->rdev->bdev;
+ bio_set_dev(&log->flush_bio, log->rdev->bdev);
log->flush_bio.bi_end_io = r5l_log_flush_endio;
log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio);
@@ -1669,7 +1669,7 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
sector_t offset)
{
bio_reset(ctx->ra_bio);
- ctx->ra_bio->bi_bdev = log->rdev->bdev;
+ bio_set_dev(ctx->ra_bio, log->rdev->bdev);
bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
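Every bio->bi_bdev assignment in the raid5 write-back log becomes a bio_set_dev() call. The helper itself lives in include/linux/bio.h and is therefore outside the drivers/-limited diffstat; as a sketch, it amounts to copying the gendisk and the partition number out of the block_device:

/* Approximate shape of the new block-layer helper (assumed, not part of
 * the drivers/ hunks shown here): the bio records a disk pointer plus a
 * partition number instead of a block_device pointer. */
#define bio_set_dev(bio, bdev)				\
do {							\
	(bio)->bi_disk = (bdev)->bd_disk;		\
	(bio)->bi_partno = (bdev)->bd_partno;		\
} while (0)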
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 44ad5baf3206..1e237c40d6fa 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -415,7 +415,7 @@ static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
__func__, io->seq, bio->bi_iter.bi_size,
(unsigned long long)bio->bi_iter.bi_sector,
- bdevname(bio->bi_bdev, b));
+ bio_devname(bio, b));
submit_bio(bio);
}
@@ -453,7 +453,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
bio->bi_end_io = ppl_log_endio;
bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
- bio->bi_bdev = log->rdev->bdev;
+ bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->rdev->ppl.sector;
bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
@@ -468,7 +468,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
ppl_conf->bs);
bio->bi_opf = prev->bi_opf;
- bio->bi_bdev = prev->bi_bdev;
+ bio_copy_dev(bio, prev);
bio->bi_iter.bi_sector = bio_end_sector(prev);
bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
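The PPL code also uses the two sibling helpers introduced alongside bio_set_dev(): bio_devname() for the debug message and bio_copy_dev() when a follow-on bio inherits the device of the previous one. Their block-layer definitions are not in the drivers/ hunks, so treat the following as an approximation:

/* Approximate block-layer companions (assumed): copy the disk/partition
 * pair from one bio to another, and resolve a bio back to a device name
 * via its disk's dev_t. */
#define bio_copy_dev(dst, src)				\
do {							\
	(dst)->bi_disk = (src)->bi_disk;		\
	(dst)->bi_partno = (src)->bi_partno;		\
} while (0)

#define bio_dev(bio)		disk_devt((bio)->bi_disk)
#define bio_devname(bio, buf)	__bdevname(bio_dev(bio), (buf))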
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d687aeb1b538..3ae8bbceb6c4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1096,7 +1096,7 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bi->bi_bdev = rdev->bdev;
+ bio_set_dev(bi, rdev->bdev);
bio_set_op_attrs(bi, op, op_flags);
bi->bi_end_io = op_is_write(op)
? raid5_end_write_request
@@ -1145,7 +1145,7 @@ again:
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
if (conf->mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+ trace_block_bio_remap(bi->bi_disk->queue,
bi, disk_devt(conf->mddev->gendisk),
sh->dev[i].sector);
if (should_defer && op_is_write(op))
@@ -1160,7 +1160,7 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- rbi->bi_bdev = rrdev->bdev;
+ bio_set_dev(rbi, rrdev->bdev);
bio_set_op_attrs(rbi, op, op_flags);
BUG_ON(!op_is_write(op));
rbi->bi_end_io = raid5_end_write_request;
@@ -1193,7 +1193,7 @@ again:
if (op == REQ_OP_DISCARD)
rbi->bi_vcnt = 0;
if (conf->mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+ trace_block_bio_remap(rbi->bi_disk->queue,
rbi, disk_devt(conf->mddev->gendisk),
sh->dev[i].sector);
if (should_defer && op_is_write(op))
@@ -5233,7 +5233,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
raid_bio->bi_next = (void*)rdev;
- align_bi->bi_bdev = rdev->bdev;
+ bio_set_dev(align_bi, rdev->bdev);
bio_clear_flag(align_bi, BIO_SEG_VALID);
if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
@@ -5255,7 +5255,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
spin_unlock_irq(&conf->device_lock);
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+ trace_block_bio_remap(align_bi->bi_disk->queue,
align_bi, disk_devt(mddev->gendisk),
raid_bio->bi_iter.bi_sector);
generic_make_request(align_bi);
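In raid5 the tracepoint calls switch from bdev_get_queue(bi->bi_bdev) to bi->bi_disk->queue. Nothing about the traced queue changes: the block_device's cached queue pointer and the gendisk's queue are the same object, so once bio_set_dev() has stored the disk on the bio the lookup simply loses one level of indirection. A hypothetical check of that identity (illustrative only, not part of the patch):

/* After bio_set_dev(bio, bdev), both routes reach the same request_queue. */
static void check_remap_queue(struct bio *bio, struct block_device *bdev)
{
	WARN_ON(bdev_get_queue(bdev) != bdev->bd_disk->queue);
	WARN_ON(bio->bi_disk->queue != bdev_get_queue(bdev));
}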
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 73062da3177f..a87f793f2945 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -390,7 +390,7 @@ int nd_region_activate(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start);
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
{
- struct gendisk *disk = bio->bi_bdev->bd_disk;
+ struct gendisk *disk = bio->bi_disk;
if (!blk_queue_io_stat(disk->queue))
return false;
@@ -402,7 +402,7 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
}
static inline void nd_iostat_end(struct bio *bio, unsigned long start)
{
- struct gendisk *disk = bio->bi_bdev->bd_disk;
+ struct gendisk *disk = bio->bi_disk;
generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0,
start);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c49f1f8b2e57..f03452db7938 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -613,11 +613,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
if (!disk)
goto submit;
- bio->bi_bdev = bdget_disk(disk, 0);
- if (!bio->bi_bdev) {
- ret = -ENODEV;
- goto out_unmap;
- }
+ bio->bi_disk = disk;
if (meta_buffer && meta_len) {
struct bio_integrity_payload *bip;
@@ -668,11 +664,8 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
out_free_meta:
kfree(meta);
out_unmap:
- if (bio) {
- if (disk && bio->bi_bdev)
- bdput(bio->bi_bdev);
+ if (bio)
blk_rq_unmap_user(bio);
- }
out:
blk_mq_free_request(req);
return ret;
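For the NVMe user passthrough path the simplification is in the error handling: tagging the bio used to require pinning a block_device with bdget_disk() and dropping that reference with bdput() on every exit, whereas a bare gendisk pointer needs no cleanup. Condensed into a hedged before/after fragment (labels and surrounding code elided):

	/* before: pin a block_device just to tag the bio */
	bio->bi_bdev = bdget_disk(disk, 0);	/* takes a reference ... */
	if (!bio->bi_bdev)
		return -ENODEV;
	/* ... */
	bdput(bio->bi_bdev);			/* ... that must be dropped on every exit path */

	/* after: plain pointer assignment, nothing to release */
	bio->bi_disk = disk;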
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index be8541335e31..c1a28569e843 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -643,17 +643,9 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
}
- if (!disk)
- goto submit;
-
- bio->bi_bdev = bdget_disk(disk, 0);
- if (!bio->bi_bdev) {
- ret = -ENODEV;
- goto err_meta;
- }
+ bio->bi_disk = disk;
}
-submit:
blk_execute_rq(q, NULL, rq, 0);
if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
@@ -673,11 +665,8 @@ err_meta:
if (meta_buf && meta_len)
dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
- if (bio) {
- if (disk && bio->bi_bdev)
- bdput(bio->bi_bdev);
+ if (bio)
blk_rq_unmap_user(bio);
- }
err_ppa:
if (ppa_buf && ppa_len)
dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 3b4d47a6abdb..0d4c23dc4532 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -68,7 +68,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
nvmet_inline_bio_init(req);
bio = &req->inline_bio;
- bio->bi_bdev = req->ns->bdev;
+ bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
@@ -80,7 +80,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
struct bio *prev = bio;
bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
- bio->bi_bdev = req->ns->bdev;
+ bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio_set_op_attrs(bio, op, op_flags);
@@ -104,7 +104,7 @@ static void nvmet_execute_flush(struct nvmet_req *req)
nvmet_inline_bio_init(req);
bio = &req->inline_bio;
- bio->bi_bdev = req->ns->bdev;
+ bio_set_dev(bio, req->ns->bdev);
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
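nvmet_execute_rw() builds a chain of bios that all target the namespace's backing device, and each one is now tagged with bio_set_dev() in the same way. A minimal usage sketch modeled on that loop (example_next_bio is an illustrative name, not nvmet API):

/* Hedged sketch of the per-segment pattern above: allocate the next bio,
 * point it at the same backing device, and chain it to its predecessor
 * before the predecessor is submitted. */
static struct bio *example_next_bio(struct bio *prev, struct block_device *bdev,
				    sector_t sector, int op, int op_flags)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio_set_op_attrs(bio, op, op_flags);
	bio_chain(bio, prev);
	return bio;
}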
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 68bae4f6bd88..7abb240847c0 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -856,14 +856,14 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
blk_queue_split(q, &bio);
bytes_done = 0;
- dev_info = bio->bi_bdev->bd_disk->private_data;
+ dev_info = bio->bi_disk->private_data;
if (dev_info == NULL)
goto fail;
if ((bio->bi_iter.bi_sector & 7) != 0 ||
(bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
- if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
+ if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) {
/* Request beyond end of DCSS segment. */
goto fail;
}
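dcssblk_make_request() now pulls its driver data and the capacity check straight from bio->bi_disk. The same prologue applies to any bio-based driver after this conversion; a generic, hypothetical skeleton (example_dev is not a real dcssblk type):

/* Hedged skeleton of a bio-based request function after the change:
 * per-device data and the capacity bound both come from the bio's
 * gendisk, with no block_device involved. */
static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
	struct example_dev *dev = bio->bi_disk->private_data;

	if (!dev || bio_end_sector(bio) > get_capacity(bio->bi_disk))
		goto fail;

	/* ... transfer the data described by the bio ... */

	bio_endio(bio);
	return BLK_QC_T_NONE;
fail:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}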
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index a48f0d40c1d2..571a0709e1e5 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -183,7 +183,7 @@ static unsigned long xpram_highest_page_index(void)
*/
static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
{
- xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
+ xpram_device_t *xdev = bio->bi_disk->private_data;
struct bio_vec bvec;
struct bvec_iter iter;
unsigned int index;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index ee7c7fa55dad..07c814c42648 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -338,7 +338,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
return NULL;
}
- bio->bi_bdev = ib_dev->ibd_bd;
+ bio_set_dev(bio, ib_dev->ibd_bd);
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba;
@@ -395,7 +395,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
- bio->bi_bdev = ib_dev->ibd_bd;
+ bio_set_dev(bio, ib_dev->ibd_bd);
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
if (!immed)
bio->bi_private = cmd;
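The iblock flush path shows the conversion in its smallest form: an empty bio, pointed at the backing device with bio_set_dev() and flagged REQ_PREFLUSH. As a hedged, stand-alone sketch of that pattern (helper name is illustrative):

/* Minimal flush-bio construction, following iblock_execute_sync_cache
 * above; the caller submits it with submit_bio(). */
static struct bio *example_flush_bio(struct block_device *bdev,
				     bio_end_io_t *end_io, void *private)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	bio->bi_end_io = end_io;
	bio->bi_private = private;
	return bio;
}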