Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/bcache/btree.c        |   4
-rw-r--r--   drivers/md/bcache/debug.c        |  15
-rw-r--r--   drivers/md/bcache/io.c           |   4
-rw-r--r--   drivers/md/bcache/journal.c      |   4
-rw-r--r--   drivers/md/bcache/movinggc.c     |   6
-rw-r--r--   drivers/md/bcache/request.c      |   8
-rw-r--r--   drivers/md/bcache/super.c        |  16
-rw-r--r--   drivers/md/bcache/writeback.c    |   5
-rw-r--r--   drivers/md/bcache/writeback.h    |   3
-rw-r--r--   drivers/md/dm-bufio.c            |   6
-rw-r--r--   drivers/md/dm-crypt.c            |   2
-rw-r--r--   drivers/md/dm-log.c              |   2
-rw-r--r--   drivers/md/dm-raid1.c            |   4
-rw-r--r--   drivers/md/dm-rq.c               |  52
-rw-r--r--   drivers/md/dm-snap-persistent.c  |   4
-rw-r--r--   drivers/md/dm.c                  |   4
-rw-r--r--   drivers/md/md.c                  |   4
-rw-r--r--   drivers/md/multipath.c           |   2
-rw-r--r--   drivers/md/raid5-cache.c         |   6
-rw-r--r--   drivers/md/raid5.c               |  11
20 files changed, 57 insertions(+), 105 deletions(-)
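Most of the churn in the diff below is mechanical: bio_init() now takes the bvec table and vector count as arguments, and the composite WRITE_FLUSH/WRITE_FUA/WRITE_FLUSH_FUA/READ_SYNC macros are replaced by assigning REQ_* flags to bi_opf directly. A minimal sketch of the new calling convention, using a hypothetical driver-private structure rather than code from this patch, might look like:

/* Sketch only: "struct my_io" and my_io_init() are hypothetical. */
#include <linux/bio.h>

struct my_io {
	struct bio	bio;	/* embedded, caller-owned bio */
	struct bio_vec	vec;	/* its single inline bio_vec */
};

static void my_io_init(struct my_io *io)
{
	/*
	 * Old style, as removed in the hunks below:
	 *	bio_init(&io->bio);
	 *	io->bio.bi_io_vec   = &io->vec;
	 *	io->bio.bi_max_vecs = 1;
	 * New style: the bvec table and its size are passed to bio_init().
	 */
	bio_init(&io->bio, &io->vec, 1);

	/* Flags are ORed into bi_opf instead of bio_set_op_attrs() + WRITE_FLUSH. */
	io->bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
}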
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81d3db40cd7b..6fdd8e252760 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -297,7 +297,7 @@ static void bch_btree_node_read(struct btree *b)
 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
 	bio->bi_end_io = btree_node_read_endio;
 	bio->bi_private = &cl;
-	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+	bio->bi_opf = REQ_OP_READ | REQ_META;
 
 	bch_bio_map(bio, b->keys.set[0].data);
 
@@ -393,7 +393,7 @@ static void do_btree_node_write(struct btree *b)
 	b->bio->bi_end_io = btree_node_write_endio;
 	b->bio->bi_private = cl;
 	b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
-	bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
+	b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
 	bch_bio_map(b->bio, i);
 
 	/*
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 333a1e5f6ae6..06f55056aaae 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -52,7 +52,7 @@ void bch_btree_verify(struct btree *b)
 	bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
 	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
 	bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
-	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+	bio->bi_opf = REQ_OP_READ | REQ_META;
 	bch_bio_map(bio, sorted);
 
 	submit_bio_wait(bio);
@@ -107,22 +107,26 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
 	char name[BDEVNAME_SIZE];
 	struct bio *check;
-	struct bio_vec bv;
-	struct bvec_iter iter;
+	struct bio_vec bv, cbv;
+	struct bvec_iter iter, citer = { 0 };
 
 	check = bio_clone(bio, GFP_NOIO);
 	if (!check)
 		return;
-	bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
+	check->bi_opf = REQ_OP_READ;
 
 	if (bio_alloc_pages(check, GFP_NOIO))
 		goto out_put;
 
 	submit_bio_wait(check);
 
+	citer.bi_size = UINT_MAX;
 	bio_for_each_segment(bv, bio, iter) {
 		void *p1 = kmap_atomic(bv.bv_page);
-		void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
+		void *p2;
+
+		cbv = bio_iter_iovec(check, citer);
+		p2 = page_address(cbv.bv_page);
 
 		cache_set_err_on(memcmp(p1 + bv.bv_offset,
 					p2 + bv.bv_offset,
@@ -133,6 +137,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 				(uint64_t) bio->bi_iter.bi_sector);
 
 		kunmap_atomic(p1);
+		bio_advance_iter(check, &citer, bv.bv_len);
 	}
 
 	bio_free_pages(check);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index e97b0acf7b8d..db45a88c0ce9 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -24,9 +24,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
 	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
 	struct bio *bio = &b->bio;
 
-	bio_init(bio);
-	bio->bi_max_vecs = bucket_pages(c);
-	bio->bi_io_vec = bio->bi_inline_vecs;
+	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
 
 	return bio;
 }
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6925023e12d4..1198e53d5670 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -448,13 +448,11 @@ static void do_journal_discard(struct cache *ca)
 
 		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
-		bio_init(bio);
+		bio_init(bio, bio->bi_inline_vecs, 1);
 		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
 		bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
 						ca->sb.d[ja->discard_idx]);
 		bio->bi_bdev = ca->bdev;
-		bio->bi_max_vecs = 1;
-		bio->bi_io_vec = bio->bi_inline_vecs;
 		bio->bi_iter.bi_size = bucket_bytes(ca);
 		bio->bi_end_io = journal_discard_endio;
 
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 5c4bddecfaf0..13b8a907006d 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -77,15 +77,13 @@ static void moving_init(struct moving_io *io)
 {
 	struct bio *bio = &io->bio.bio;
 
-	bio_init(bio);
+	bio_init(bio, bio->bi_inline_vecs,
+		 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
 	bio_get(bio);
 	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
 	bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
-	bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
-					PAGE_SECTORS);
 	bio->bi_private = &io->cl;
-	bio->bi_io_vec = bio->bi_inline_vecs;
 	bch_bio_map(bio, NULL);
 }
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 40ffe5e424b3..f49c5417527d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -404,8 +404,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	if (!congested &&
 	    mode == CACHE_MODE_WRITEBACK &&
-	    op_is_write(bio_op(bio)) &&
-	    (bio->bi_opf & REQ_SYNC))
+	    op_is_write(bio->bi_opf) &&
+	    op_is_sync(bio->bi_opf))
 		goto rescale;
 
 	spin_lock(&dc->io_lock);
@@ -623,7 +623,7 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
 {
 	struct bio *bio = &s->bio.bio;
 
-	bio_init(bio);
+	bio_init(bio, NULL, 0);
 	__bio_clone_fast(bio, orig_bio);
 	bio->bi_end_io = request_endio;
 	bio->bi_private = &s->cl;
@@ -923,7 +923,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		flush->bi_bdev = bio->bi_bdev;
 		flush->bi_end_io = request_endio;
 		flush->bi_private = cl;
-		bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
+		flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 		closure_bio_submit(flush, cl);
 	}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 849ad441cd76..2fb5bfeb43e2 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -381,7 +381,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
 		return "bad uuid pointer";
 
 	bkey_copy(&c->uuid_bucket, k);
-	uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
+	uuid_io(c, REQ_OP_READ, 0, k, cl);
 
 	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
 		struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -600,7 +600,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 			ca->prio_last_buckets[bucket_nr] = bucket;
 			bucket_nr++;
 
-			prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
+			prio_io(ca, bucket, REQ_OP_READ, 0);
 
 			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
 				pr_warn("bad csum reading priorities");
@@ -1152,9 +1152,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	dc->bdev = bdev;
 	dc->bdev->bd_holder = dc;
 
-	bio_init(&dc->sb_bio);
-	dc->sb_bio.bi_max_vecs = 1;
-	dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
+	bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
 	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
 	get_page(sb_page);
 
@@ -1814,9 +1812,7 @@ static int cache_alloc(struct cache *ca)
 	__module_get(THIS_MODULE);
 	kobject_init(&ca->kobj, &bch_cache_ktype);
 
-	bio_init(&ca->journal.bio);
-	ca->journal.bio.bi_max_vecs = 8;
-	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
+	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
 
 	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
@@ -1852,9 +1848,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	ca->bdev = bdev;
 	ca->bdev->bd_holder = ca;
 
-	bio_init(&ca->sb_bio);
-	ca->sb_bio.bi_max_vecs = 1;
-	ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
+	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
 	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
 	get_page(sb_page);
 
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index e51644e503a5..69e1ae59cab8 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -106,14 +106,13 @@ static void dirty_init(struct keybuf_key *w)
 	struct dirty_io *io = w->private;
 	struct bio *bio = &io->bio;
 
-	bio_init(bio);
+	bio_init(bio, bio->bi_inline_vecs,
+		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
 	if (!io->dc->writeback_percent)
 		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
 	bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
-	bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
 	bio->bi_private = w;
-	bio->bi_io_vec = bio->bi_inline_vecs;
 	bch_bio_map(bio, NULL);
 }
 
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 301eaf565167..629bd1a502fd 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -57,8 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 	if (would_skip)
 		return false;
 
-	return bio->bi_opf & REQ_SYNC ||
-		in_use <= CUTOFF_WRITEBACK;
+	return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
 }
 
 static inline void bch_writeback_queue(struct cached_dev *dc)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 125aedc3875f..262e75365cc0 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -611,9 +611,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 	char *ptr;
 	int len;
 
-	bio_init(&b->bio);
-	b->bio.bi_io_vec = b->bio_vec;
-	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
+	bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
 	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
 	b->bio.bi_bdev = b->c->bdev;
 	b->bio.bi_end_io = inline_endio;
@@ -1316,7 +1314,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = WRITE_FLUSH,
+		.bi_op_flags = REQ_PREFLUSH,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = c->dm_io,
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a2768835d394..68a9eb4f3f36 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1135,7 +1135,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
 	clone->bi_bdev = cc->dev->bdev;
-	bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
+	clone->bi_opf = io->base_bio->bi_opf;
 }
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 07fc1ad42ec5..33e71ea6cc14 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc)
 	};
 
 	lc->io_req.bi_op = REQ_OP_WRITE;
-	lc->io_req.bi_op_flags = WRITE_FLUSH;
+	lc->io_req.bi_op_flags = REQ_PREFLUSH;
 
 	return dm_io(&lc->io_req, 1, &null_location, NULL);
 }
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9a8b71067c6e..2ddc2d20e62d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = WRITE_FLUSH,
+		.bi_op_flags = REQ_PREFLUSH,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = ms->io_client,
@@ -656,7 +656,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
+		.bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
 		.mem.type = DM_IO_BIO,
 		.mem.ptr.bio = bio,
 		.notify.fn = write_callback,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1d0d2adc050a..b2a9e2d161e4 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -75,12 +75,6 @@ static void dm_old_start_queue(struct request_queue *q)
 
 static void dm_mq_start_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	blk_mq_start_stopped_hw_queues(q, true);
 	blk_mq_kick_requeue_list(q);
 }
@@ -105,20 +99,10 @@ static void dm_old_stop_queue(struct request_queue *q)
 
 static void dm_mq_stop_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	if (blk_queue_stopped(q)) {
-		spin_unlock_irqrestore(q->queue_lock, flags);
+	if (blk_mq_queue_stopped(q))
 		return;
-	}
-
-	queue_flag_set(QUEUE_FLAG_STOPPED, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	/* Avoid that requeuing could restart the queue. */
-	blk_mq_cancel_requeue_work(q);
-	blk_mq_stop_hw_queues(q);
+
+	blk_mq_quiesce_queue(q);
 }
 
 void dm_stop_queue(struct request_queue *q)
@@ -313,7 +297,7 @@ static void dm_unprep_request(struct request *rq)
 
 	if (!rq->q->mq_ops) {
 		rq->special = NULL;
-		rq->cmd_flags &= ~REQ_DONTPREP;
+		rq->rq_flags &= ~RQF_DONTPREP;
 	}
 
 	if (clone)
@@ -338,12 +322,7 @@ static void dm_old_requeue_request(struct request *rq)
 
 static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	if (!blk_queue_stopped(q))
-		blk_mq_delay_kick_requeue_list(q, msecs);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_mq_delay_kick_requeue_list(q, msecs);
 }
 
 void dm_mq_kick_requeue_list(struct mapped_device *md)
@@ -354,7 +333,7 @@ EXPORT_SYMBOL(dm_mq_kick_requeue_list);
 
 static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
 {
-	blk_mq_requeue_request(rq);
+	blk_mq_requeue_request(rq, false);
 	__dm_mq_kick_requeue_list(rq->q, msecs);
 }
 
@@ -431,7 +410,7 @@ static void dm_softirq_done(struct request *rq)
 		return;
 	}
 
-	if (rq->cmd_flags & REQ_FAILED)
+	if (rq->rq_flags & RQF_FAILED)
 		mapped = false;
 
 	dm_done(clone, tio->error, mapped);
@@ -460,7 +439,7 @@ static void dm_complete_request(struct request *rq, int error)
  */
 static void dm_kill_unmapped_request(struct request *rq, int error)
 {
-	rq->cmd_flags |= REQ_FAILED;
+	rq->rq_flags |= RQF_FAILED;
 	dm_complete_request(rq, error);
 }
 
@@ -476,7 +455,7 @@ static void end_clone_request(struct request *clone, int error)
 	 * For just cleaning up the information of the queue in which
 	 * the clone was dispatched.
 	 * The clone is *NOT* freed actually here because it is alloced
-	 * from dm own mempool (REQ_ALLOCED isn't set).
+	 * from dm own mempool (RQF_ALLOCED isn't set).
 	 */
 	__blk_put_request(clone->q, clone);
 }
@@ -497,7 +476,7 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 	int r;
 
 	if (blk_queue_io_stat(clone->q))
-		clone->cmd_flags |= REQ_IO_STAT;
+		clone->rq_flags |= RQF_IO_STAT;
 
 	clone->start_time = jiffies;
 	r = blk_insert_cloned_request(clone->q, clone);
@@ -633,7 +612,7 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
 		return BLKPREP_DEFER;
 
 	rq->special = tio;
-	rq->cmd_flags |= REQ_DONTPREP;
+	rq->rq_flags |= RQF_DONTPREP;
 
 	return BLKPREP_OK;
 }
@@ -904,17 +883,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		dm_put_live_table(md, srcu_idx);
 	}
 
-	/*
-	 * On suspend dm_stop_queue() handles stopping the blk-mq
-	 * request_queue BUT: even though the hw_queues are marked
-	 * BLK_MQ_S_STOPPED at that point there is still a race that
-	 * is allowing block/blk-mq.c to call ->queue_rq against a
-	 * hctx that it really shouldn't. The following check guards
-	 * against this rarity (albeit _not_ race-free).
-	 */
-	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
-		return BLK_MQ_RQ_QUEUE_BUSY;
-
 	if (ti->type->busy && ti->type->busy(ti))
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index b8cf956b577b..b93476c3ba3f 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -741,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
 	/*
 	 * Commit exceptions to disk.
 	 */
-	if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
+	if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
 		ps->valid = 0;
 
 	/*
@@ -818,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
 	for (i = 0; i < nr_merged; i++)
 		clear_exception(ps, ps->current_committed - 1 - i);
 
-	r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+	r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
 	if (r < 0)
 		return r;
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ef7bf1dd6900..ffa97b742a68 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1525,9 +1525,9 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->bdev)
 		goto bad;
 
-	bio_init(&md->flush_bio);
+	bio_init(&md->flush_bio, NULL, 0);
 	md->flush_bio.bi_bdev = md->bdev;
-	bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	dm_stats_init(&md->stats);
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2089d46b0eb8..f975cd08923d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -394,7 +394,7 @@ static void submit_flushes(struct work_struct *ws)
 			bi->bi_end_io = md_end_flush;
 			bi->bi_private = rdev;
 			bi->bi_bdev = rdev->bdev;
-			bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
+			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 			atomic_inc(&mddev->flush_pending);
 			submit_bio(bi);
 			rcu_read_lock();
@@ -743,7 +743,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
 	bio->bi_end_io = super_written;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;
 
 	atomic_inc(&mddev->pending_writes);
 	submit_bio(bio);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 673efbd6fc47..4da06d813b8f 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -130,7 +130,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 	}
 	multipath = conf->multipaths + mp_bh->path;
 
-	bio_init(&mp_bh->bio);
+	bio_init(&mp_bh->bio, NULL, 0);
 	__bio_clone_fast(&mp_bh->bio, bio);
 
 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index a227a9f3ee65..8491edcfb5a6 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -685,7 +685,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
 	bio_reset(&log->flush_bio);
 	log->flush_bio.bi_bdev = log->rdev->bdev;
 	log->flush_bio.bi_end_io = r5l_log_flush_endio;
-	bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+	log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	submit_bio(&log->flush_bio);
 }
@@ -1053,7 +1053,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
 	mb->checksum = cpu_to_le32(crc);
 
 	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
-			  WRITE_FUA, false)) {
+			  REQ_FUA, false)) {
 		__free_page(page);
 		return -EIO;
 	}
@@ -1205,7 +1205,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 	INIT_LIST_HEAD(&log->io_end_ios);
 	INIT_LIST_HEAD(&log->flushing_ios);
 	INIT_LIST_HEAD(&log->finished_ios);
-	bio_init(&log->flush_bio);
+	bio_init(&log->flush_bio, NULL, 0);
 
 	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
 	if (!log->io_kc)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 92ac251e91e6..5f9e28443c8a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -913,7 +913,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
 			op = REQ_OP_WRITE;
 			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
-				op_flags = WRITE_FUA;
+				op_flags = REQ_FUA;
 			if (test_bit(R5_Discard, &sh->dev[i].flags))
 				op = REQ_OP_DISCARD;
 		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
@@ -2004,13 +2004,8 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
 		for (i = 0; i < disks; i++) {
 			struct r5dev *dev = &sh->dev[i];
 
-			bio_init(&dev->req);
-			dev->req.bi_io_vec = &dev->vec;
-			dev->req.bi_max_vecs = 1;
-
-			bio_init(&dev->rreq);
-			dev->rreq.bi_io_vec = &dev->rvec;
-			dev->rreq.bi_max_vecs = 1;
+			bio_init(&dev->req, &dev->vec, 1);
+			bio_init(&dev->rreq, &dev->rvec, 1);
 		}
 	}
 	return sh;
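The request.c and writeback.h hunks above replace open-coded REQ_SYNC tests with op_is_sync(), and op_is_write() is now fed the whole bi_opf word rather than bio_op(). A small sketch of how those helpers get used; the wrapper function and its name are illustrative, not part of this series:

#include <linux/blk_types.h>

/* Illustrative helper, not from the patch: classify a bio for writeback. */
static bool my_bio_is_sync_write(const struct bio *bio)
{
	/* The low bits of bi_opf hold the op, so op_is_write() accepts bi_opf. */
	if (!op_is_write(bio->bi_opf))
		return false;

	/*
	 * op_is_sync() treats reads as synchronous and otherwise checks the
	 * sync-related request flags, replacing the open-coded
	 * "bio->bi_opf & REQ_SYNC" test used before this change.
	 */
	return op_is_sync(bio->bi_opf);
}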