author     Linus Torvalds <torvalds@linux-foundation.org>  2020-08-03 11:57:03 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-08-03 11:57:03 -0700
commit     382625d0d4325fb14a29444eb8dce8dcc2eb9b51 (patch)
tree       de35ff523e65c3a98fd3ac3a3595d517856d03da /drivers/md
parent     99f6cf61f175c1239ed8e86d4a1757c380da52d1 (diff)
parent     d958e343bdc3de2643ce25225bed082dc222858d (diff)
Merge tag 'for-5.9/block-20200802' of git://git.kernel.dk/linux-block

Pull core block updates from Jens Axboe:
 "Good amount of cleanups and tech debt removals in here, and as a
  result, the diffstat shows a nice net reduction in code.

   - Softirq completion cleanups (Christoph)
   - Stop using ->queuedata (Christoph)
   - Cleanup bd claiming (Christoph)
   - Use check_events, moving away from the legacy media change
     (Christoph)
   - Use inode i_blkbits consistently (Christoph)
   - Remove old unused writeback congestion bits (Christoph)
   - Cleanup/unify submission path (Christoph)
   - Use bio_uninit consistently, instead of bio_disassociate_blkg
     (Christoph)
   - sbitmap cleared bits handling (John)
   - Request merging blktrace event addition (Jan)
   - sysfs add/remove race fixes (Luis)
   - blk-mq tag fixes/optimizations (Ming)
   - Duplicate words in comments (Randy)
   - Flush deferral cleanup (Yufen)
   - IO context locking/retry fixes (John)
   - struct_size() usage (Gustavo)
   - blk-iocost fixes (Chengming)
   - blk-cgroup IO stats fixes (Boris)
   - Various little fixes"

* tag 'for-5.9/block-20200802' of git://git.kernel.dk/linux-block: (135 commits)
  block: blk-timeout: delete duplicated word
  block: blk-mq-sched: delete duplicated word
  block: blk-mq: delete duplicated word
  block: genhd: delete duplicated words
  block: elevator: delete duplicated word and fix typos
  block: bio: delete duplicated words
  block: bfq-iosched: fix duplicated word
  iocost_monitor: start from the oldest usage index
  iocost: Fix check condition of iocg abs_vdebt
  block: Remove callback typedefs for blk_mq_ops
  block: Use non _rcu version of list functions for tag_set_list
  blk-cgroup: show global disk stats in root cgroup io.stat
  blk-cgroup: make iostat functions visible to stat printing
  block: improve discard bio alignment in __blkdev_issue_discard()
  block: change REQ_OP_ZONE_RESET and REQ_OP_ZONE_RESET_ALL to be odd numbers
  block: defer flush request no matter whether we have elevator
  block: make blk_timeout_init() static
  block: remove retry loop in ioc_release_fn()
  block: remove unnecessary ioc nested locking
  block: integrate bd_start_claiming into __blkdev_get
  ...
Diffstat (limited to 'drivers/md')
 -rw-r--r--  drivers/md/bcache/bcache.h        2
 -rw-r--r--  drivers/md/bcache/btree.c         2
 -rw-r--r--  drivers/md/bcache/request.c      58
 -rw-r--r--  drivers/md/bcache/request.h       4
 -rw-r--r--  drivers/md/bcache/super.c        25
 -rw-r--r--  drivers/md/dm-cache-target.c     25
 -rw-r--r--  drivers/md/dm-clone-target.c     25
 -rw-r--r--  drivers/md/dm-crypt.c             6
 -rw-r--r--  drivers/md/dm-delay.c             2
 -rw-r--r--  drivers/md/dm-era-target.c       17
 -rw-r--r--  drivers/md/dm-integrity.c         4
 -rw-r--r--  drivers/md/dm-mpath.c             2
 -rw-r--r--  drivers/md/dm-raid.c             12
 -rw-r--r--  drivers/md/dm-raid1.c             2
 -rw-r--r--  drivers/md/dm-rq.c                3
 -rw-r--r--  drivers/md/dm-snap-persistent.c   2
 -rw-r--r--  drivers/md/dm-snap.c              6
 -rw-r--r--  drivers/md/dm-table.c            37
 -rw-r--r--  drivers/md/dm-thin.c             20
 -rw-r--r--  drivers/md/dm-verity-target.c     2
 -rw-r--r--  drivers/md/dm-writecache.c        2
 -rw-r--r--  drivers/md/dm-zoned-target.c      2
 -rw-r--r--  drivers/md/dm.c                  75
 -rw-r--r--  drivers/md/dm.h                   1
 -rw-r--r--  drivers/md/md-faulty.c            4
 -rw-r--r--  drivers/md/md-linear.c           28
 -rw-r--r--  drivers/md/md-multipath.c        27
 -rw-r--r--  drivers/md/md.c                  51
 -rw-r--r--  drivers/md/md.h                   4
 -rw-r--r--  drivers/md/raid0.c               24
 -rw-r--r--  drivers/md/raid1.c               45
 -rw-r--r--  drivers/md/raid10.c              54
 -rw-r--r--  drivers/md/raid5.c               35
 33 files changed, 122 insertions, 486 deletions
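
[Editor's note] The common thread in the drivers/md hunks below is the
submission-path unification from the pull above: generic_make_request()
becomes submit_bio_noacct(), blk_alloc_queue() loses its make_request_fn
argument, and bio-based drivers instead implement ->submit_bio in their
block_device_operations, fetching driver state from the gendisk rather
than the removed q->queuedata. The following is a minimal sketch of the
resulting 5.9-era pattern, not code from this series; sketch_dev,
sketch_remap, sketch_ops and sketch_init_disk are hypothetical names.

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

/* hypothetical per-device state for this sketch */
struct sketch_dev {
	struct gendisk *disk;
	struct block_device *backing;
};

/* hypothetical remap step; a real driver does its mapping here */
static void sketch_remap(struct sketch_dev *d, struct bio *bio)
{
	bio_set_dev(bio, d->backing);
}

static blk_qc_t sketch_submit_bio(struct bio *bio)
{
	/* q->queuedata is gone; driver state hangs off the gendisk */
	struct sketch_dev *d = bio->bi_disk->private_data;

	sketch_remap(d, bio);
	return submit_bio_noacct(bio);	/* was generic_make_request() */
}

static const struct block_device_operations sketch_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= sketch_submit_bio,	/* new submission entry point */
};

static int sketch_init_disk(struct sketch_dev *d)
{
	/* blk_alloc_queue() no longer takes a make_request_fn */
	struct request_queue *q = blk_alloc_queue(NUMA_NO_NODE);

	if (!q)
		return -ENOMEM;
	d->disk->queue = q;
	d->disk->fops = &sketch_ops;	/* fops now select the submit path */
	d->disk->private_data = d;
	return 0;
}

This is why bcache below splits its single bcache_ops into
bcache_cached_ops and bcache_flash_ops: with no per-queue
make_request_fn, each disk type needs its own ops struct to pick its
submit path.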
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 221e0191b687..3c708e8b5e2d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -929,7 +929,7 @@ static inline void closure_bio_submit(struct cache_set *c,
bio_endio(bio);
return;
}
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
/*
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 6548a601edf0..d5c51e332046 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -959,7 +959,7 @@ err:
* bch_btree_node_get - find a btree node in the cache and lock it, reading it
* in from disk if necessary.
*
- * If IO is necessary and running under generic_make_request, returns -EAGAIN.
+ * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
*
* The btree node will have either a read or a write lock held, depending on
* level and op->lock.
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 7acf024e99f3..a190bf47076d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1115,7 +1115,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
!blk_queue_discard(bdev_get_queue(dc->bdev)))
bio->bi_end_io(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static void quit_max_writeback_rate(struct cache_set *c,
@@ -1158,7 +1158,7 @@ static void quit_max_writeback_rate(struct cache_set *c,
/* Cached devices - read & write stuff */
-blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct bcache_device *d = bio->bi_disk->private_data;
@@ -1197,7 +1197,7 @@ blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
if (!bio->bi_iter.bi_size) {
/*
* can't call bch_journal_meta from under
- * generic_make_request
+ * submit_bio_noacct
*/
continue_at_nobarrier(&s->cl,
cached_dev_nodata,
@@ -1228,36 +1228,8 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
-static int cached_dev_congested(void *data, int bits)
-{
- struct bcache_device *d = data;
- struct cached_dev *dc = container_of(d, struct cached_dev, disk);
- struct request_queue *q = bdev_get_queue(dc->bdev);
- int ret = 0;
-
- if (bdi_congested(q->backing_dev_info, bits))
- return 1;
-
- if (cached_dev_get(dc)) {
- unsigned int i;
- struct cache *ca;
-
- for_each_cache(ca, d->c, i) {
- q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
-
- cached_dev_put(dc);
- }
-
- return ret;
-}
-
void bch_cached_dev_request_init(struct cached_dev *dc)
{
- struct gendisk *g = dc->disk.disk;
-
- g->queue->backing_dev_info->congested_fn = cached_dev_congested;
dc->disk.cache_miss = cached_dev_cache_miss;
dc->disk.ioctl = cached_dev_ioctl;
}
@@ -1291,7 +1263,7 @@ static void flash_dev_nodata(struct closure *cl)
continue_at(cl, search_free, NULL);
}
-blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
@@ -1311,8 +1283,7 @@ blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio)
if (!bio->bi_iter.bi_size) {
/*
- * can't call bch_journal_meta from under
- * generic_make_request
+ * can't call bch_journal_meta from under submit_bio_noacct
*/
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
@@ -1342,27 +1313,8 @@ static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
return -ENOTTY;
}
-static int flash_dev_congested(void *data, int bits)
-{
- struct bcache_device *d = data;
- struct request_queue *q;
- struct cache *ca;
- unsigned int i;
- int ret = 0;
-
- for_each_cache(ca, d->c, i) {
- q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
-
- return ret;
-}
-
void bch_flash_dev_request_init(struct bcache_device *d)
{
- struct gendisk *g = d->disk;
-
- g->queue->backing_dev_info->congested_fn = flash_dev_congested;
d->cache_miss = flash_dev_cache_miss;
d->ioctl = flash_dev_ioctl;
}
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index bb005c93dd72..82b38366a95d 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -37,10 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
-blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio);
+blk_qc_t cached_dev_submit_bio(struct bio *bio);
void bch_flash_dev_request_init(struct bcache_device *d);
-blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio);
+blk_qc_t flash_dev_submit_bio(struct bio *bio);
extern struct kmem_cache *bch_search_cache;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2014016f9a60..9e45faa054b6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -680,7 +680,16 @@ static int ioctl_dev(struct block_device *b, fmode_t mode,
return d->ioctl(d, mode, cmd, arg);
}
-static const struct block_device_operations bcache_ops = {
+static const struct block_device_operations bcache_cached_ops = {
+ .submit_bio = cached_dev_submit_bio,
+ .open = open_dev,
+ .release = release_dev,
+ .ioctl = ioctl_dev,
+ .owner = THIS_MODULE,
+};
+
+static const struct block_device_operations bcache_flash_ops = {
+ .submit_bio = flash_dev_submit_bio,
.open = open_dev,
.release = release_dev,
.ioctl = ioctl_dev,
@@ -820,8 +829,8 @@ static void bcache_device_free(struct bcache_device *d)
}
static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
- sector_t sectors, make_request_fn make_request_fn,
- struct block_device *cached_bdev)
+ sector_t sectors, struct block_device *cached_bdev,
+ const struct block_device_operations *ops)
{
struct request_queue *q;
const size_t max_stripes = min_t(size_t, INT_MAX,
@@ -868,16 +877,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
d->disk->major = bcache_major;
d->disk->first_minor = idx_to_first_minor(idx);
- d->disk->fops = &bcache_ops;
+ d->disk->fops = ops;
d->disk->private_data = d;
- q = blk_alloc_queue(make_request_fn, NUMA_NO_NODE);
+ q = blk_alloc_queue(NUMA_NO_NODE);
if (!q)
return -ENOMEM;
d->disk->queue = q;
- q->queuedata = d;
- q->backing_dev_info->congested_data = d;
q->limits.max_hw_sectors = UINT_MAX;
q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX;
@@ -1356,7 +1363,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
ret = bcache_device_init(&dc->disk, block_size,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
- cached_dev_make_request, dc->bdev);
+ dc->bdev, &bcache_cached_ops);
if (ret)
return ret;
@@ -1469,7 +1476,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
kobject_init(&d->kobj, &bch_flash_dev_ktype);
if (bcache_device_init(d, block_bytes(c), u->sectors,
- flash_dev_make_request, NULL))
+ NULL, &bcache_flash_ops))
goto err;
bcache_device_attach(d, c, u - c->uuids);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index d3bb355819a4..96c93802ee4d 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -421,8 +421,6 @@ struct cache {
struct rw_semaphore quiesce_lock;
- struct dm_target_callbacks callbacks;
-
/*
* origin_blocks entries, discarded if set.
*/
@@ -886,7 +884,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
static void accounted_request(struct cache *cache, struct bio *bio)
{
accounted_begin(cache, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static void issue_op(struct bio *bio, void *context)
@@ -1792,7 +1790,7 @@ static bool process_bio(struct cache *cache, struct bio *bio)
bool commit_needed;
if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return commit_needed;
}
@@ -1858,7 +1856,7 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
if (cache->features.discard_passdown) {
remap_to_origin(cache, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
} else
bio_endio(bio);
@@ -2423,20 +2421,6 @@ static void set_cache_size(struct cache *cache, dm_cblock_t size)
cache->cache_size = size;
}
-static int is_congested(struct dm_dev *dev, int bdi_bits)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
-static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
- struct cache *cache = container_of(cb, struct cache, callbacks);
-
- return is_congested(cache->origin_dev, bdi_bits) ||
- is_congested(cache->cache_dev, bdi_bits);
-}
-
#define DEFAULT_MIGRATION_THRESHOLD 2048
static int cache_create(struct cache_args *ca, struct cache **result)
@@ -2471,9 +2455,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
- cache->callbacks.congested_fn = cache_is_congested;
- dm_table_add_target_callbacks(ti->table, &cache->callbacks);
-
cache->metadata_dev = ca->metadata_dev;
cache->origin_dev = ca->origin_dev;
cache->cache_dev = ca->cache_dev;
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 5ce96ddf1ce1..bdb255edc200 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -68,7 +68,6 @@ struct hash_table_bucket;
struct clone {
struct dm_target *ti;
- struct dm_target_callbacks callbacks;
struct dm_dev *metadata_dev;
struct dm_dev *dest_dev;
@@ -330,7 +329,7 @@ static void submit_bios(struct bio_list *bios)
blk_start_plug(&plug);
while ((bio = bio_list_pop(bios)))
- generic_make_request(bio);
+ submit_bio_noacct(bio);
blk_finish_plug(&plug);
}
@@ -346,7 +345,7 @@ static void submit_bios(struct bio_list *bios)
static void issue_bio(struct clone *clone, struct bio *bio)
{
if (!bio_triggers_commit(clone, bio)) {
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return;
}
@@ -473,7 +472,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
bio_region_range(clone, bio, &rs, &nr_regions);
trim_bio(bio, region_to_sector(clone, rs),
nr_regions << clone->region_shift);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
} else
bio_endio(bio);
}
@@ -865,7 +864,7 @@ static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio
bio->bi_private = hd;
atomic_inc(&hd->clone->hydrations_in_flight);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
/*
@@ -1281,7 +1280,7 @@ static void process_deferred_flush_bios(struct clone *clone)
*/
bio_endio(bio);
} else {
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
}
@@ -1518,18 +1517,6 @@ error:
DMEMIT("Error");
}
-static int clone_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
- struct request_queue *dest_q, *source_q;
- struct clone *clone = container_of(cb, struct clone, callbacks);
-
- source_q = bdev_get_queue(clone->source_dev->bdev);
- dest_q = bdev_get_queue(clone->dest_dev->bdev);
-
- return (bdi_congested(dest_q->backing_dev_info, bdi_bits) |
- bdi_congested(source_q->backing_dev_info, bdi_bits));
-}
-
static sector_t get_dev_size(struct dm_dev *dev)
{
return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
@@ -1930,8 +1917,6 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto out_with_mempool;
mutex_init(&clone->commit_lock);
- clone->callbacks.congested_fn = clone_is_congested;
- dm_table_add_target_callbacks(ti->table, &clone->callbacks);
/* Enable flushes */
ti->num_flush_bios = 1;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 000ddfab5ba0..ad324abb8c49 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1789,7 +1789,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
return 1;
}
- generic_make_request(clone);
+ submit_bio_noacct(clone);
return 0;
}
@@ -1815,7 +1815,7 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
{
struct bio *clone = io->ctx.bio_out;
- generic_make_request(clone);
+ submit_bio_noacct(clone);
}
#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
@@ -1893,7 +1893,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
clone->bi_iter.bi_sector = cc->start + io->sector;
if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
- generic_make_request(clone);
+ submit_bio_noacct(clone);
return;
}
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index f496213f8b67..2628a832787b 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -72,7 +72,7 @@ static void flush_bios(struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = n;
}
}
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index bdb84b8e7162..b24e3839bb3a 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1137,7 +1137,6 @@ static int metadata_get_stats(struct era_metadata *md, void *ptr)
struct era {
struct dm_target *ti;
- struct dm_target_callbacks callbacks;
struct dm_dev *metadata_dev;
struct dm_dev *origin_dev;
@@ -1265,7 +1264,7 @@ static void process_deferred_bios(struct era *era)
bio_io_error(bio);
else
while ((bio = bio_list_pop(&marked_bios)))
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static void process_rpc_calls(struct era *era)
@@ -1375,18 +1374,6 @@ static void stop_worker(struct era *era)
/*----------------------------------------------------------------
* Target methods
*--------------------------------------------------------------*/
-static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
-static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
- struct era *era = container_of(cb, struct era, callbacks);
- return dev_is_congested(era->origin_dev, bdi_bits);
-}
-
static void era_destroy(struct era *era)
{
if (era->md)
@@ -1514,8 +1501,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->flush_supported = true;
ti->num_discard_bios = 1;
- era->callbacks.congested_fn = era_is_congested;
- dm_table_add_target_callbacks(ti->table, &era->callbacks);
return 0;
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index a83a1de1e03f..5da3eb661e50 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2115,12 +2115,12 @@ offload_to_thread:
dio->in_flight = (atomic_t)ATOMIC_INIT(1);
dio->completion = NULL;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return;
}
- generic_make_request(bio);
+ submit_bio_noacct(bio);
if (need_sync_io) {
wait_for_completion_io(&read_comp);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 78cff42d987e..73bb23de6336 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -677,7 +677,7 @@ static void process_queued_bios(struct work_struct *work)
bio_endio(bio);
break;
case DM_MAPIO_REMAPPED:
- generic_make_request(bio);
+ submit_bio_noacct(bio);
break;
case DM_MAPIO_SUBMITTED:
break;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 10e8b2fe787b..d9e270957e18 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -242,7 +242,6 @@ struct raid_set {
struct mddev md;
struct raid_type *raid_type;
- struct dm_target_callbacks callbacks;
sector_t array_sectors;
sector_t dev_sectors;
@@ -1705,13 +1704,6 @@ static void do_table_event(struct work_struct *ws)
dm_table_event(rs->ti->table);
}
-static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
-{
- struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
- return mddev_congested(&rs->md, bits);
-}
-
/*
* Make sure a valid takover (level switch) is being requested on @rs
*
@@ -3248,9 +3240,6 @@ size_check:
goto bad_md_start;
}
- rs->callbacks.congested_fn = raid_is_congested;
- dm_table_add_target_callbacks(ti->table, &rs->callbacks);
-
/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
@@ -3310,7 +3299,6 @@ static void raid_dtr(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- list_del_init(&rs->callbacks.list);
md_stop(&rs->md);
raid_set_free(rs);
}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 2f655d9f4200..fa09bc4e4c54 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -779,7 +779,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
wakeup_mirrord(ms);
} else {
map_bio(get_default_mirror(ms), bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 85e0daabad49..7ce387a1cc6a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -284,7 +284,8 @@ static void dm_complete_request(struct request *rq, blk_status_t error)
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
- blk_mq_complete_request(rq);
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
}
/*
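
[Editor's note] The dm-rq.c hunk above comes from the softirq
completion cleanups: blk_mq_complete_request() no longer performs
timeout fault injection internally, so a driver that completes requests
itself must check blk_should_fake_timeout() and simply skip the
completion when injection (CONFIG_FAIL_IO_TIMEOUT) wants the request to
appear timed out, letting the block layer's timeout handler take over.
A hedged sketch of the convention; sketch_complete is a hypothetical
name:

#include <linux/blk-mq.h>

static void sketch_complete(struct request *rq)
{
	/* suppress completion while timeout injection is armed */
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}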
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 963d3774c93e..2d1d4a4c399c 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -252,7 +252,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
/*
* Issue the synchronous I/O from a different thread
- * to avoid generic_make_request recursion.
+ * to avoid submit_bio_noacct recursion.
*/
INIT_WORK_ONSTACK(&req.work, do_metadata);
queue_work(ps->metadata_wq, &req.work);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 6b11a266299f..4668b2cd98f4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1568,7 +1568,7 @@ static void flush_bios(struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = n;
}
}
@@ -1588,7 +1588,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
bio->bi_next = NULL;
r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = n;
}
}
@@ -1829,7 +1829,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
bio->bi_end_io = full_bio_end_io;
bio->bi_private = callback_data;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static struct dm_snap_pending_exception *
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8277b959e00b..0ea5b7367179 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -64,8 +64,6 @@ struct dm_table {
void *event_context;
struct dm_md_mempools *mempools;
-
- struct list_head target_callbacks;
};
/*
@@ -190,7 +188,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
return -ENOMEM;
INIT_LIST_HEAD(&t->devices);
- INIT_LIST_HEAD(&t->target_callbacks);
if (!num_targets)
num_targets = KEYS_PER_NODE;
@@ -361,7 +358,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* This upgrades the mode on an already open dm_dev, being
* careful to leave things as they were if we fail to reopen the
* device and not to touch the existing bdev field in case
- * it is accessed concurrently inside dm_table_any_congested().
+ * it is accessed concurrently.
*/
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
struct mapped_device *md)
@@ -2052,38 +2049,6 @@ int dm_table_resume_targets(struct dm_table *t)
return 0;
}
-void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
-{
- list_add(&cb->list, &t->target_callbacks);
-}
-EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
-
-int dm_table_any_congested(struct dm_table *t, int bdi_bits)
-{
- struct dm_dev_internal *dd;
- struct list_head *devices = dm_table_get_devices(t);
- struct dm_target_callbacks *cb;
- int r = 0;
-
- list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
- char b[BDEVNAME_SIZE];
-
- if (likely(q))
- r |= bdi_congested(q->backing_dev_info, bdi_bits);
- else
- DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
- dm_device_name(t->md),
- bdevname(dd->dm_dev->bdev, b));
- }
-
- list_for_each_entry(cb, &t->target_callbacks, list)
- if (cb->congested_fn)
- r |= cb->congested_fn(cb, bdi_bits);
-
- return r;
-}
-
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
return t->md;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fa8d5464c1fb..fff4c50df74d 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -326,7 +326,6 @@ struct pool_c {
struct pool *pool;
struct dm_dev *data_dev;
struct dm_dev *metadata_dev;
- struct dm_target_callbacks callbacks;
dm_block_t low_water_blocks;
struct pool_features requested_pf; /* Features requested during table load */
@@ -758,7 +757,7 @@ static void issue(struct thin_c *tc, struct bio *bio)
struct pool *pool = tc->pool;
if (!bio_triggers_commit(tc, bio)) {
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return;
}
@@ -2394,7 +2393,7 @@ static void process_deferred_bios(struct pool *pool)
if (bio->bi_opf & REQ_PREFLUSH)
bio_endio(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
@@ -2796,18 +2795,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
}
}
-static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
- struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
- struct request_queue *q;
-
- if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
- return 1;
-
- q = bdev_get_queue(pt->data_dev->bdev);
- return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
static void requeue_bios(struct pool *pool)
{
struct thin_c *tc;
@@ -3420,9 +3407,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
dm_pool_register_pre_commit_callback(pool->pmd,
metadata_pre_commit_callback, pool);
- pt->callbacks.congested_fn = pool_is_congested;
- dm_table_add_target_callbacks(ti->table, &pt->callbacks);
-
mutex_unlock(&dm_thin_pool_table.mutex);
return 0;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index eec9f252e935..75fa4d9b7617 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -681,7 +681,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
verity_submit_prefetch(v, io);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return DM_MAPIO_SUBMITTED;
}
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 5358894bb9fd..8aa306ebc2ab 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1244,7 +1244,7 @@ static int writecache_flush_thread(void *data)
bio_end_sector(bio));
wc_unlock(wc);
bio_set_dev(bio, wc->dev->bdev);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
} else {
writecache_flush(wc);
wc_unlock(wc);
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 42aa5139df7c..697f9de37355 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -140,7 +140,7 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
bio_advance(bio, clone->bi_iter.bi_size);
refcount_inc(&bioctx->ref);
- generic_make_request(clone);
+ submit_bio_noacct(clone);
if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
zone->wp_block += nr_blocks;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5b9de2f71bb0..87cf45f619fd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1273,7 +1273,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
sector_t sector;
struct bio *clone = &tio->clone;
struct dm_io *io = tio->io;
- struct mapped_device *md = io->md;
struct dm_target *ti = tio->ti;
blk_qc_t ret = BLK_QC_T_NONE;
@@ -1295,10 +1294,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone->bi_disk->queue, clone,
bio_dev(io->orig_bio), sector);
- if (md->type == DM_TYPE_NVME_BIO_BASED)
- ret = direct_make_request(clone);
- else
- ret = generic_make_request(clone);
+ ret = submit_bio_noacct(clone);
break;
case DM_MAPIO_KILL:
free_tio(tio);
@@ -1645,7 +1641,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
error = __split_and_process_non_flush(&ci);
if (current->bio_list && ci.sector_count && !error) {
/*
- * Remainder must be passed to generic_make_request()
+ * Remainder must be passed to submit_bio_noacct()
* so that it gets handled *after* bios already submitted
* have been completely processed.
* We take a clone of the original to store in
@@ -1670,7 +1666,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
bio_chain(b, bio);
trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
- ret = generic_make_request(bio);
+ ret = submit_bio_noacct(bio);
break;
}
}
@@ -1738,7 +1734,7 @@ static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struc
bio_chain(split, *bio);
trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
- generic_make_request(*bio);
+ submit_bio_noacct(*bio);
*bio = split;
}
}
@@ -1763,13 +1759,13 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
}
/*
- * If in ->make_request_fn we need to use blk_queue_split(), otherwise
+ * If in ->queue_bio we need to use blk_queue_split(), otherwise
* queue_limits for abnormal requests (e.g. discard, writesame, etc)
* won't be imposed.
*/
if (current->bio_list) {
if (is_abnormal_io(bio))
- blk_queue_split(md->queue, &bio);
+ blk_queue_split(&bio);
else
dm_queue_split(md, ti, &bio);
}
@@ -1780,9 +1776,9 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
return __split_and_process_bio(md, map, bio);
}
-static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t dm_submit_bio(struct bio *bio)
{
- struct mapped_device *md = q->queuedata;
+ struct mapped_device *md = bio->bi_disk->private_data;
blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
@@ -1791,12 +1787,12 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
/*
* We are called with a live reference on q_usage_counter, but
* that one will be released as soon as we return. Grab an
- * extra one as blk_mq_make_request expects to be able to
- * consume a reference (which lives until the request is freed
- * in case a request is allocated).
+ * extra one as blk_mq_submit_bio expects to be able to consume
+ * a reference (which lives until the request is freed in case a
+ * request is allocated).
*/
- percpu_ref_get(&q->q_usage_counter);
- return blk_mq_make_request(q, bio);
+ percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
+ return blk_mq_submit_bio(bio);
}
map = dm_get_live_table(md, &srcu_idx);
@@ -1818,31 +1814,6 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
return ret;
}
-static int dm_any_congested(void *congested_data, int bdi_bits)
-{
- int r = bdi_bits;
- struct mapped_device *md = congested_data;
- struct dm_table *map;
-
- if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
- if (dm_request_based(md)) {
- /*
- * With request-based DM we only need to check the
- * top-level queue for congestion.
- */
- struct backing_dev_info *bdi = md->queue->backing_dev_info;
- r = bdi->wb.congested->state & bdi_bits;
- } else {
- map = dm_get_live_table_fast(md);
- if (map)
- r = dm_table_any_congested(map, bdi_bits);
- dm_put_live_table_fast(md);
- }
- }
-
- return r;
-}
-
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
@@ -1981,14 +1952,13 @@ static struct mapped_device *alloc_dev(int minor)
spin_lock_init(&md->uevent_lock);
/*
- * default to bio-based required ->make_request_fn until DM
- * table is loaded and md->type established. If request-based
- * table is loaded: blk-mq will override accordingly.
+ * default to bio-based until DM table is loaded and md->type
+ * established. If request-based table is loaded: blk-mq will
+ * override accordingly.
*/
- md->queue = blk_alloc_queue(dm_make_request, numa_node_id);
+ md->queue = blk_alloc_queue(numa_node_id);
if (!md->queue)
goto bad;
- md->queue->queuedata = md;
md->disk = alloc_disk_node(1, md->numa_node_id);
if (!md->disk)
@@ -2282,12 +2252,6 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
-static void dm_init_congested_fn(struct mapped_device *md)
-{
- md->queue->backing_dev_info->congested_data = md;
- md->queue->backing_dev_info->congested_fn = dm_any_congested;
-}
-
/*
* Setup the DM device's queue based on md's type
*/
@@ -2304,12 +2268,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
DMERR("Cannot initialize queue for request-based dm-mq mapped device");
return r;
}
- dm_init_congested_fn(md);
break;
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
case DM_TYPE_NVME_BIO_BASED:
- dm_init_congested_fn(md);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
@@ -2531,7 +2493,7 @@ static void dm_wq_work(struct work_struct *work)
break;
if (dm_request_based(md))
- (void) generic_make_request(c);
+ (void) submit_bio_noacct(c);
else
(void) dm_process_bio(md, map, c);
}
@@ -3286,6 +3248,7 @@ static const struct pr_ops dm_pr_ops = {
};
static const struct block_device_operations dm_blk_dops = {
+ .submit_bio = dm_submit_bio,
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index d7c4f6606b5f..4f5fe664d05a 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -63,7 +63,6 @@ void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
-int dm_table_any_congested(struct dm_table *t, int bdi_bits);
enum dm_queue_mode dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index 50ad4ba86f0e..fda4cb3f936f 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -169,7 +169,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
/* write request */
if (atomic_read(&conf->counters[WriteAll])) {
- /* special case - don't decrement, don't generic_make_request,
+ /* special case - don't decrement, don't submit_bio_noacct,
* just fail immediately
*/
bio_io_error(bio);
@@ -214,7 +214,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
} else
bio_set_dev(bio, conf->rdev->bdev);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return true;
}
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 26c75c0199fa..c2ae9125c4c3 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -46,29 +46,6 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
return conf->disks + lo;
}
-/*
- * In linear_congested() conf->raid_disks is used as a copy of
- * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks
- * and conf->disks[] are created in linear_conf(), they are always
- * consitent with each other, but mddev->raid_disks does not.
- */
-static int linear_congested(struct mddev *mddev, int bits)
-{
- struct linear_conf *conf;
- int i, ret = 0;
-
- rcu_read_lock();
- conf = rcu_dereference(mddev->private);
-
- for (i = 0; i < conf->raid_disks && !ret ; i++) {
- struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
-
- rcu_read_unlock();
- return ret;
-}
-
static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
struct linear_conf *conf;
@@ -267,7 +244,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split = bio_split(bio, end_sector - bio_sector,
GFP_NOIO, &mddev->bio_set);
bio_chain(split, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = split;
}
@@ -286,7 +263,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
bio_sector);
mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
return true;
@@ -322,7 +299,6 @@ static struct md_personality linear_personality =
.hot_add_disk = linear_add,
.size = linear_size,
.quiesce = linear_quiesce,
- .congested = linear_congested,
};
static int __init linear_init (void)
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 152f9e65a226..776bbe542db5 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -131,7 +131,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
mp_bh->bio.bi_private = mp_bh;
mddev_check_writesame(mddev, &mp_bh->bio);
mddev_check_write_zeroes(mddev, &mp_bh->bio);
- generic_make_request(&mp_bh->bio);
+ submit_bio_noacct(&mp_bh->bio);
return true;
}
@@ -151,28 +151,6 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
seq_putc(seq, ']');
}
-static int multipath_congested(struct mddev *mddev, int bits)
-{
- struct mpconf *conf = mddev->private;
- int i, ret = 0;
-
- rcu_read_lock();
- for (i = 0; i < mddev->raid_disks ; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q = bdev_get_queue(rdev->bdev);
-
- ret |= bdi_congested(q->backing_dev_info, bits);
- /* Just like multipath_map, we just check the
- * first available device
- */
- break;
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
/*
* Careful, this can execute in IRQ contexts as well!
*/
@@ -348,7 +326,7 @@ static void multipathd(struct md_thread *thread)
bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -478,7 +456,6 @@ static struct md_personality multipath_personality =
.hot_add_disk = multipath_add_disk,
.hot_remove_disk= multipath_remove_disk,
.size = multipath_size,
- .congested = multipath_congested,
};
static int __init multipath_init (void)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f567f536b529..96b28f6d025c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -199,7 +199,7 @@ static int rdevs_init_serial(struct mddev *mddev)
static int rdev_need_serial(struct md_rdev *rdev)
{
return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
- rdev->bdev->bd_queue->nr_hw_queues != 1 &&
+ rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
test_bit(WriteMostly, &rdev->flags));
}
@@ -463,7 +463,7 @@ check_suspended:
}
EXPORT_SYMBOL(md_handle_request);
-static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
const int sgrp = op_stat_group(bio_op(bio));
@@ -475,7 +475,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
@@ -549,26 +549,6 @@ void mddev_resume(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_resume);
-int mddev_congested(struct mddev *mddev, int bits)
-{
- struct md_personality *pers = mddev->pers;
- int ret = 0;
-
- rcu_read_lock();
- if (mddev->suspended)
- ret = 1;
- else if (pers && pers->congested)
- ret = pers->congested(mddev, bits);
- rcu_read_unlock();
- return ret;
-}
-EXPORT_SYMBOL_GPL(mddev_congested);
-static int md_congested(void *data, int bits)
-{
- struct mddev *mddev = data;
- return mddev_congested(mddev, bits);
-}
-
/*
* Generic flush handling for md
*/
@@ -5641,7 +5621,7 @@ static int md_alloc(dev_t dev, char *name)
mddev->hold_active = UNTIL_STOP;
error = -ENOMEM;
- mddev->queue = blk_alloc_queue(md_make_request, NUMA_NO_NODE);
+ mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!mddev->queue)
goto abort;
@@ -5670,6 +5650,7 @@ static int md_alloc(dev_t dev, char *name)
* remove it now.
*/
disk->flags |= GENHD_FL_EXT_DEVT;
+ disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
/* As soon as we call add_disk(), another thread could get
* through to md_open, so make sure it doesn't get too far
@@ -5964,8 +5945,6 @@ int md_run(struct mddev *mddev)
blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
- mddev->queue->backing_dev_info->congested_data = mddev;
- mddev->queue->backing_dev_info->congested_fn = md_congested;
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -6350,7 +6329,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
__md_stop_writes(mddev);
__md_stop(mddev);
- mddev->queue->backing_dev_info->congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
@@ -7806,23 +7784,21 @@ static void md_release(struct gendisk *disk, fmode_t mode)
mddev_put(mddev);
}
-static int md_media_changed(struct gendisk *disk)
-{
- struct mddev *mddev = disk->private_data;
-
- return mddev->changed;
-}
-
-static int md_revalidate(struct gendisk *disk)
+static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
{
struct mddev *mddev = disk->private_data;
+ unsigned int ret = 0;
+ if (mddev->changed)
+ ret = DISK_EVENT_MEDIA_CHANGE;
mddev->changed = 0;
- return 0;
+ return ret;
}
+
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
+ .submit_bio = md_submit_bio,
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
@@ -7830,8 +7806,7 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
- .media_changed = md_media_changed,
- .revalidate_disk= md_revalidate,
+ .check_events = md_check_events,
};
static int md_thread(void *arg)
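
[Editor's note] The md_fops change above is the check_events migration
named in the pull message: the legacy ->media_changed / ->revalidate_disk
pair is replaced by ->check_events, which returns a mask of DISK_EVENT_*
bits observed since the last poll, and the disk advertises which events
it can report via gendisk->events (hence the DISK_EVENT_MEDIA_CHANGE
line added in md_alloc() above). A minimal sketch of the convention,
with a hypothetical driver flag d->changed:

#include <linux/genhd.h>

/* hypothetical driver state */
struct sketch_md_dev {
	bool changed;	/* set whenever the device's content changes */
};

static unsigned int sketch_check_events(struct gendisk *disk,
					unsigned int clearing)
{
	struct sketch_md_dev *d = disk->private_data;
	unsigned int events = 0;

	if (d->changed)
		events |= DISK_EVENT_MEDIA_CHANGE;
	d->changed = false;	/* reported once, then cleared */
	return events;
}

/*
 * At allocation time the disk must advertise the events it can report,
 * or the core never polls ->check_events for them:
 *
 *	disk->events |= DISK_EVENT_MEDIA_CHANGE;
 */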
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 612814d07d35..e2f1ad9afc48 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -597,9 +597,6 @@ struct md_personality
* array.
*/
void *(*takeover) (struct mddev *mddev);
- /* congested implements bdi.congested_fn().
- * Will not be called while array is 'suspended' */
- int (*congested)(struct mddev *mddev, int bits);
/* Changes the consistency policy of an active array. */
int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
};
@@ -710,7 +707,6 @@ extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
-extern int mddev_congested(struct mddev *mddev, int bits);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 322386ff5d22..f54a449f97aa 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -29,21 +29,6 @@ module_param(default_layout, int, 0644);
(1L << MD_HAS_PPL) | \
(1L << MD_HAS_MULTIPLE_PPLS))
-static int raid0_congested(struct mddev *mddev, int bits)
-{
- struct r0conf *conf = mddev->private;
- struct md_rdev **devlist = conf->devlist;
- int raid_disks = conf->strip_zone[0].nb_dev;
- int i, ret = 0;
-
- for (i = 0; i < raid_disks && !ret ; i++) {
- struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
-
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
- return ret;
-}
-
/*
* inform the user of the raid configuration
*/
@@ -495,7 +480,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
&mddev->bio_set);
bio_chain(split, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = split;
end = zone->zone_end;
} else
@@ -559,7 +544,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
trace_block_bio_remap(bdev_get_queue(rdev->bdev),
discard_bio, disk_devt(mddev->gendisk),
bio->bi_iter.bi_sector);
- generic_make_request(discard_bio);
+ submit_bio_noacct(discard_bio);
}
bio_endio(bio);
}
@@ -600,7 +585,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split = bio_split(bio, sectors, GFP_NOIO,
&mddev->bio_set);
bio_chain(split, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = split;
}
@@ -633,7 +618,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
disk_devt(mddev->gendisk), bio_sector);
mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return true;
}
@@ -818,7 +803,6 @@ static struct md_personality raid0_personality=
.size = raid0_size,
.takeover = raid0_takeover,
.quiesce = raid0_quiesce,
- .congested = raid0_congested,
};
static int __init raid0_init (void)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index dcd27f3da84e..960d854c07f8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -786,36 +786,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
return best_disk;
}
-static int raid1_congested(struct mddev *mddev, int bits)
-{
- struct r1conf *conf = mddev->private;
- int i, ret = 0;
-
- if ((bits & (1 << WB_async_congested)) &&
- conf->pending_count >= max_queued_requests)
- return 1;
-
- rcu_read_lock();
- for (i = 0; i < conf->raid_disks * 2; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q = bdev_get_queue(rdev->bdev);
-
- BUG_ON(!q);
-
- /* Note the '|| 1' - when read_balance prefers
- * non-congested targets, it can be removed
- */
- if ((bits & (1 << WB_async_congested)) || 1)
- ret |= bdi_congested(q->backing_dev_info, bits);
- else
- ret &= bdi_congested(q->backing_dev_info, bits);
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
/* flush any pending bitmap writes to disk before proceeding w/ I/O */
@@ -834,7 +804,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
/* Just ignore it */
bio_endio(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = next;
cond_resched();
}
@@ -1312,7 +1282,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
bio_chain(split, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = split;
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
@@ -1338,7 +1308,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
disk_devt(mddev->gendisk), r1_bio->sector);
- generic_make_request(read_bio);
+ submit_bio_noacct(read_bio);
}
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -1483,7 +1453,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
struct bio *split = bio_split(bio, max_sectors,
GFP_NOIO, &conf->bio_split);
bio_chain(split, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = split;
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
@@ -2240,7 +2210,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
- generic_make_request(wbio);
+ submit_bio_noacct(wbio);
}
put_sync_write_buf(r1_bio, 1);
@@ -2926,7 +2896,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
} else {
@@ -2935,7 +2905,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
return nr_sectors;
}
@@ -3396,7 +3366,6 @@ static struct md_personality raid1_personality =
.check_reshape = raid1_reshape,
.quiesce = raid1_quiesce,
.takeover = raid1_takeover,
- .congested = raid1_congested,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ec136e44aef7..353288bc4cb7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -848,31 +848,6 @@ static struct md_rdev *read_balance(struct r10conf *conf,
return rdev;
}
-static int raid10_congested(struct mddev *mddev, int bits)
-{
- struct r10conf *conf = mddev->private;
- int i, ret = 0;
-
- if ((bits & (1 << WB_async_congested)) &&
- conf->pending_count >= max_queued_requests)
- return 1;
-
- rcu_read_lock();
- for (i = 0;
- (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
- && ret == 0;
- i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q = bdev_get_queue(rdev->bdev);
-
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static void flush_pending_writes(struct r10conf *conf)
{
/* Any writes that have been queued but are awaiting
@@ -917,7 +892,7 @@ static void flush_pending_writes(struct r10conf *conf)
/* Just ignore it */
bio_endio(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = next;
}
blk_finish_plug(&plug);
@@ -1102,7 +1077,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
/* Just ignore it */
bio_endio(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = next;
}
kfree(plug);
@@ -1194,7 +1169,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
gfp, &conf->bio_split);
bio_chain(split, bio);
allow_barrier(conf);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
wait_barrier(conf);
bio = split;
r10_bio->master_bio = bio;
@@ -1221,7 +1196,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(read_bio->bi_disk->queue,
read_bio, disk_devt(mddev->gendisk),
r10_bio->sector);
- generic_make_request(read_bio);
+ submit_bio_noacct(read_bio);
return;
}
@@ -1479,7 +1454,7 @@ retry_write:
GFP_NOIO, &conf->bio_split);
bio_chain(split, bio);
allow_barrier(conf);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
wait_barrier(conf);
bio = split;
r10_bio->master_bio = bio;
@@ -2099,7 +2074,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
- generic_make_request(tbio);
+ submit_bio_noacct(tbio);
}
/* Now write out to any replacement devices
@@ -2118,7 +2093,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].replacement->bdev,
bio_sectors(tbio));
- generic_make_request(tbio);
+ submit_bio_noacct(tbio);
}
done:
@@ -2241,7 +2216,7 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
wbio = r10_bio->devs[1].bio;
wbio2 = r10_bio->devs[1].repl_bio;
/* Need to test wbio2->bi_end_io before we call
- * generic_make_request as if the former is NULL,
+ * submit_bio_noacct as if the former is NULL,
* the latter is free to free wbio2.
*/
if (wbio2 && !wbio2->bi_end_io)
@@ -2249,13 +2224,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
- generic_make_request(wbio);
+ submit_bio_noacct(wbio);
}
if (wbio2) {
atomic_inc(&conf->mirrors[d].replacement->nr_pending);
md_sync_acct(conf->mirrors[d].replacement->bdev,
bio_sectors(wbio2));
- generic_make_request(wbio2);
+ submit_bio_noacct(wbio2);
}
}
@@ -2889,7 +2864,7 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf)
* a number of r10_bio structures, one for each out-of-sync device.
* As we setup these structures, we collect all bio's together into a list
* which we then process collectively to add pages, and then process again
- * to pass to generic_make_request.
+ * to pass to submit_bio_noacct.
*
* The r10_bio structures are linked using a borrowed master_bio pointer.
* This link is counted in ->remaining. When the r10_bio that points to NULL
@@ -3496,7 +3471,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io == end_sync_read) {
md_sync_acct_bio(bio, nr_sectors);
bio->bi_status = 0;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
@@ -4654,7 +4629,7 @@ read_more:
md_sync_acct_bio(read_bio, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
- generic_make_request(read_bio);
+ submit_bio_noacct(read_bio);
sectors_done += nr_sectors;
if (sector_nr <= last)
goto read_more;
@@ -4717,7 +4692,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
md_sync_acct_bio(b, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
b->bi_next = NULL;
- generic_make_request(b);
+ submit_bio_noacct(b);
}
end_reshape_request(r10_bio);
}
@@ -4929,7 +4904,6 @@ static struct md_personality raid10_personality =
.start_reshape = raid10_start_reshape,
.finish_reshape = raid10_finish_reshape,
.update_reshape_pos = raid10_update_reshape_pos,
- .congested = raid10_congested,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ab8067f9ce8c..774ea893d47e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -873,7 +873,7 @@ static void dispatch_bio_list(struct bio_list *tmp)
struct bio *bio;
while ((bio = bio_list_pop(tmp)))
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static int cmp_stripe(void *priv, struct list_head *a, struct list_head *b)
@@ -1151,7 +1151,7 @@ again:
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, bi);
else
- generic_make_request(bi);
+ submit_bio_noacct(bi);
}
if (rrdev) {
if (s->syncing || s->expanding || s->expanded
@@ -1201,7 +1201,7 @@ again:
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, rbi);
else
- generic_make_request(rbi);
+ submit_bio_noacct(rbi);
}
if (!rdev && !rrdev) {
if (op_is_write(op))
@@ -5099,28 +5099,6 @@ static void activate_bit_delay(struct r5conf *conf,
}
}
-static int raid5_congested(struct mddev *mddev, int bits)
-{
- struct r5conf *conf = mddev->private;
-
- /* No difference between reads and writes. Just check
- * how busy the stripe_cache is
- */
-
- if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
- return 1;
-
- /* Also checks whether there is pressure on r5cache log space */
- if (test_bit(R5C_LOG_TIGHT, &conf->cache_state))
- return 1;
- if (conf->quiesce)
- return 1;
- if (atomic_read(&conf->empty_inactive_list_nr))
- return 1;
-
- return 0;
-}
-
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
struct r5conf *conf = mddev->private;
@@ -5289,7 +5267,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
trace_block_bio_remap(align_bi->bi_disk->queue,
align_bi, disk_devt(mddev->gendisk),
raid_bio->bi_iter.bi_sector);
- generic_make_request(align_bi);
+ submit_bio_noacct(align_bi);
return 1;
} else {
rcu_read_unlock();
@@ -5309,7 +5287,7 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
struct r5conf *conf = mddev->private;
split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
bio_chain(split, raid_bio);
- generic_make_request(raid_bio);
+ submit_bio_noacct(raid_bio);
raid_bio = split;
}
@@ -8427,7 +8405,6 @@ static struct md_personality raid6_personality =
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
- .congested = raid5_congested,
.change_consistency_policy = raid5_change_consistency_policy,
};
static struct md_personality raid5_personality =
@@ -8452,7 +8429,6 @@ static struct md_personality raid5_personality =
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
- .congested = raid5_congested,
.change_consistency_policy = raid5_change_consistency_policy,
};
@@ -8478,7 +8454,6 @@ static struct md_personality raid4_personality =
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
.takeover = raid4_takeover,
- .congested = raid5_congested,
.change_consistency_policy = raid5_change_consistency_policy,
};