author     Mike Snitzer <snitzer@redhat.com>    2020-10-07 15:15:08 -0400
committer  Mike Snitzer <snitzer@redhat.com>    2020-10-07 18:08:41 -0400
commit     9c37de297f6590937f95a28bec1b7ac68a38618f (patch)
tree       fd07681563007ebf787675a33191ff81a117a181 /drivers/md
parent     61931c0ee9cf5da575996b977a2358b598ef84bb (diff)
download   linux-9c37de297f6590937f95a28bec1b7ac68a38618f.tar.bz2
dm: remove special-casing of bio-based immutable singleton target on NVMe
Since commit 5a6c35f9af416 ("block: remove direct_make_request") there
is no benefit to DM special-casing NVMe. Remove all code used to
establish DM_TYPE_NVME_BIO_BASED.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
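For orientation, the type predicate this patch simplifies can be modelled in plain C. A minimal userspace sketch: the enum constants and the function name mirror the kernel's, but the enum ordering and the main() harness are illustrative assumptions, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Userspace mirror of enum dm_queue_mode from
 * include/linux/device-mapper.h; DM_TYPE_NVME_BIO_BASED is the
 * variant this patch retires. */
enum dm_queue_mode {
        DM_TYPE_NONE,
        DM_TYPE_BIO_BASED,
        DM_TYPE_REQUEST_BASED,
        DM_TYPE_DAX_BIO_BASED,
        DM_TYPE_NVME_BIO_BASED, /* no longer assigned after this patch */
};

/* The predicate as it reads after the patch: NVMe is no longer a
 * distinct bio-based type. */
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
        return (table_type == DM_TYPE_BIO_BASED ||
                table_type == DM_TYPE_DAX_BIO_BASED);
}

int main(void)
{
        /* A table that would previously have been upgraded to
         * DM_TYPE_NVME_BIO_BASED now stays DM_TYPE_BIO_BASED and
         * takes the generic splitting path. */
        printf("bio-based? %d\n", __table_type_bio_based(DM_TYPE_BIO_BASED));
        printf("rq-based?  %d\n", __table_type_bio_based(DM_TYPE_REQUEST_BASED));
        return 0;
}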
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-table.c | 32
-rw-r--r--  drivers/md/dm.c       | 55
2 files changed, 7 insertions(+), 80 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3ad22adf322d..ce543b761be7 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -804,8 +804,7 @@ EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
return (table_type == DM_TYPE_BIO_BASED ||
- table_type == DM_TYPE_DAX_BIO_BASED ||
- table_type == DM_TYPE_NVME_BIO_BASED);
+ table_type == DM_TYPE_DAX_BIO_BASED);
}
static bool __table_type_request_based(enum dm_queue_mode table_type)
@@ -861,8 +860,6 @@ bool dm_table_supports_dax(struct dm_table *t,
return true;
}
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
-
static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -892,7 +889,6 @@ static int dm_table_determine_type(struct dm_table *t)
goto verify_bio_based;
}
BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
- BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
goto verify_rq_based;
}
@@ -931,15 +927,6 @@ verify_bio_based:
if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
(list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED;
- } else {
- /* Check if upgrading to NVMe bio-based is valid or required */
- tgt = dm_table_get_immutable_target(t);
- if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
- t->type = DM_TYPE_NVME_BIO_BASED;
- goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
- } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
- t->type = DM_TYPE_NVME_BIO_BASED;
- }
}
return 0;
}
@@ -956,8 +943,7 @@ verify_rq_based:
* (e.g. request completion process for partial completion.)
*/
if (t->num_targets > 1) {
- DMERR("%s DM doesn't support multiple targets",
- t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
+ DMERR("request-based DM doesn't support multiple targets");
return -EINVAL;
}
@@ -1651,20 +1637,6 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
return true;
}
-static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- char b[BDEVNAME_SIZE];
-
- /* For now, NVMe devices are the only devices of this class */
- return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
-}
-
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
-{
- return dm_table_all_devices_attribute(t, device_no_partial_completion);
-}
-
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
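For reference, the deleted device_no_partial_completion() above reduced NVMe detection to a bare name-prefix test. A standalone userspace sketch of that heuristic, assuming a struct fake_bdev stand-in (the kernel instead derives the name with bdevname(dev->bdev, b)):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BDEVNAME_SIZE 32        /* same bound the kernel uses */

/* Stand-in for a block device; not a kernel structure. */
struct fake_bdev {
        char name[BDEVNAME_SIZE];
};

/* The retired heuristic: "cannot partially complete" was inferred
 * solely from the "nvme" name prefix -- the fragility the patch
 * removes along with the special-casing. */
static bool device_no_partial_completion(const struct fake_bdev *bdev)
{
        return strncmp(bdev->name, "nvme", 4) == 0;
}

int main(void)
{
        struct fake_bdev nvme = { .name = "nvme0n1" };
        struct fake_bdev scsi = { .name = "sda" };

        printf("%s -> %d\n", nvme.name, device_no_partial_completion(&nvme));
        printf("%s -> %d\n", scsi.name, device_no_partial_completion(&scsi));
        return 0;
}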
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 32ac19645255..af1bab3a810e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -975,7 +975,7 @@ static void clone_endio(struct bio *bio)
dm_endio_fn endio = tio->ti->type->end_io;
struct bio *orig_bio = io->orig_bio;
- if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
+ if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
!bio->bi_disk->queue->limits.max_discard_sectors)
disable_discard(md);
@@ -1626,45 +1626,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
return ret;
}
-/*
- * Optimized variant of __split_and_process_bio that leverages the
- * fact that targets that use it do _not_ have a need to split bios.
- */
-static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
- struct bio *bio)
-{
- struct clone_info ci;
- blk_qc_t ret = BLK_QC_T_NONE;
- int error = 0;
-
- init_clone_info(&ci, md, map, bio);
-
- if (bio->bi_opf & REQ_PREFLUSH) {
- error = __send_empty_flush(&ci);
- /* dec_pending submits any data associated with flush */
- } else {
- struct dm_target_io *tio;
- struct dm_target *ti = md->immutable_target;
-
- if (WARN_ON_ONCE(!ti)) {
- error = -EIO;
- goto out;
- }
-
- ci.bio = bio;
- ci.sector_count = bio_sectors(bio);
- if (__process_abnormal_io(&ci, ti, &error))
- goto out;
-
- tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
- ret = __clone_and_map_simple_bio(&ci, tio, NULL);
- }
-out:
- /* drop the extra reference count */
- dec_pending(ci.io, errno_to_blk_status(error));
- return ret;
-}
-
static blk_qc_t dm_submit_bio(struct bio *bio)
{
struct mapped_device *md = bio->bi_disk->private_data;
@@ -1710,10 +1671,7 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
if (is_abnormal_io(bio))
blk_queue_split(&bio);
- if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
- ret = __process_bio(md, map, bio);
- else
- ret = __split_and_process_bio(md, map, bio);
+ ret = __split_and_process_bio(md, map, bio);
out:
dm_put_live_table(md, srcu_idx);
return ret;
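The net effect of the dm_submit_bio() hunk above: submission no longer branches per bio on the table type. A toy model of the dispatch before and after (the submit_before/submit_after wrappers and the stub bodies are hypothetical; only the two path names come from dm.c):

#include <stdio.h>

enum dm_queue_mode { DM_TYPE_BIO_BASED, DM_TYPE_NVME_BIO_BASED };

/* Stand-ins for the two bio-based submission paths in dm.c. */
static int __split_and_process_bio(void) { puts("split path"); return 0; }
static int __process_bio(void) { puts("nvme singleton fast path"); return 0; }

/* Before the patch: every submission paid a type check to pick a path. */
static int submit_before(enum dm_queue_mode type)
{
        if (type == DM_TYPE_NVME_BIO_BASED)
                return __process_bio();
        return __split_and_process_bio();
}

/* After: one path. With direct_make_request gone, the fast path no
 * longer bought anything that justified the branch and the duplicate
 * clone-and-map code. */
static int submit_after(void)
{
        return __split_and_process_bio();
}

int main(void)
{
        submit_before(DM_TYPE_NVME_BIO_BASED);  /* old behaviour */
        submit_after();                         /* new behaviour, always */
        return 0;
}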
@@ -2038,11 +1996,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (request_based)
dm_stop_queue(q);
- if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
+ if (request_based) {
/*
- * Leverage the fact that request-based DM targets and
- * NVMe bio based targets are immutable singletons
- * - used to optimize both __process_bio and dm_mq_queue_rq
+ * Leverage the fact that request-based DM targets are
+ * immutable singletons - used to optimize dm_mq_queue_rq.
*/
md->immutable_target = dm_table_get_immutable_target(t);
}
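The caching this __bind() hunk narrows can be pictured as below; every struct layout here is a simplified stand-in for the real definitions in dm.c, kept only to show when immutable_target gets populated:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dm_target { int id; };

struct dm_table {
        unsigned int num_targets;
        struct dm_target *targets;
};

struct mapped_device {
        bool request_based;
        struct dm_target *immutable_target;     /* fast-path cache */
};

/* Stand-in: a single-target table yields its immutable singleton. */
static struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
        return t->num_targets == 1 ? &t->targets[0] : NULL;
}

/* After the patch only request-based tables populate the cache, since
 * dm_mq_queue_rq() is the sole remaining consumer. */
static void bind_cache_target(struct mapped_device *md, struct dm_table *t)
{
        if (md->request_based)
                md->immutable_target = dm_table_get_immutable_target(t);
}

int main(void)
{
        struct dm_target tgt = { 1 };
        struct dm_table t = { .num_targets = 1, .targets = &tgt };
        struct mapped_device md = { .request_based = true };

        bind_cache_target(&md, &t);
        printf("cached immutable target? %d\n", md.immutable_target != NULL);
        return 0;
}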
@@ -2164,7 +2121,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
break;
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- case DM_TYPE_NVME_BIO_BASED:
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
@@ -2922,7 +2878,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
switch (type) {
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- case DM_TYPE_NVME_BIO_BASED:
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
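The front_pad arithmetic in this final hunk sizes each mempool element so DM's per-bio bookkeeping sits ahead of the embedded clone bio. A toy computation with dummy struct layouts (the sizes are invented; only the roundup/offsetof formula matches the kernel line above, and __alignof__ is the gcc/clang extension the kernel itself relies on):

#include <stddef.h>
#include <stdio.h>

/* Dummy layouts; the real structs live in drivers/md/dm-core.h. */
struct bio { char payload[104]; };
struct dm_target_io { char tio_state[40]; struct bio clone; };
struct dm_io { char io_state[64]; struct dm_target_io tio; };

/* Kernel-style roundup(): round x up to a multiple of y. */
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        size_t per_io_data_size = 24;   /* target's per-bio private data */

        /* Pad the private data so struct dm_target_io lands aligned,
         * then count the bytes preceding the embedded clone bio. */
        size_t front_pad =
                roundup(per_io_data_size, __alignof__(struct dm_target_io)) +
                offsetof(struct dm_target_io, clone);

        /* Same trick one level up, for the enclosing struct dm_io. */
        size_t io_front_pad =
                roundup(front_pad, __alignof__(struct dm_io)) +
                offsetof(struct dm_io, tio);

        printf("front_pad=%zu io_front_pad=%zu\n", front_pad, io_front_pad);
        return 0;
}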