author    | Mike Snitzer <snitzer@redhat.com> | 2018-01-12 09:32:21 -0500
committer | Mike Snitzer <snitzer@redhat.com> | 2018-01-29 13:44:55 -0500
commit    | c12c9a3c3860c76ba273798c0c34c6f1294cc759 (patch)
tree      | 7743e38d1794d2fb5709d4c22909b0fc3dcb63c8 /drivers/md
parent    | ac514ffc968bf14649dd0e048447dc966ee49555 (diff)
download  | linux-c12c9a3c3860c76ba273798c0c34c6f1294cc759.tar.bz2
dm: various cleanups to md->queue initialization code
Also, add dm_sysfs_init() error handling to dm_create().
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm-core.h |  2
-rw-r--r-- | drivers/md/dm-rq.c   |  2
-rw-r--r-- | drivers/md/dm.c      | 30
3 files changed, 12 insertions, 22 deletions
```diff
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 124ffa2d6b9a..3222e21cbbf8 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -129,8 +129,6 @@ struct mapped_device {
 	struct srcu_struct io_barrier;
 };
 
-void dm_init_md_queue(struct mapped_device *md);
-void dm_init_normal_md_queue(struct mapped_device *md);
 int md_in_flight(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index b78ff6921cfb..c59c59cfd2a5 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -704,7 +704,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	/* disable dm_old_request_fn's merge heuristic by default */
 	md->seq_rq_merge_deadline_usecs = 0;
 
-	dm_init_normal_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
 
 	/* Initialize the request-based DM worker thread */
@@ -814,7 +813,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 		err = PTR_ERR(q);
 		goto out_tag_set;
 	}
-	dm_init_md_queue(md);
 
 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
 	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 67bf11610e4d..148087932679 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1733,20 +1733,9 @@ static const struct dax_operations dm_dax_ops;
 
 static void dm_wq_work(struct work_struct *work);
 
-void dm_init_md_queue(struct mapped_device *md)
-{
-	/*
-	 * Initialize data that will only be used by a non-blk-mq DM queue
-	 * - must do so here (in alloc_dev callchain) before queue is used
-	 */
-	md->queue->queuedata = md;
-	md->queue->backing_dev_info->congested_data = md;
-}
-
-void dm_init_normal_md_queue(struct mapped_device *md)
+static void dm_init_normal_md_queue(struct mapped_device *md)
 {
 	md->use_blk_mq = false;
-	dm_init_md_queue(md);
 
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
@@ -1846,10 +1835,10 @@ static struct mapped_device *alloc_dev(int minor)
 	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
 	if (!md->queue)
 		goto bad;
+	md->queue->queuedata = md;
+	md->queue->backing_dev_info->congested_data = md;
 
-	dm_init_md_queue(md);
-
-	md->disk = alloc_disk_node(1, numa_node_id);
+	md->disk = alloc_disk_node(1, md->numa_node_id);
 	if (!md->disk)
 		goto bad;
 
@@ -2082,13 +2071,18 @@ static struct dm_table *__unbind(struct mapped_device *md)
  */
 int dm_create(int minor, struct mapped_device **result)
 {
+	int r;
 	struct mapped_device *md;
 
 	md = alloc_dev(minor);
 	if (!md)
 		return -ENXIO;
 
-	dm_sysfs_init(md);
+	r = dm_sysfs_init(md);
+	if (r) {
+		free_dev(md);
+		return r;
+	}
 
 	*result = md;
 	return 0;
@@ -2145,6 +2139,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 
 	switch (type) {
 	case DM_TYPE_REQUEST_BASED:
+		dm_init_normal_md_queue(md);
 		r = dm_old_init_request_queue(md, t);
 		if (r) {
 			DMERR("Cannot initialize queue for request-based mapped device");
@@ -2236,7 +2231,6 @@ EXPORT_SYMBOL_GPL(dm_device_name);
 
 static void __dm_destroy(struct mapped_device *md, bool wait)
 {
-	struct request_queue *q = dm_get_md_queue(md);
 	struct dm_table *map;
 	int srcu_idx;
 
@@ -2247,7 +2241,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);
 
-	blk_set_queue_dying(q);
+	blk_set_queue_dying(md->queue);
 
 	if (dm_request_based(md) && md->kworker_task)
 		kthread_flush_worker(&md->kworker);
```
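As the commit message notes, dm_create() now checks the dm_sysfs_init() return value instead of ignoring it. For orientation, here is roughly how the function reads once the dm.c hunk above is applied; this is a sketch reassembled from the diff, not a verbatim copy of the kernel source:

```c
/*
 * Sketch of dm_create() after this patch (reassembled from the hunk
 * above): a dm_sysfs_init() failure now tears down the freshly
 * allocated mapped_device and propagates the error to the caller.
 */
int dm_create(int minor, struct mapped_device **result)
{
	int r;
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	r = dm_sysfs_init(md);
	if (r) {
		free_dev(md);	/* undo alloc_dev() on sysfs failure */
		return r;
	}

	*result = md;
	return 0;
}
```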