Diffstat (limited to 'block')
-rw-r--r--   block/blk-core.c      |  4
-rw-r--r--   block/blk-mq-sysfs.c  |  6
-rw-r--r--   block/blk-mq.c        |  7
-rw-r--r--   block/blk-sysfs.c     | 11
-rw-r--r--   block/genhd.c         |  4
5 files changed, 24 insertions(+), 8 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index d6ec7db9a9f4..0421b53e6431 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -236,7 +236,7 @@ EXPORT_SYMBOL(blk_stop_queue);
* this function.
*
* This function does not cancel any asynchronous activity arising
- * out of elevator or throttling code. That would require elevaotor_exit()
+ * out of elevator or throttling code. That would require elevator_exit()
* and blkcg_exit_queue() to be called with queue lock initialized.
*
*/
@@ -1236,7 +1236,7 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
EXPORT_SYMBOL(blk_make_request);

/**
- * blk_rq_set_block_pc - initialize a requeest to type BLOCK_PC
+ * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
* @rq: request to be initialized
*
*/
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ed5217867555..371d8800b48a 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -402,6 +402,12 @@ static void blk_mq_sysfs_init(struct request_queue *q)
}
}

+/* see blk_register_queue() */
+void blk_mq_finish_init(struct request_queue *q)
+{
+ percpu_ref_switch_to_percpu(&q->mq_usage_counter);
+}
+
int blk_mq_register_disk(struct gendisk *disk)
{
struct device *dev = disk_to_dev(disk);
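blk_mq_finish_init() above is a thin wrapper around percpu_ref_switch_to_percpu(): once blk_register_queue() decides the device is real, the queue's usage counter is upgraded from the shutdown-friendly atomic mode to the fast per-cpu mode. A minimal sketch of the pattern under illustrative names (my_finish_init and my_enter are hypothetical; percpu_ref_switch_to_percpu() and percpu_ref_tryget_live() are the real APIs):

#include <linux/percpu-refcount.h>

/*
 * Sketch: upgrade a ref that was initialized with PERCPU_REF_INIT_ATOMIC
 * once its owner is known to be long-lived.
 */
static void my_finish_init(struct percpu_ref *ref)
{
	/* one-time switching cost; afterwards get/put are per-cpu adds */
	percpu_ref_switch_to_percpu(ref);
}

static bool my_enter(struct percpu_ref *ref)
{
	/* fails once the ref has been killed, in either mode */
	return percpu_ref_tryget_live(ref);
}

In atomic mode every get/put hits one shared atomic counter; after the switch they become per-cpu increments, so the hot path only pays the upgrade cost once, at registration time.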
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 79aa11b3efa5..68929bad9a6a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1815,7 +1815,12 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
if (!q)
goto err_hctxs;

- if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release))
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shutdown.
+ * See blk_register_queue() for details.
+ */
+ if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
+ PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto err_map;

setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
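This hunk adopts the four-argument percpu_ref_init() from the same series, where PERCPU_REF_INIT_ATOMIC makes the counter start in atomic (slow to use, cheap to kill) mode. A hedged sketch of the init pattern; my_queue and my_queue_release are illustrative stand-ins for request_queue and blk_mq_usage_counter_release:

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

struct my_queue {
	struct percpu_ref usage;	/* stand-in for q->mq_usage_counter */
	struct completion released;
};

/* runs once the last reference is dropped after percpu_ref_kill() */
static void my_queue_release(struct percpu_ref *ref)
{
	struct my_queue *q = container_of(ref, struct my_queue, usage);

	complete(&q->released);
}

static int my_queue_init(struct my_queue *q)
{
	init_completion(&q->released);
	/*
	 * Start atomic: slower get/put, but no RCU grace period is
	 * needed to tear the ref down if the queue never goes live.
	 */
	return percpu_ref_init(&q->usage, my_queue_release,
			       PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
}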
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e8f38a36c625..1fac43408911 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -551,12 +551,19 @@ int blk_register_queue(struct gendisk *disk)
return -ENXIO;

/*
- * Initialization must be complete by now. Finish the initial
- * bypass from queue allocation.
+ * SCSI probing may synchronously create and destroy a lot of
+ * request_queues for non-existent devices. Shutting down a fully
+ * functional queue takes measurable wallclock time as RCU grace
+ * periods are involved. To avoid excessive latency in these
+ * cases, a request_queue starts out in a degraded mode which is
+ * faster to shut down and is made fully functional here as
+ * request_queues for non-existent devices never get registered.
*/
if (!blk_queue_init_done(q)) {
queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
blk_queue_bypass_end(q);
+ if (q->mq_ops)
+ blk_mq_finish_init(q);
}

ret = blk_trace_init_sysfs(dev);
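The wallclock cost the new comment describes comes from tearing down a per-cpu ref: percpu_ref_kill() returns immediately, but the release callback is deferred past an RCU grace period that confirms the switch back to atomic mode, so destroying many probe-time queues serializes on grace periods. A sketch of the teardown side, continuing the hypothetical my_queue from the blk-mq.c example above:

static void my_queue_destroy(struct my_queue *q)
{
	/*
	 * New percpu_ref_tryget_live() calls fail from here on. For a
	 * ref still in atomic mode the release fires as soon as the
	 * refcount drains; for one switched to per-cpu mode it waits
	 * out an RCU grace period first, which is exactly the latency
	 * this patch avoids for queues of non-existent devices.
	 */
	percpu_ref_kill(&q->usage);
	wait_for_completion(&q->released);
	percpu_ref_exit(&q->usage);	/* frees the per-cpu counters */
}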
diff --git a/block/genhd.c b/block/genhd.c
index 09da5e4a8e03..bd3060684ab2 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -445,8 +445,6 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
*/
void blk_free_devt(dev_t devt)
{
- might_sleep();
-
if (devt == MKDEV(0, 0))
return;

@@ -1547,7 +1545,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
/**
* disk_clear_events - synchronously check, clear and return pending events
* @disk: disk to fetch and clear events from
- * @mask: mask of events to be fetched and clearted
+ * @mask: mask of events to be fetched and cleared
*
* Disk events are synchronously checked and pending events in @mask
* are cleared and returned. This ignores the block count.