author     Bart Van Assche <bart.vanassche@sandisk.com>   2016-10-28 17:20:49 -0700
committer  Jens Axboe <axboe@fb.com>                      2016-11-02 12:50:19 -0600
commit     9b7dd572cc439fa92e120290eb74d0295567c5a0 (patch)
tree       e6bb3de7badc03785a2f00805424063643a13ff0
parent     52d7f1b5c2f33b5d34dc2b6af5175fb6a44999f6 (diff)
blk-mq: Remove blk_mq_cancel_requeue_work()
Since blk_mq_requeue_work() no longer restarts stopped queues,
canceling the requeue work is no longer needed to prevent a stopped
queue from being restarted. Hence remove this function.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
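
For context, below is a rough sketch of blk_mq_requeue_work() as it stands after the parent commit (52d7f1b5c2f3); the per-request re-insertion loop is elided and the body is paraphrased, so treat it as an illustration rather than the exact upstream code. The point it illustrates is that the work item now ends with blk_mq_run_hw_queues(q, false), which skips stopped hardware queues, so a caller that stops a queue no longer needs to cancel this work first.

/*
 * Paraphrased sketch of blk_mq_requeue_work() after commit 52d7f1b5c2f3;
 * the per-request re-insertion loop is elided.
 */
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	unsigned long flags;

	/* Grab everything currently on the requeue list. */
	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	/* ... re-insert each request from rq_list ... */

	/*
	 * Runs only hardware queues that are not marked BLK_MQ_S_STOPPED
	 * (this used to be blk_mq_start_hw_queues(q), which cleared the
	 * stopped state). A stopped queue therefore stays stopped, and
	 * blk_mq_cancel_requeue_work() no longer serves a purpose.
	 */
	blk_mq_run_hw_queues(q, false);
}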
-rw-r--r--   block/blk-mq.c           | 6
-rw-r--r--   drivers/md/dm-rq.c       | 2
-rw-r--r--   drivers/nvme/host/core.c | 1
-rw-r--r--   include/linux/blk-mq.h   | 1
4 files changed, 0 insertions, 10 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d95034ae64f6..a461823644fb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -526,12 +526,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 }
 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
-void blk_mq_cancel_requeue_work(struct request_queue *q)
-{
-	cancel_delayed_work_sync(&q->requeue_work);
-}
-EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
-
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
 	kblockd_schedule_delayed_work(&q->requeue_work, 0);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index a9e9e781bb77..060ccc5a4b1c 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -116,8 +116,6 @@ static void dm_mq_stop_queue(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	/* Avoid that requeuing could restart the queue. */
-	blk_mq_cancel_requeue_work(q);
 	blk_mq_stop_hw_queues(q);
 }
 
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 329381a28edf..a764c2aa00a1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2081,7 +2081,6 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
 		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
 		spin_unlock_irq(ns->queue->queue_lock);
 
-		blk_mq_cancel_requeue_work(ns->queue);
 		blk_mq_stop_hw_queues(ns->queue);
 	}
 	mutex_unlock(&ctrl->namespaces_mutex);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index aa930009fcd3..a85a20f80aaa 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -217,7 +217,6 @@ void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
-void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_abort_requeue_list(struct request_queue *q);