author Christoph Hellwig <hch@lst.de> 2017-10-03 10:47:00 +0200
committer Jens Axboe <axboe@kernel.dk> 2017-10-03 08:43:04 -0600
commit 9c9883744dda1cc38339a448dd8435140537027e (patch)
tree f12a16e699415343d5757d7b118dc59f6a295d3d /block
parent 7cb04004fa371a626c1a5ebe6d977f70285759ed (diff)
download linux-9c9883744dda1cc38339a448dd8435140537027e.tar.bz2
block: move __elv_next_request to blk-core.c
No need to have this helper inline in a header. Also drop the __ prefix.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
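The pattern this commit applies, sketched below with hypothetical names (demo.h and demo.c are placeholders, not files from this patch): a static inline helper with a single caller gains nothing from living in a shared header, so it moves next to its only user and drops the double-underscore prefix that marked it as an internal variant.

/* Before: demo.h, included by several files */
static inline int __demo_helper(int x)
{
	return x * 2;
}

/* After: demo.c, the helper's only user; plain static, no __ prefix */
static int demo_helper(int x)
{
	return x * 2;
}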
Diffstat (limited to 'block')
-rw-r--r-- block/blk-core.c | 42
-rw-r--r-- block/blk.h | 39
2 files changed, 40 insertions(+), 41 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 048be4aa6024..14f7674fa0b1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2517,6 +2517,45 @@ void blk_account_io_start(struct request *rq, bool new_io)
part_stat_unlock();
}
+static struct request *elv_next_request(struct request_queue *q)
+{
+ struct request *rq;
+ struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
+
+ WARN_ON_ONCE(q->mq_ops);
+
+ while (1) {
+ if (!list_empty(&q->queue_head)) {
+ rq = list_entry_rq(q->queue_head.next);
+ return rq;
+ }
+
+ /*
+ * If a flush request is in flight and the drive can't queue flush
+ * requests, hold the queue until the flush finishes. Even if we
+ * didn't hold it, the driver couldn't dispatch the next requests
+ * and would only requeue them; holding can also improve throughput.
+ * For example, with requests flush1, write1, flush2: flush1 is
+ * dispatched, the queue is held, and write1 is not inserted. Once
+ * flush1 finishes, flush2 is dispatched; the disk cache is already
+ * clean, so flush2 completes almost immediately, effectively
+ * getting folded into flush1.
+ * While the queue is held, a flag is set to indicate that the
+ * queue should be restarted later. See flush_end_io() for details.
+ */
+ if (fq->flush_pending_idx != fq->flush_running_idx &&
+ !queue_flush_queueable(q)) {
+ fq->flush_queue_delayed = 1;
+ return NULL;
+ }
+ if (unlikely(blk_queue_bypass(q)) ||
+ !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
+ return NULL;
+ }
+}
+
/**
* blk_peek_request - peek at the top of a request queue
* @q: request queue to peek at
@@ -2538,8 +2577,7 @@ struct request *blk_peek_request(struct request_queue *q)
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
- while ((rq = __elv_next_request(q)) != NULL) {
-
+ while ((rq = elv_next_request(q)) != NULL) {
rq = blk_pm_peek_request(q, rq);
if (!rq)
break;
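The hold-the-queue logic that the comment in the hunk above describes can be modeled outside the kernel. The following standalone C program is a simplified illustration (the struct fields and function name echo blk_flush_queue and elv_next_request but are not kernel code): a flush is considered in flight while the pending and running batch indices differ, and if the drive cannot queue flushes, dispatch is held and a flag is set so the completion path can restart the queue.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct blk_flush_queue: two batch indices plus the
 * restart flag that flush completion would check. */
struct toy_flush_queue {
	unsigned int pending_idx;  /* batch waiting to be flushed */
	unsigned int running_idx;  /* batch currently in flight */
	bool queue_delayed;        /* restart the queue on completion */
};

/* Mirrors the decision in elv_next_request(): hold dispatch while a
 * flush is in flight on a drive that cannot queue flushes. */
static bool should_hold_queue(struct toy_flush_queue *fq, bool flush_queueable)
{
	if (fq->pending_idx != fq->running_idx && !flush_queueable) {
		fq->queue_delayed = true;  /* completion restarts the queue */
		return true;
	}
	return false;
}

int main(void)
{
	struct toy_flush_queue fq = { .pending_idx = 1, .running_idx = 0,
				      .queue_delayed = false };

	/* flush1 is in flight and the drive can't queue flushes, so write1
	 * is held back; a later flush2 then finds a clean cache and is
	 * effectively folded into flush1. */
	printf("hold queue: %s\n", should_hold_queue(&fq, false) ? "yes" : "no");
	printf("delayed flag set: %d\n", (int)fq.queue_delayed);
	return 0;
}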
diff --git a/block/blk.h b/block/blk.h
index fcb9775b997d..fda5a4632aba 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -148,45 +148,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
void blk_insert_flush(struct request *rq);
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
- struct request *rq;
- struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
-
- WARN_ON_ONCE(q->mq_ops);
-
- while (1) {
- if (!list_empty(&q->queue_head)) {
- rq = list_entry_rq(q->queue_head.next);
- return rq;
- }
-
- /*
- * Flush request is running and flush request isn't queueable
- * in the drive, we can hold the queue till flush request is
- * finished. Even we don't do this, driver can't dispatch next
- * requests and will requeue them. And this can improve
- * throughput too. For example, we have request flush1, write1,
- * flush 2. flush1 is dispatched, then queue is hold, write1
- * isn't inserted to queue. After flush1 is finished, flush2
- * will be dispatched. Since disk cache is already clean,
- * flush2 will be finished very soon, so looks like flush2 is
- * folded to flush1.
- * Since the queue is hold, a flag is set to indicate the queue
- * should be restarted later. Please see flush_end_io() for
- * details.
- */
- if (fq->flush_pending_idx != fq->flush_running_idx &&
- !queue_flush_queueable(q)) {
- fq->flush_queue_delayed = 1;
- return NULL;
- }
- if (unlikely(blk_queue_bypass(q)) ||
- !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
- return NULL;
- }
-}
-
static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
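For orientation, this is roughly how the legacy single-queue dispatch path reaches the moved helper: a driver's request_fn drains the queue with blk_fetch_request(), which peeks via blk_peek_request() (and therefore elv_next_request()) and then starts the request. A from-memory sketch of a caller from this era, with a hypothetical driver function; it is not part of this patch.

/* Hypothetical legacy driver: drain and complete requests immediately. */
static void demo_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* blk_fetch_request() is blk_peek_request() + blk_start_request() */
	while ((rq = blk_fetch_request(q)) != NULL) {
		/* ... issue rq to hardware here ... */
		__blk_end_request_all(rq, BLK_STS_OK);
	}
}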