author     Jens Axboe <axboe@fb.com>    2017-01-17 06:03:22 -0700
committer  Jens Axboe <axboe@fb.com>    2017-01-17 10:04:20 -0700
commit     bd166ef183c263c5ced656d49ef19c7da4adc774 (patch)
tree       449bbd3b4e671b370b96e3846b2281116e7089e9 /include
parent     2af8cbe30531eca73c8f3ba277f155fc0020b01a (diff)
download   linux-bd166ef183c263c5ced656d49ef19c7da4adc774.tar.bz2
blk-mq-sched: add framework for MQ capable IO schedulers
This adds a set of hooks that intercepts the blk-mq path of allocating/inserting/issuing/completing requests, allowing us to develop a scheduler within that framework.

We reuse the existing elevator scheduler API on the registration side, but augment that with the scheduler flagging support for the blk-mq interface, and with a separate set of ops hooks for MQ devices.

We split driver and scheduler tags, so we can run the scheduling independently of device queue depth.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
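To make the new registration surface concrete, here is a minimal, illustrative sketch of how an MQ-capable scheduler might describe itself to the elevator core using the elevator_mq_ops and uses_mq fields added below. Everything prefixed example_ (the hook bodies, the module, the "example-mq" name) is a hypothetical placeholder and not part of this patch; only elv_register()/elv_unregister()/elevator_alloc() from the existing elevator API and the new struct fields shown in the diff are taken from the kernel.

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>

/* Placeholder hook bodies; a real scheduler would keep per-hctx state in
 * hctx->sched_data and hand requests back via dispatch_requests(). */
static int example_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct elevator_queue *eq;

	/* Tie a scheduler instance to the queue; private scheduler data
	 * would normally hang off eq->elevator_data. */
	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;
	q->elevator = eq;
	return 0;
}

static void example_exit_sched(struct elevator_queue *eq)
{
	/* Free any private data allocated in init_sched (none here). */
}

static void example_insert_requests(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list, bool at_head)
{
	/* Queue the incoming requests on scheduler-internal lists. */
}

static void example_dispatch_requests(struct blk_mq_hw_ctx *hctx,
				      struct list_head *list)
{
	/* Move chosen requests onto 'list' for the driver to issue. */
}

static bool example_has_work(struct blk_mq_hw_ctx *hctx)
{
	return false;
}

static struct elevator_type example_mq_sched = {
	.ops.mq = {
		.init_sched		= example_init_sched,
		.exit_sched		= example_exit_sched,
		.insert_requests	= example_insert_requests,
		.dispatch_requests	= example_dispatch_requests,
		.has_work		= example_has_work,
	},
	.uses_mq	= true,
	.elevator_name	= "example-mq",
	.elevator_owner	= THIS_MODULE,
};

static int __init example_mq_sched_init(void)
{
	/* Registration reuses the existing elevator API. */
	return elv_register(&example_mq_sched);
}

static void __exit example_mq_sched_exit(void)
{
	elv_unregister(&example_mq_sched);
}

module_init(example_mq_sched_init);
module_exit(example_mq_sched_exit);
MODULE_LICENSE("GPL");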
Diffstat (limited to 'include')
-rw-r--r--  include/linux/blk-mq.h    5
-rw-r--r--  include/linux/blkdev.h    4
-rw-r--r--  include/linux/elevator.h  32
3 files changed, 39 insertions, 2 deletions
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2686f9e7302a..63569eb46d15 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -22,6 +22,7 @@ struct blk_mq_hw_ctx {
unsigned long flags; /* BLK_MQ_F_* flags */
+ void *sched_data;
struct request_queue *queue;
struct blk_flush_queue *fq;
@@ -35,6 +36,7 @@ struct blk_mq_hw_ctx {
atomic_t wait_index;
struct blk_mq_tags *tags;
+ struct blk_mq_tags *sched_tags;
struct srcu_struct queue_rq_srcu;
@@ -156,6 +158,7 @@ enum {
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
+ BLK_MQ_S_SCHED_RESTART = 2,
BLK_MQ_MAX_DEPTH = 10240,
@@ -179,13 +182,13 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
enum {
BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */
BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */
+ BLK_MQ_REQ_INTERNAL = (1 << 2), /* allocate internal/sched tag */
};
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2e99d659b0f1..25564857f5f8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -154,6 +154,7 @@ struct request {
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
+ int tag;
sector_t __sector; /* sector cursor */
struct bio *bio;
@@ -220,9 +221,10 @@ struct request {
unsigned short ioprio;
+ int internal_tag;
+
void *special; /* opaque pointer available for LLD use */
- int tag;
int errors;
/*
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 2a9e966eed03..ecb96fd67c6d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -77,6 +77,34 @@ struct elevator_ops
elevator_registered_fn *elevator_registered_fn;
};
+struct blk_mq_alloc_data;
+struct blk_mq_hw_ctx;
+
+struct elevator_mq_ops {
+ int (*init_sched)(struct request_queue *, struct elevator_type *);
+ void (*exit_sched)(struct elevator_queue *);
+
+ bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
+ bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
+ int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
+ void (*request_merged)(struct request_queue *, struct request *, int);
+ void (*requests_merged)(struct request_queue *, struct request *, struct request *);
+ struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *);
+ void (*put_request)(struct request *);
+ void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
+ void (*dispatch_requests)(struct blk_mq_hw_ctx *, struct list_head *);
+ bool (*has_work)(struct blk_mq_hw_ctx *);
+ void (*completed_request)(struct blk_mq_hw_ctx *, struct request *);
+ void (*started_request)(struct request *);
+ void (*requeue_request)(struct request *);
+ struct request *(*former_request)(struct request_queue *, struct request *);
+ struct request *(*next_request)(struct request_queue *, struct request *);
+ int (*get_rq_priv)(struct request_queue *, struct request *);
+ void (*put_rq_priv)(struct request_queue *, struct request *);
+ void (*init_icq)(struct io_cq *);
+ void (*exit_icq)(struct io_cq *);
+};
+
#define ELV_NAME_MAX (16)
struct elv_fs_entry {
@@ -96,12 +124,14 @@ struct elevator_type
/* fields provided by elevator implementation */
union {
struct elevator_ops sq;
+ struct elevator_mq_ops mq;
} ops;
size_t icq_size; /* see iocontext.h */
size_t icq_align; /* ditto */
struct elv_fs_entry *elevator_attrs;
char elevator_name[ELV_NAME_MAX];
struct module *elevator_owner;
+ bool uses_mq;
/* managed by elevator core */
char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */
@@ -125,6 +155,7 @@ struct elevator_queue
struct kobject kobj;
struct mutex sysfs_lock;
unsigned int registered:1;
+ unsigned int uses_mq:1;
DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
};
@@ -141,6 +172,7 @@ extern void elv_merge_requests(struct request_queue *, struct request *,
extern void elv_merged_request(struct request_queue *, struct request *, int);
extern void elv_bio_merged(struct request_queue *q, struct request *,
struct bio *);
+extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
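
A closing note on the driver/scheduler tag split visible in the blkdev.h hunk above: a request now carries both a scheduler tag (internal_tag, allocated from the new hctx->sched_tags) and a driver tag (tag, allocated from hctx->tags), which is what lets the scheduler admit more requests than the device queue depth exposes. The helper below is purely illustrative (the name is made up, not from this patch); it assumes the existing blk-mq convention that an unassigned tag is -1.

/* Hypothetical helper: with split tags, a request may sit in the scheduler
 * with only rq->internal_tag assigned; rq->tag is filled in once a driver
 * tag is obtained for dispatch (-1 is assumed to mean "not assigned"). */
static inline bool example_rq_has_driver_tag(struct request *rq)
{
	return rq->tag != -1;
}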