Diffstat (limited to 'block/blk.h')
-rw-r--r-- | block/blk.h | 58
1 file changed, 50 insertions, 8 deletions
diff --git a/block/blk.h b/block/blk.h
index 3f6551b3c92d..7efd772336de 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -1,6 +1,8 @@
 #ifndef BLK_INTERNAL_H
 #define BLK_INTERNAL_H
 
+#include <linux/idr.h>
+
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME	(HZ/50UL)
 
@@ -9,6 +11,12 @@
 
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
+extern struct ida blk_queue_ida;
+
+static inline void __blk_get_queue(struct request_queue *q)
+{
+	kobject_get(&q->kobj);
+}
 
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
@@ -85,8 +93,8 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 			q->flush_queue_delayed = 1;
 			return NULL;
 		}
-		if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
-		    !q->elevator->ops->elevator_dispatch_fn(q, 0))
+		if (unlikely(blk_queue_dead(q)) ||
+		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
 			return NULL;
 	}
 }
@@ -95,16 +103,16 @@ static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_activate_req_fn)
-		e->ops->elevator_activate_req_fn(q, rq);
+	if (e->type->ops.elevator_activate_req_fn)
+		e->type->ops.elevator_activate_req_fn(q, rq);
 }
 
 static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
+	if (e->type->ops.elevator_deactivate_req_fn)
+		e->type->ops.elevator_deactivate_req_fn(q, rq);
 }
 
 #ifdef CONFIG_FAIL_IO_TIMEOUT
@@ -119,8 +127,6 @@ static inline int blk_should_fake_timeout(struct request_queue *q)
 }
 #endif
 
-struct io_context *current_io_context(gfp_t gfp_flags, int node);
-
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio);
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
@@ -189,6 +195,42 @@ static inline int blk_do_io_stat(struct request *rq)
 		 (rq->cmd_flags & REQ_DISCARD));
 }
 
+/*
+ * Internal io_context interface
+ */
+void get_io_context(struct io_context *ioc);
+struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
+struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+void ioc_clear_queue(struct request_queue *q);
+
+void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
+				int node);
+
+/**
+ * create_io_context - try to create task->io_context
+ * @task: target task
+ * @gfp_mask: allocation mask
+ * @node: allocation node
+ *
+ * If @task->io_context is %NULL, allocate a new io_context and install it.
+ * Returns the current @task->io_context which may be %NULL if allocation
+ * failed.
+ *
+ * Note that this function can't be called with IRQ disabled because
+ * task_lock which protects @task->io_context is IRQ-unsafe.
+ */
+static inline struct io_context *create_io_context(struct task_struct *task,
+						   gfp_t gfp_mask, int node)
+{
+	WARN_ON_ONCE(irqs_disabled());
+	if (unlikely(!task->io_context))
+		create_io_context_slowpath(task, gfp_mask, node);
+	return task->io_context;
+}
+
+/*
+ * Internal throttling interface
+ */
 #ifdef CONFIG_BLK_DEV_THROTTLING
 extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);
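
The new blk_queue_ida pairs with the added <linux/idr.h> include. A minimal sketch of the likely usage, assuming the queue carries an integer id field (called q->id here) assigned in the queue-allocation path; neither the field name nor the call site appears in this hunk, so both are illustrative assumptions:

/* Sketch only, not part of this diff: hand out a unique queue id
 * from the global blk_queue_ida at allocation time. */
#include <linux/blkdev.h>
#include <linux/idr.h>

static int example_assign_queue_id(struct request_queue *q, gfp_t gfp_mask)
{
	int id;

	/* first free id >= 0; may sleep depending on gfp_mask */
	id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */
	q->id = id;
	return 0;
}

On teardown the id would be returned with ida_simple_remove(&blk_queue_ida, q->id).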
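For the io_context interface, here is a caller-side sketch of how the inline fast path and the out-of-line slowpath compose with the icq helpers declared above. The function name example_get_icq is hypothetical, and the assumption that ioc_lookup_icq() runs under q->queue_lock follows the usual queue-locking convention rather than anything stated in this diff:

/* Sketch only, not part of this diff. */
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/sched.h>

static struct io_cq *example_get_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * Fast path is inline: only drops into the slowpath when
	 * current->io_context is still NULL.  Must run with IRQs
	 * enabled, hence before taking queue_lock below.
	 */
	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;		/* io_context allocation failed */

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);	/* assumed to need queue_lock */
	spin_unlock_irq(q->queue_lock);

	if (!icq)
		icq = ioc_create_icq(q, gfp_mask);	/* may return NULL */
	return icq;
}

Keeping the NULL check inline means the common case, where the task already has an io_context, costs a single test, while the allocation path stays out of line in create_io_context_slowpath().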