Diffstat (limited to 'block')
-rw-r--r--  block/cfq-iosched.c  49
1 file changed, 15 insertions(+), 34 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f3ea8a198def..fa2a2f7dbecd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3640,33 +3640,6 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
 #endif /* CONFIG_CFQ_GROUP_IOSCHED */
 
-static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, struct cfq_group *cfqg, bool is_sync,
-		     struct cfq_io_cq *cic, struct bio *bio)
-{
-	struct cfq_queue *cfqq;
-
-	cfqq = cic_to_cfqq(cic, is_sync);
-
-	/*
-	 * Always try a new alloc if we fell back to the OOM cfqq
-	 * originally, since it should just be a temporary situation.
-	 */
-	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
-		cfqq = kmem_cache_alloc_node(cfq_pool,
-					     GFP_NOWAIT | __GFP_ZERO,
-					     cfqd->queue->node);
-		if (cfqq) {
-			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
-			cfq_init_prio_data(cfqq, cic);
-			cfq_link_cfqq_cfqg(cfqq, cfqg);
-			cfq_log_cfqq(cfqd, cfqq, "alloced");
-		} else
-			cfqq = &cfqd->oom_cfqq;
-	}
-	return cfqq;
-}
-
 static struct cfq_queue **
 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 {
@@ -3691,7 +3664,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 {
 	int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
 	int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
-	struct cfq_queue **async_cfqq;
+	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq;
 	struct cfq_group *cfqg;
@@ -3714,12 +3687,20 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 		goto out;
 	}
 
-	cfqq = cfq_find_alloc_queue(cfqd, cfqg, is_sync, cic, bio);
+	cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
+				     cfqd->queue->node);
+	if (!cfqq) {
+		cfqq = &cfqd->oom_cfqq;
+		goto out;
+	}
+
+	cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
+	cfq_init_prio_data(cfqq, cic);
+	cfq_link_cfqq_cfqg(cfqq, cfqg);
+	cfq_log_cfqq(cfqd, cfqq, "alloced");
 
-	/*
-	 * pin the queue now that it's allocated, scheduler exit will prune it
-	 */
-	if (!is_sync && cfqq != &cfqd->oom_cfqq) {
+	if (async_cfqq) {
+		/* a new async queue is created, pin and remember */
 		cfqq->ref++;
 		*async_cfqq = cfqq;
 	}
@@ -4469,7 +4450,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
 		cfqd->prio_trees[i] = RB_ROOT;
 
 	/*
-	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
+	 * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
 	 * Grab a permanent reference to it, so that the normal code flow
 	 * will not attempt to free it. oom_cfqq is linked to root_group
 	 * but shouldn't hold a reference as it'll never be unlinked. Lose