Diffstat (limited to 'block')
-rw-r--r--	block/Kconfig        |  2 +-
-rw-r--r--	block/blk-throttle.c | 10 +++++++++-
-rw-r--r--	block/cfq-iosched.c  | 32 ++++++++++++++++++++++++++++----
3 files changed, 38 insertions(+), 6 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 6c9213ef15a1..60be1e0455da 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -2,7 +2,7 @@
# Block layer core configuration
#
menuconfig BLOCK
- bool "Enable the block layer" if EMBEDDED
+ bool "Enable the block layer" if EXPERT
default y
help
Provide block layer support for the kernel.
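A note on the one-liner above: in Kconfig, a conditional prompt (bool "..." if EXPERT) means the question is only presented when CONFIG_EXPERT is enabled; otherwise the symbol silently takes its default (here, y). A minimal Kconfig sketch of the idiom, with a made-up FOO symbol:

config FOO
	bool "Enable foo" if EXPERT	# prompt shown only when EXPERT=y
	default y			# value taken when the prompt is hidden

So after this rename, only EXPERT configurations can switch the block layer off; everyone else gets BLOCK=y.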
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 381b09bb562b..a89043a3caa4 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -168,7 +168,15 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
* tree of blkg (instead of traversing through hash list all
* the time).
*/
- tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+
+ /*
+ * This is the common case when there are no blkio cgroups.
+ * Avoid lookup in this case
+ */
+ if (blkcg == &blkio_root_cgroup)
+ tg = &td->root_tg;
+ else
+ tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

/* Fill in device details for root group */
if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
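The change above short-circuits the common case: with no blkio cgroups configured, every caller previously paid for blkiocg_lookup_group(), a hash-list walk, only to land on the root group anyway. Pointer-comparing blkcg against blkio_root_cgroup lets it return the preallocated td->root_tg directly. A standalone C sketch of the same fast-path pattern (all names here are invented, not kernel code):

#include <stddef.h>

struct group { int id; /* per-group throttling state */ };

static struct group root_group;		/* preallocated, like td->root_tg */

static struct group *hash_lookup(int id)
{
	/* stand-in for blkiocg_lookup_group(): walks a hash chain */
	return NULL;
}

static struct group *find_group(int id)
{
	if (id == 0)			/* common case: root cgroup */
		return &root_group;	/* no lookup needed */
	return hash_lookup(id);
}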
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 8427697c5437..7be4c7959625 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -598,8 +598,8 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
return cfq_target_latency * cfqg->weight / st->total_weight;
}

-static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static inline unsigned
+cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
if (cfqd->cfq_latency) {
@@ -625,6 +625,14 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
low_slice);
}
}
+ return slice;
+}
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
+
cfqq->slice_start = jiffies;
cfqq->slice_end = jiffies + slice;
cfqq->allocated_slice = slice;
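The hunk above is an extract-function refactor: the slice computation, including the low-latency scaling done under cfqd->cfq_latency, moves into cfq_scaled_cfqq_slice(), which returns the value; cfq_set_prio_slice() becomes a thin wrapper that stamps the result onto the queue. That makes the scaled slice reusable from other call sites, which the next hunk exploits. A compilable sketch of the split, with invented types and a simplified scaling rule:

struct queue_state {
	unsigned weight;
	unsigned long now, slice_start, slice_end;
};

/* pure computation, callable on its own (cf. cfq_scaled_cfqq_slice) */
static unsigned scaled_slice(const struct queue_state *q, unsigned base)
{
	return base * q->weight / 100;	/* stand-in for the latency scaling */
}

/* thin wrapper that applies the value (cf. cfq_set_prio_slice) */
static void set_slice(struct queue_state *q, unsigned base)
{
	unsigned slice = scaled_slice(q, base);

	q->slice_start = q->now;
	q->slice_end = q->now + slice;
}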
@@ -1661,8 +1669,11 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
/*
* store what was left of this slice, if the queue idled/timed out
*/
- if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
- cfqq->slice_resid = cfqq->slice_end - jiffies;
+ if (timed_out) {
+ if (cfq_cfqq_slice_new(cfqq))
+ cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
+ else
+ cfqq->slice_resid = cfqq->slice_end - jiffies;
cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
}
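Previously a queue expired by a timeout before its slice ever started (cfq_cfqq_slice_new()) recorded no residual at all; now it is credited a full scaled slice, while a queue that did run still keeps slice_end - jiffies, which can be negative if it overran. A minimal sketch of the resulting rule, with illustrative names:

/* residual time to carry over when a slice is expired early */
static long slice_residual(int slice_started, long slice_end,
			   long now, long full_slice)
{
	if (!slice_started)
		return full_slice;	/* never ran: full credit */
	return slice_end - now;		/* leftover; negative if overran */
}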
@@ -3284,10 +3295,19 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
*/
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
+ struct cfq_queue *old_cfqq = cfqd->active_queue;
+
cfq_log_cfqq(cfqd, cfqq, "preempt");
cfq_slice_expired(cfqd, 1);

/*
+ * workload type is changed, don't save slice, otherwise preempt
+ * doesn't happen
+ */
+ if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
+ cfqq->cfqg->saved_workload_slice = 0;
+
+ /*
* Put the new queue at the front of the current list,
* so we know that it will be selected next.
*/
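The hunk above keeps a preemption from being silently undone: CFQ stashes the remaining slice of the group's current workload type in saved_workload_slice, and restoring that after a preempting queue of a different type is queued would flip the workload straight back. Zeroing the saved slice when the types differ lets the preempting queue's workload actually run next. A sketch of the idea, with invented enum and field names:

enum wl_type { WL_SYNC_NOIDLE, WL_SYNC, WL_ASYNC };

struct group_sched {
	enum wl_type active_wl;
	unsigned saved_wl_slice;	/* stand-in for saved_workload_slice */
};

static void preempt_to(struct group_sched *g, enum wl_type new_wl)
{
	if (g->active_wl != new_wl)
		g->saved_wl_slice = 0;	/* don't restore the old workload */
	g->active_wl = new_wl;
}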
@@ -3412,6 +3432,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct cfq_io_context *cic = cfqd->active_cic;

+ /* If the queue already has requests, don't wait */
+ if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+ return false;
+
/* If there are other queues in the group, don't wait */
if (cfqq->cfqg->nr_cfqq > 1)
return false;
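cfq_should_wait_busy() decides whether to hold a group's slice open by idling for one more request from the active queue. The added check exits first: a queue whose sort_list (its red-black tree of pending requests) is non-empty already has work to dispatch, so there is nothing to wait for. The function is a chain of cheap disqualifying guards; a standalone sketch of that shape, with illustrative parameters:

#include <stdbool.h>

static bool should_wait_busy(int pending_requests, int queues_in_group)
{
	if (pending_requests > 0)	/* already has work: don't idle */
		return false;
	if (queues_in_group > 1)	/* a sibling queue can run instead */
		return false;
	/* ... remaining conditions elided ... */
	return true;
}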