| author | Shaohua Li <shaohua.li@intel.com> | 2010-03-01 09:20:54 +0100 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2010-03-01 09:20:54 +0100 |
| commit | abc3c744d0d7f4ad710a948ae73852ffea5fbc3b | |
| tree | f3aebe4f1ee8138db560b049f84d30a4b7348e8a /block | |
| parent | 9a8c28c8311e30ba97499447d5a11662f5aea094 | |
cfq-iosched: quantum check tweak
Currently a queue can dispatch at most 4 requests if there are other queues.
This isn't optimal: the device can handle more requests. For example, AHCI
can handle 31 requests. The limit is there for fairness, but we can apply a
tweak: if the queue still has plenty of its time slice left, we can ignore
the limit. Testing shows this boosts my workload (two threads doing random
reads of an SSD) from 78 MB/s to 100 MB/s.
Thanks to Corrado and Vivek for their suggestions on the patch.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
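
As a rough sketch of the estimate described in the message (a standalone userspace model, not the kernel code: the function name and the jiffy values below are invented for illustration, and the plain comparison ignores the jiffy wraparound that the kernel's time_after() handles), each dispatched request is assumed to cost about one idle period, and the slice counts as "used soon" when draining the in-flight requests would overrun the slice's end:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace model of the slice-time estimate: assume each in-flight
 * request takes roughly one idle period (slice_idle) to complete, and
 * report whether draining them would run past the end of the slice.
 * All times are in jiffies; the numbers in main() are made up.
 */
static bool slice_used_soon(unsigned long now, unsigned long slice_end,
			    unsigned long slice_idle, unsigned int dispatched)
{
	return now + slice_idle * dispatched > slice_end;
}

int main(void)
{
	unsigned long now = 1000, slice_end = 1040, slice_idle = 8;

	/* 3 in flight: 1000 + 24 <= 1040, plenty of slice left, so the
	 * queue may keep dispatching past the old hard cap of 4 */
	printf("3 in flight: used soon = %d\n",
	       slice_used_soon(now, slice_end, slice_idle, 3));

	/* 6 in flight: 1000 + 48 > 1040, slice nearly gone, so throttle */
	printf("6 in flight: used soon = %d\n",
	       slice_used_soon(now, slice_end, slice_idle, 6));
	return 0;
}
```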
Diffstat (limited to 'block')
| -rw-r--r-- | block/cfq-iosched.c | 30 |
|---|---|---|

1 file changed, 26 insertions(+), 4 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f27e535ce262..0db07d7771b5 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -19,7 +19,7 @@
  * tunables
  */
 /* max queue in one round of service */
-static const int cfq_quantum = 4;
+static const int cfq_quantum = 8;
 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
 /* maximum backwards seek, in KiB */
 static const int cfq_back_max = 16 * 1024;
@@ -2197,6 +2197,19 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 }
 
+static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
+	struct cfq_queue *cfqq)
+{
+	/* the queue hasn't finished any request, can't estimate */
+	if (cfq_cfqq_slice_new(cfqq))
+		return 1;
+	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
+		cfqq->slice_end))
+		return 1;
+
+	return 0;
+}
+
 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned int max_dispatch;
@@ -2213,7 +2226,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
 		return false;
 
-	max_dispatch = cfqd->cfq_quantum;
+	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
 	if (cfq_class_idle(cfqq))
 		max_dispatch = 1;
 
@@ -2230,13 +2243,22 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		/*
 		 * We have other queues, don't allow more IO from this one
 		 */
-		if (cfqd->busy_queues > 1)
+		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
 			return false;
 
 		/*
 		 * Sole queue user, no limit
 		 */
-		max_dispatch = -1;
+		if (cfqd->busy_queues == 1)
+			max_dispatch = -1;
+		else
+			/*
+			 * Normally we start throttling cfqq when cfq_quantum/2
+			 * requests have been dispatched. But we can drive
+			 * deeper queue depths at the beginning of slice
+			 * subjected to upper limit of cfq_quantum.
+			 * */
+			max_dispatch = cfqd->cfq_quantum;
 	}
 
 	/*
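
Putting the pieces together, the dispatch cap for a sync, non-idle queue behaves after this patch roughly as modeled below (a simplified sketch of the cfq_may_dispatch() logic above, with kernel types, the async and idle-class cases, and locking stripped out; the function name and return convention are invented for the example):

```c
#include <stdbool.h>

/*
 * Simplified model of the post-patch cap in cfq_may_dispatch() for a
 * sync, non-idle queue. Returns how many requests the queue may have
 * dispatched before it is throttled, or -1 for "no limit":
 *  - sole busy queue: no limit at all;
 *  - several queues but plenty of slice left: the full cfq_quantum;
 *  - several queues and slice nearly used up: cfq_quantum / 2 (min 1).
 */
static int max_dispatch_model(int busy_queues, unsigned int quantum,
			      bool slice_used_soon)
{
	unsigned int base = quantum / 2 > 1 ? quantum / 2 : 1;

	if (busy_queues == 1)
		return -1;		/* sole queue user, no limit */
	if (slice_used_soon)
		return (int)base;	/* fairness throttle at quantum/2 */
	return (int)quantum;		/* fresh slice: drive deeper depths */
}
```

With the default cfq_quantum raised from 4 to 8, a queue competing with others can push up to 8 requests while its slice is fresh and is throttled back to 4 once the slice is nearly consumed; a lone queue remains unlimited as before.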