author		Shaohua Li <shaohua.li@intel.com>	2011-07-08 08:19:20 +0200
committer	Jens Axboe <jaxboe@fusionio.com>	2011-07-08 08:19:20 +0200
commit		55c022bbddb2c056b5dff1bd1b1758d31b6d64c9
tree		fdd3aa29a1407bbd19b8efe47b2538544da85a70
parent		719c0c590609809365c6f3da2f923cd84dc99113
download	linux-55c022bbddb2c056b5dff1bd1b1758d31b6d64c9.tar.bz2
block: avoid building too big plug list
When I test a fio script with a big I/O depth, I found that total throughput drops compared to a relatively small I/O depth. The reason is that the thread accumulates a big plug list of requests, which causes delays (surely this depends on CPU speed). We'd better have a threshold on the request count: when the threshold is reached, it means no request merging is happening and queue lock contention isn't severe when pushing per-task requests to the queue, so the main advantages of block plugging no longer apply. We can force a plug list flush in this case. With this change, my test throughput actually increases and is almost equal to that of the small I/O depth. Another side effect is that the irq-off time in blk_flush_plug_list() decreases for big I/O depth.

BLK_MAX_REQUEST_COUNT is chosen arbitrarily, but 16 is effective at reducing lock contention for me. I'm open here, though; 32 is OK in my tests too.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
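For context, the caller-side pattern this change affects looks roughly like the following (a minimal sketch against the plugging API of this kernel era; the bio array, READ direction, and loop bounds are illustrative, not part of this patch). While plugged, each queued request bumps plug->count, and queuing the 16th request now flushes the list early instead of holding everything until blk_finish_plug():

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Submit a batch of already-built bios under a single plug. */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* plug->count starts at 0 */
	for (i = 0; i < nr; i++)
		/*
		 * Each bio that becomes a plugged request increments
		 * plug->count; once it reaches BLK_MAX_REQUEST_COUNT (16),
		 * __make_request() calls blk_flush_plug_list() itself,
		 * so the plug list stays bounded at high I/O depth.
		 */
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);		/* flushes whatever remains */
}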
-rw-r--r--	block/blk-core.c	5
-rw-r--r--	include/linux/blkdev.h	3

2 files changed, 8 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index d2f8f4049abd..a56485292062 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1302,7 +1302,10 @@ get_rq:
 			plug->should_sort = 1;
 		}
 		list_add_tail(&req->queuelist, &plug->list);
+		plug->count++;
 		drive_stat_acct(req, 1);
+		if (plug->count >= BLK_MAX_REQUEST_COUNT)
+			blk_flush_plug_list(plug, false);
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
@@ -2626,6 +2629,7 @@ void blk_start_plug(struct blk_plug *plug)
 	INIT_LIST_HEAD(&plug->list);
 	INIT_LIST_HEAD(&plug->cb_list);
 	plug->should_sort = 0;
+	plug->count = 0;

 	/*
 	 * If this is a nested plug, don't actually assign it. It will be
@@ -2709,6 +2713,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		return;

 	list_splice_init(&plug->list, &list);
+	plug->count = 0;

 	if (plug->should_sort) {
 		list_sort(NULL, &list, plug_rq_cmp);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 36f2e2b99ae3..92edb9601242 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -862,7 +862,10 @@ struct blk_plug {
 	struct list_head list;
 	struct list_head cb_list;
 	unsigned int should_sort;
+	unsigned int count;
 };
+#define BLK_MAX_REQUEST_COUNT 16
+
 struct blk_plug_cb {
 	struct list_head list;
 	void (*callback)(struct blk_plug_cb *);