author    Jens Axboe <axboe@suse.de>               2006-07-28 09:23:08 +0200
committer Jens Axboe <axboe@nelson.home.kernel.dk> 2006-09-30 20:26:56 +0200
commit    9817064b68fef7e4580c6df1ea597e106b9ff88b (patch)
tree      76c27990626247613e9efa45b792d51ad79635d7 /block/deadline-iosched.c
parent    4aff5e2333c9a1609662f2091f55c3f6fffdad36 (diff)
download  linux-9817064b68fef7e4580c6df1ea597e106b9ff88b.tar.bz2
[PATCH] elevator: move the backmerging logic into the elevator core
Right now, every IO scheduler implements its own backmerging (except for
noop, which does no merging). That results in duplicated code for
essentially the same operation, which is never a good thing. This patch
moves the backmerging out of the io schedulers and into the elevator
core. We save 1.6kb of text and as a bonus get backmerging for noop as
well. Win-win!

Signed-off-by: Jens Axboe <axboe@suse.de>
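The mechanism being centralized is simple: a request can absorb a new bio at
its tail (a "back merge") exactly when the bio's starting sector equals the
request's ending sector, so each scheduler kept a hash table keyed on
sector + nr_sectors to find that candidate in O(1). The sketch below is a
minimal, self-contained illustration of that lookup, not the kernel's code:
struct sketch_rq, buckets, add_rq() and find_back_merge() are hypothetical
stand-ins for struct request, the hlist-based buckets, and hash_long().

#include <stdio.h>

#define HASH_SHIFT   5
#define HASH_ENTRIES (1 << HASH_SHIFT)

/* simplified stand-in for struct request; not a kernel type */
struct sketch_rq {
        unsigned long long sector;              /* starting sector */
        unsigned long long nr_sectors;          /* length in sectors */
        struct sketch_rq *hash_next;            /* bucket chain */
};

static struct sketch_rq *buckets[HASH_ENTRIES];

/* the hash key is the ENDING offset: a bio starting there can be appended */
static unsigned long long rq_end(const struct sketch_rq *rq)
{
        return rq->sector + rq->nr_sectors;
}

/* crude stand-in for DL_HASH_FN()/hash_long(): 8-sector blocks, masked */
static unsigned hash_fn(unsigned long long key)
{
        return (unsigned)((key >> 3) & (HASH_ENTRIES - 1));
}

static void add_rq(struct sketch_rq *rq)
{
        unsigned h = hash_fn(rq_end(rq));

        rq->hash_next = buckets[h];
        buckets[h] = rq;
}

/* back merge lookup: find a request whose end equals the bio's start */
static struct sketch_rq *find_back_merge(unsigned long long bio_sector)
{
        struct sketch_rq *rq;

        for (rq = buckets[hash_fn(bio_sector)]; rq; rq = rq->hash_next)
                if (rq_end(rq) == bio_sector)
                        return rq;
        return NULL;
}

int main(void)
{
        struct sketch_rq rq = { .sector = 100, .nr_sectors = 8, .hash_next = NULL };

        add_rq(&rq);
        /* rq covers sectors [100,108): a bio at 108 is a back merge candidate */
        printf("bio @108: %s\n", find_back_merge(108) ? "back merge" : "no merge");
        printf("bio @200: %s\n", find_back_merge(200) ? "back merge" : "no merge");
        return 0;
}

After a successful back merge the request's end sector changes, so the entry
must be re-hashed under its new key; that is the reposition-on-merge dance
visible in the deadline_merged_request() hunk below, and it is what the
elevator core now does once on behalf of every scheduler.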
Diffstat (limited to 'block/deadline-iosched.c')
-rw-r--r--  block/deadline-iosched.c  128
1 files changed, 2 insertions(+), 126 deletions(-)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c7ca9f0b6498..b66e820f544d 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -12,7 +12,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
-#include <linux/hash.h>
 #include <linux/rbtree.h>
 
 /*
@@ -24,13 +23,6 @@ static const int writes_starved = 2;    /* max times reads can starve a write */
 static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                            by the above parameters. For throughput. */
 
-static const int deadline_hash_shift = 5;
-#define DL_HASH_BLOCK(sec)      ((sec) >> 3)
-#define DL_HASH_FN(sec)         (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
-#define DL_HASH_ENTRIES         (1 << deadline_hash_shift)
-#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
-#define ON_HASH(drq)            (!hlist_unhashed(&(drq)->hash))
-
 struct deadline_data {
         /*
          * run time data
@@ -46,7 +38,6 @@ struct deadline_data {
          * next in sort order. read, write or both are NULL
          */
         struct deadline_rq *next_drq[2];
-        struct hlist_head *hash;        /* request hash */
         unsigned int batching;          /* number of sequential requests made */
         sector_t last_sector;           /* head position */
         unsigned int starved;           /* times reads have starved writes */
@@ -75,11 +66,6 @@ struct deadline_rq {
         struct request *request;
 
         /*
-         * request hash, key is the ending offset (for back merge lookup)
-         */
-        struct hlist_node hash;
-
-        /*
          * expire fifo
          */
         struct list_head fifo;
@@ -93,69 +79,6 @@ static kmem_cache_t *drq_pool;
 #define RQ_DATA(rq)     ((struct deadline_rq *) (rq)->elevator_private)
 
 /*
- * the back merge hash support functions
- */
-static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
-{
-        hlist_del_init(&drq->hash);
-}
-
-static inline void deadline_del_drq_hash(struct deadline_rq *drq)
-{
-        if (ON_HASH(drq))
-                __deadline_del_drq_hash(drq);
-}
-
-static inline void
-deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
-        struct request *rq = drq->request;
-
-        BUG_ON(ON_HASH(drq));
-
-        hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
-}
-
-/*
- * move hot entry to front of chain
- */
-static inline void
-deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
-        struct request *rq = drq->request;
-        struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
-
-        if (ON_HASH(drq) && &drq->hash != head->first) {
-                hlist_del(&drq->hash);
-                hlist_add_head(&drq->hash, head);
-        }
-}
-
-static struct request *
-deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
-{
-        struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
-        struct hlist_node *entry, *next;
-        struct deadline_rq *drq;
-
-        hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
-                struct request *__rq = drq->request;
-
-                BUG_ON(!ON_HASH(drq));
-
-                if (!rq_mergeable(__rq)) {
-                        __deadline_del_drq_hash(drq);
-                        continue;
-                }
-
-                if (rq_hash_key(__rq) == offset)
-                        return __rq;
-        }
-
-        return NULL;
-}
-
-/*
  * rb tree support functions
  */
 #define rb_entry_drq(node)      rb_entry((node), struct deadline_rq, rb_node)
@@ -267,22 +190,19 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 {
         struct deadline_data *dd = q->elevator->elevator_data;
         struct deadline_rq *drq = RQ_DATA(rq);
-
         const int data_dir = rq_data_dir(drq->request);
 
         deadline_add_drq_rb(dd, drq);
+
         /*
          * set expire time (only used for reads) and add to fifo list
          */
         drq->expires = jiffies + dd->fifo_expire[data_dir];
         list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
-
-        if (rq_mergeable(rq))
-                deadline_add_drq_hash(dd, drq);
 }
 
 /*
- * remove rq from rbtree, fifo, and hash
+ * remove rq from rbtree and fifo.
  */
 static void deadline_remove_request(request_queue_t *q, struct request *rq)
 {
@@ -291,7 +211,6 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
 
         list_del_init(&drq->fifo);
         deadline_del_drq_rb(dd, drq);
-        deadline_del_drq_hash(drq);
 }
 
 static int
@@ -302,19 +221,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
         int ret;
 
         /*
-         * see if the merge hash can satisfy a back merge
-         */
-        __rq = deadline_find_drq_hash(dd, bio->bi_sector);
-        if (__rq) {
-                BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
-                if (elv_rq_merge_ok(__rq, bio)) {
-                        ret = ELEVATOR_BACK_MERGE;
-                        goto out;
-                }
-        }
-
-        /*
          * check for front merge
          */
         if (dd->front_merges) {
@@ -333,8 +239,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
         return ELEVATOR_NO_MERGE;
 out:
-        if (ret)
-                deadline_hot_drq_hash(dd, RQ_DATA(__rq));
         *req = __rq;
         return ret;
 }
@@ -345,12 +249,6 @@ static void deadline_merged_request(request_queue_t *q, struct request *req)
         struct deadline_rq *drq = RQ_DATA(req);
 
         /*
-         * hash always needs to be repositioned, key is end sector
-         */
-        deadline_del_drq_hash(drq);
-        deadline_add_drq_hash(dd, drq);
-
-        /*
          * if the merge was a front merge, we need to reposition request
          */
         if (rq_rb_key(req) != drq->rb_key) {
@@ -370,13 +268,6 @@ deadline_merged_requests(request_queue_t *q, struct request *req,
         BUG_ON(!drq);
         BUG_ON(!dnext);
 
-        /*
-         * reposition drq (this is the merged request) in hash, and in rbtree
-         * in case of a front merge
-         */
-        deadline_del_drq_hash(drq);
-        deadline_add_drq_hash(dd, drq);
-
         if (rq_rb_key(req) != drq->rb_key) {
                 deadline_del_drq_rb(dd, drq);
                 deadline_add_drq_rb(dd, drq);
@@ -594,7 +485,6 @@ static void deadline_exit_queue(elevator_t *e)
         BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
 
         mempool_destroy(dd->drq_pool);
-        kfree(dd->hash);
         kfree(dd);
 }
 
@@ -605,7 +495,6 @@ static void deadline_exit_queue(elevator_t *e)
 static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 {
         struct deadline_data *dd;
-        int i;
 
         if (!drq_pool)
                 return NULL;
@@ -615,24 +504,13 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
                 return NULL;
         memset(dd, 0, sizeof(*dd));
 
-        dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
-                                GFP_KERNEL, q->node);
-        if (!dd->hash) {
-                kfree(dd);
-                return NULL;
-        }
-
         dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                         mempool_free_slab, drq_pool, q->node);
         if (!dd->drq_pool) {
-                kfree(dd->hash);
                 kfree(dd);
                 return NULL;
         }
 
-        for (i = 0; i < DL_HASH_ENTRIES; i++)
-                INIT_HLIST_HEAD(&dd->hash[i]);
-
         INIT_LIST_HEAD(&dd->fifo_list[READ]);
         INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
         dd->sort_list[READ] = RB_ROOT;
@@ -667,8 +545,6 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                 RB_CLEAR_NODE(&drq->rb_node);
                 drq->request = rq;
 
-                INIT_HLIST_NODE(&drq->hash);
-
                 INIT_LIST_HEAD(&drq->fifo);
 
                 rq->elevator_private = drq;
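Note: this per-file view shows only the code being deleted from the deadline
scheduler. The other half of the patch (not shown here) adds the equivalent
generic machinery to block/elevator.c, the elv_rqhash_*() helpers keyed on
rq->sector + rq->nr_sectors, so every elevator, including noop, gets back
merging from the core.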