From 6b41fd1785f4effe2f3bd40da864415812f5b8c9 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 20 Jun 2006 19:44:03 -0700 Subject: Fix up CFQ scheduler for recent rbtree node shrinkage The color is now in the low bits of the parent pointer, and initializing it to 0 happens as part of the whole memset above, so just remove the unnecessary RB_CLEAR_COLOR. Signed-off-by: Linus Torvalds --- block/cfq-iosched.c | 1 - 1 file changed, 1 deletion(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 6200d9b9af28..e2e6ad0a158e 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1318,7 +1318,6 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) if (cic) { memset(cic, 0, sizeof(*cic)); - RB_CLEAR_COLOR(&cic->rb_node); cic->last_end_request = jiffies; INIT_LIST_HEAD(&cic->queue_list); cic->dtor = cfq_free_io_context; -- cgit v1.2.3 From b9d9c82b4d081feb464f62dfc786c8621d09ecd2 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Thu, 15 Jun 2006 15:31:56 +0200 Subject: [PATCH] Driver core: add generic "subsystem" link to all devices Like the SUBSYSTEM= key we find in the environment of the uevent, this creates a generic "subsystem" link in sysfs for every device. Userspace usually doesn't care at all if it's a "class" or a "bus" device. This provides a unified way to determine the subsystem of a device, regardless of the way the driver core has created it. Signed-off-by: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- block/genhd.c | 7 ++----- drivers/base/bus.c | 2 ++ drivers/base/class.c | 2 ++ drivers/base/core.c | 9 +++++++-- fs/partitions/check.c | 4 ++++ 5 files changed, 17 insertions(+), 7 deletions(-) (limited to 'block') diff --git a/block/genhd.c b/block/genhd.c index 5a8d3bf02f17..8d7339511e5e 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -17,8 +17,7 @@ #include #include -static struct subsystem block_subsys; - +struct subsystem block_subsys; static DEFINE_MUTEX(block_subsys_lock); /* @@ -511,9 +510,7 @@ static struct kset_uevent_ops block_uevent_ops = { .uevent = block_uevent, }; -/* declare block_subsys. */ -static decl_subsys(block, &ktype_block, &block_uevent_ops); - +decl_subsys(block, &ktype_block, &block_uevent_ops); /* * aggregate disk stat collector. 
Uses the same stats that the sysfs diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 64ba9011d1a8..050d86d0b872 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -374,6 +374,7 @@ int bus_add_device(struct device * dev) error = device_add_attrs(bus, dev); if (!error) { sysfs_create_link(&bus->devices.kobj, &dev->kobj, dev->bus_id); + sysfs_create_link(&dev->kobj, &dev->bus->subsys.kset.kobj, "subsystem"); sysfs_create_link(&dev->kobj, &dev->bus->subsys.kset.kobj, "bus"); } } @@ -408,6 +409,7 @@ void bus_attach_device(struct device * dev) void bus_remove_device(struct device * dev) { if (dev->bus) { + sysfs_remove_link(&dev->kobj, "subsystem"); sysfs_remove_link(&dev->kobj, "bus"); sysfs_remove_link(&dev->bus->devices.kobj, dev->bus_id); device_remove_attrs(dev->bus, dev); diff --git a/drivers/base/class.c b/drivers/base/class.c index 50e841a33af0..9aa127460262 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -561,6 +561,7 @@ int class_device_add(struct class_device *class_dev) goto out2; /* add the needed attributes to this device */ + sysfs_create_link(&class_dev->kobj, &parent_class->subsys.kset.kobj, "subsystem"); class_dev->uevent_attr.attr.name = "uevent"; class_dev->uevent_attr.attr.mode = S_IWUSR; class_dev->uevent_attr.attr.owner = parent_class->owner; @@ -737,6 +738,7 @@ void class_device_del(struct class_device *class_dev) sysfs_remove_link(&class_dev->kobj, "device"); sysfs_remove_link(&class_dev->dev->kobj, class_name); } + sysfs_remove_link(&class_dev->kobj, "subsystem"); class_device_remove_file(class_dev, &class_dev->uevent_attr); if (class_dev->devt_attr) class_device_remove_file(class_dev, class_dev->devt_attr); diff --git a/drivers/base/core.c b/drivers/base/core.c index 252cf403f891..cc8bb97427d0 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -319,9 +319,12 @@ int device_add(struct device *dev) dev->devt_attr = attr; } - if (dev->class) + if (dev->class) { + sysfs_create_link(&dev->kobj, &dev->class->subsys.kset.kobj, + "subsystem"); sysfs_create_link(&dev->class->subsys.kset.kobj, &dev->kobj, dev->bus_id); + } if ((error = device_pm_add(dev))) goto PMError; @@ -422,8 +425,10 @@ void device_del(struct device * dev) klist_del(&dev->knode_parent); if (dev->devt_attr) device_remove_file(dev, dev->devt_attr); - if (dev->class) + if (dev->class) { + sysfs_remove_link(&dev->kobj, "subsystem"); sysfs_remove_link(&dev->class->subsys.kset.kobj, dev->bus_id); + } device_remove_file(dev, &dev->uevent_attr); /* Notify the platform of the removal, in case they diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 7ef1f094de91..8851b81e7c5a 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -329,6 +329,7 @@ void delete_partition(struct gendisk *disk, int part) p->ios[0] = p->ios[1] = 0; p->sectors[0] = p->sectors[1] = 0; devfs_remove("%s/part%d", disk->devfs_name, part); + sysfs_remove_link(&p->kobj, "subsystem"); if (p->holder_dir) kobject_unregister(p->holder_dir); kobject_uevent(&p->kobj, KOBJ_REMOVE); @@ -363,6 +364,7 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len) kobject_add(&p->kobj); if (!disk->part_uevent_suppress) kobject_uevent(&p->kobj, KOBJ_ADD); + sysfs_create_link(&p->kobj, &block_subsys.kset.kobj, "subsystem"); partition_sysfs_add_subdir(p); disk->part[part-1] = p; } @@ -398,6 +400,7 @@ static void disk_sysfs_symlinks(struct gendisk *disk) kfree(disk_name); } } + sysfs_create_link(&disk->kobj, &block_subsys.kset.kobj, "subsystem"); } /* Not exported, helper to 
add_disk(). */ @@ -548,5 +551,6 @@ void del_gendisk(struct gendisk *disk) put_device(disk->driverfs_dev); disk->driverfs_dev = NULL; } + sysfs_remove_link(&disk->kobj, "subsystem"); kobject_del(&disk->kobj); } -- cgit v1.2.3 From 626ab0e69d376fa07599af669af8ba92d58e87c1 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 23 Jun 2006 02:05:55 -0700 Subject: [PATCH] list: use list_replace_init() instead of list_splice_init() list_splice_init(list, head) does unneeded work if it is already known that list_empty(head) == 1. We can use list_replace_init() instead. Signed-off-by: Oleg Nesterov Acked-by: David S. Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/mm/pageattr.c | 8 ++++---- block/ll_rw_blk.c | 5 ++--- fs/aio.c | 4 ++-- kernel/timer.c | 8 ++++---- kernel/workqueue.c | 4 ++-- net/core/dev.c | 6 +++--- net/core/link_watch.c | 5 ++--- 7 files changed, 19 insertions(+), 21 deletions(-) (limited to 'block') diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c index 92c3d9f0e731..0887b34bc59b 100644 --- a/arch/i386/mm/pageattr.c +++ b/arch/i386/mm/pageattr.c @@ -209,19 +209,19 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot) } void global_flush_tlb(void) -{ - LIST_HEAD(l); +{ + struct list_head l; struct page *pg, *next; BUG_ON(irqs_disabled()); spin_lock_irq(&cpa_lock); - list_splice_init(&df_list, &l); + list_replace_init(&df_list, &l); spin_unlock_irq(&cpa_lock); flush_map(); list_for_each_entry_safe(pg, next, &l, lru) __free_page(pg); -} +} #ifdef CONFIG_DEBUG_PAGEALLOC void kernel_map_pages(struct page *page, int numpages, int enable) diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 7eb36c53f4b7..465b54312c59 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -3359,12 +3359,11 @@ EXPORT_SYMBOL(end_that_request_chunk); */ static void blk_done_softirq(struct softirq_action *h) { - struct list_head *cpu_list; - LIST_HEAD(local_list); + struct list_head *cpu_list, local_list; local_irq_disable(); cpu_list = &__get_cpu_var(blk_cpu_done); - list_splice_init(cpu_list, &local_list); + list_replace_init(cpu_list, &local_list); local_irq_enable(); while (!list_empty(&local_list)) { diff --git a/fs/aio.c b/fs/aio.c index e41e932ba489..8c34a62df7d7 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -777,11 +777,11 @@ out: static int __aio_run_iocbs(struct kioctx *ctx) { struct kiocb *iocb; - LIST_HEAD(run_list); + struct list_head run_list; assert_spin_locked(&ctx->ctx_lock); - list_splice_init(&ctx->run_list, &run_list); + list_replace_init(&ctx->run_list, &run_list); while (!list_empty(&run_list)) { iocb = list_entry(run_list.next, struct kiocb, ki_run_list); diff --git a/kernel/timer.c b/kernel/timer.c index 9e49deed468c..3bf0e9ed2dbe 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -419,10 +419,10 @@ static inline void __run_timers(tvec_base_t *base) spin_lock_irq(&base->lock); while (time_after_eq(jiffies, base->timer_jiffies)) { - struct list_head work_list = LIST_HEAD_INIT(work_list); + struct list_head work_list; struct list_head *head = &work_list; int index = base->timer_jiffies & TVR_MASK; - + /* * Cascade timers: */ @@ -431,8 +431,8 @@ static inline void __run_timers(tvec_base_t *base) (!cascade(base, &base->tv3, INDEX(1))) && !cascade(base, &base->tv4, INDEX(2))) cascade(base, &base->tv5, INDEX(3)); - ++base->timer_jiffies; - list_splice_init(base->tv1.vec + index, &work_list); + ++base->timer_jiffies; + list_replace_init(base->tv1.vec + index, &work_list); while (!list_empty(head)) { void (*fn)(unsigned long); 
unsigned long data; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 880fb415a8f6..740c5abceb07 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -531,11 +531,11 @@ int current_is_keventd(void) static void take_over_work(struct workqueue_struct *wq, unsigned int cpu) { struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu); - LIST_HEAD(list); + struct list_head list; struct work_struct *work; spin_lock_irq(&cwq->lock); - list_splice_init(&cwq->worklist, &list); + list_replace_init(&cwq->worklist, &list); while (!list_empty(&list)) { printk("Taking work for %s\n", wq->name); diff --git a/net/core/dev.c b/net/core/dev.c index ab39fe17cb58..195a5e96b2d1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2980,7 +2980,7 @@ static void netdev_wait_allrefs(struct net_device *dev) static DEFINE_MUTEX(net_todo_run_mutex); void netdev_run_todo(void) { - struct list_head list = LIST_HEAD_INIT(list); + struct list_head list; /* Need to guard against multiple cpu's getting out of order. */ mutex_lock(&net_todo_run_mutex); @@ -2995,9 +2995,9 @@ void netdev_run_todo(void) /* Snapshot list, allow later requests */ spin_lock(&net_todo_list_lock); - list_splice_init(&net_todo_list, &list); + list_replace_init(&net_todo_list, &list); spin_unlock(&net_todo_list_lock); - + while (!list_empty(&list)) { struct net_device *dev = list_entry(list.next, struct net_device, todo_list); diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 646937cc2d84..0f37266411b5 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -91,11 +91,10 @@ static void rfc2863_policy(struct net_device *dev) /* Must be called with the rtnl semaphore held */ void linkwatch_run_queue(void) { - LIST_HEAD(head); - struct list_head *n, *next; + struct list_head head, *n, *next; spin_lock_irq(&lweventlist_lock); - list_splice_init(&lweventlist, &head); + list_replace_init(&lweventlist, &head); spin_unlock_irq(&lweventlist_lock); list_for_each_safe(n, next, &head) { -- cgit v1.2.3 From bae386f7884aa3720cc7880b36a41a1d2b9c327b Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Mon, 24 Apr 2006 21:12:59 +0200 Subject: [PATCH] iosched: use hlist for request hashtable Use hlist instead of list_head for request hashtable in deadline-iosched and as-iosched. This also lets us remove the flag that tracked whether a request was hashed or unhashed. 
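[ Editor's note: for illustration only -- not part of the patch, and all names below are made up. A minimal sketch of the pattern: an hlist_node can report whether it is linked via hlist_unhashed(), so the hand-rolled on_hash flag becomes redundant, and an hlist_head is a single pointer, halving the size of the hash table array itself. ]

	#include <linux/kernel.h>
	#include <linux/list.h>

	struct example_rq {
		struct hlist_node hash;		/* pprev == NULL <=> not hashed */
	};

	static void example_hash(struct hlist_head *bucket, struct example_rq *rq)
	{
		BUG_ON(!hlist_unhashed(&rq->hash));	/* must not be hashed twice */
		hlist_add_head(&rq->hash, bucket);
	}

	static void example_unhash(struct example_rq *rq)
	{
		if (!hlist_unhashed(&rq->hash))
			hlist_del_init(&rq->hash);	/* re-init keeps the test above valid */
	}

[ The node must start out initialized with INIT_HLIST_NODE(), which is exactly what the patch adds to the set_request paths below. ]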
Signed-off-by: Akinobu Mita Signed-off-by: Jens Axboe block/as-iosched.c | 45 +++++++++++++++++++-------------------------- block/deadline-iosched.c | 39 ++++++++++++++++----------------------- 2 files changed, 35 insertions(+), 49 deletions(-) --- block/as-iosched.c | 45 +++++++++++++++++++-------------------------- block/deadline-iosched.c | 39 ++++++++++++++++----------------------- 2 files changed, 35 insertions(+), 49 deletions(-) (limited to 'block') diff --git a/block/as-iosched.c b/block/as-iosched.c index 0c750393be4a..9b13d72ffefa 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -96,7 +96,7 @@ struct as_data { struct as_rq *next_arq[2]; /* next in sort order */ sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ - struct list_head *hash; /* request hash */ + struct hlist_head *hash; /* request hash */ unsigned long exit_prob; /* probability a task will exit while being waited on */ @@ -165,8 +165,7 @@ struct as_rq { /* * request hash, key is the ending offset (for back merge lookup) */ - struct list_head hash; - unsigned int on_hash; + struct hlist_node hash; /* * expire fifo @@ -282,17 +281,15 @@ static const int as_hash_shift = 6; #define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift)) #define AS_HASH_ENTRIES (1 << as_hash_shift) #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) -#define list_entry_hash(ptr) list_entry((ptr), struct as_rq, hash) static inline void __as_del_arq_hash(struct as_rq *arq) { - arq->on_hash = 0; - list_del_init(&arq->hash); + hlist_del_init(&arq->hash); } static inline void as_del_arq_hash(struct as_rq *arq) { - if (arq->on_hash) + if (!hlist_unhashed(&arq->hash)) __as_del_arq_hash(arq); } @@ -300,10 +297,9 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq) { struct request *rq = arq->request; - BUG_ON(arq->on_hash); + BUG_ON(!hlist_unhashed(&arq->hash)); - arq->on_hash = 1; - list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]); + hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]); } /* @@ -312,31 +308,29 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq) static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq) { struct request *rq = arq->request; - struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))]; + struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))]; - if (!arq->on_hash) { + if (hlist_unhashed(&arq->hash)) { WARN_ON(1); return; } - if (arq->hash.prev != head) { - list_del(&arq->hash); - list_add(&arq->hash, head); + if (&arq->hash != head->first) { + hlist_del(&arq->hash); + hlist_add_head(&arq->hash, head); } } static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset) { - struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)]; - struct list_head *entry, *next = hash_list->next; + struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)]; + struct hlist_node *entry, *next; + struct as_rq *arq; - while ((entry = next) != hash_list) { - struct as_rq *arq = list_entry_hash(entry); + hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) { struct request *__rq = arq->request; - next = entry->next; - - BUG_ON(!arq->on_hash); + BUG_ON(hlist_unhashed(&arq->hash)); if (!rq_mergeable(__rq)) { as_del_arq_hash(arq); @@ -1601,8 +1595,7 @@ static int as_set_request(request_queue_t *q, struct request *rq, arq->request = rq; arq->state = AS_RQ_PRESCHED; arq->io_context = NULL; - INIT_LIST_HEAD(&arq->hash); - arq->on_hash = 0; + INIT_HLIST_NODE(&arq->hash); 
INIT_LIST_HEAD(&arq->fifo); rq->elevator_private = arq; return 0; @@ -1662,7 +1655,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e) ad->q = q; /* Identify what queue the data belongs to */ - ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES, + ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES, GFP_KERNEL, q->node); if (!ad->hash) { kfree(ad); @@ -1684,7 +1677,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e) INIT_WORK(&ad->antic_work, as_work_handler, q); for (i = 0; i < AS_HASH_ENTRIES; i++) - INIT_LIST_HEAD(&ad->hash[i]); + INIT_HLIST_HEAD(&ad->hash[i]); INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index c94de8e12fbf..e5bccaaed563 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -30,8 +30,7 @@ static const int deadline_hash_shift = 5; #define DL_HASH_FN(sec) (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift)) #define DL_HASH_ENTRIES (1 << deadline_hash_shift) #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) -#define list_entry_hash(ptr) list_entry((ptr), struct deadline_rq, hash) -#define ON_HASH(drq) (drq)->on_hash +#define ON_HASH(drq) (!hlist_unhashed(&(drq)->hash)) struct deadline_data { /* @@ -48,7 +47,7 @@ struct deadline_data { * next in sort order. read, write or both are NULL */ struct deadline_rq *next_drq[2]; - struct list_head *hash; /* request hash */ + struct hlist_head *hash; /* request hash */ unsigned int batching; /* number of sequential requests made */ sector_t last_sector; /* head position */ unsigned int starved; /* times reads have starved writes */ @@ -79,8 +78,7 @@ struct deadline_rq { /* * request hash, key is the ending offset (for back merge lookup) */ - struct list_head hash; - char on_hash; + struct hlist_node hash; /* * expire fifo @@ -100,8 +98,7 @@ static kmem_cache_t *drq_pool; */ static inline void __deadline_del_drq_hash(struct deadline_rq *drq) { - drq->on_hash = 0; - list_del_init(&drq->hash); + hlist_del_init(&drq->hash); } static inline void deadline_del_drq_hash(struct deadline_rq *drq) @@ -117,8 +114,7 @@ deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq) BUG_ON(ON_HASH(drq)); - drq->on_hash = 1; - list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]); + hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]); } /* @@ -128,26 +124,24 @@ static inline void deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq) { struct request *rq = drq->request; - struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))]; + struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))]; - if (ON_HASH(drq) && drq->hash.prev != head) { - list_del(&drq->hash); - list_add(&drq->hash, head); + if (ON_HASH(drq) && &drq->hash != head->first) { + hlist_del(&drq->hash); + hlist_add_head(&drq->hash, head); } } static struct request * deadline_find_drq_hash(struct deadline_data *dd, sector_t offset) { - struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)]; - struct list_head *entry, *next = hash_list->next; + struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)]; + struct hlist_node *entry, *next; + struct deadline_rq *drq; - while ((entry = next) != hash_list) { - struct deadline_rq *drq = list_entry_hash(entry); + hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) { struct request *__rq = drq->request; - next = entry->next; - BUG_ON(!ON_HASH(drq)); if (!rq_mergeable(__rq)) { @@ 
-625,7 +619,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e) return NULL; memset(dd, 0, sizeof(*dd)); - dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES, + dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES, GFP_KERNEL, q->node); if (!dd->hash) { kfree(dd); @@ -641,7 +635,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e) } for (i = 0; i < DL_HASH_ENTRIES; i++) - INIT_LIST_HEAD(&dd->hash[i]); + INIT_HLIST_HEAD(&dd->hash[i]); INIT_LIST_HEAD(&dd->fifo_list[READ]); INIT_LIST_HEAD(&dd->fifo_list[WRITE]); @@ -677,8 +671,7 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, RB_CLEAR(&drq->rb_node); drq->request = rq; - INIT_LIST_HEAD(&drq->hash); - drq->on_hash = 0; + INIT_HLIST_NODE(&drq->hash); INIT_LIST_HEAD(&drq->fifo); -- cgit v1.2.3 From a038e2536472b4dd932399b5277e65f188811de5 Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Mon, 5 Jun 2006 12:09:01 +0200 Subject: [PATCH] blk_start_queue() must be called with irq disabled - add warning The queue lock can be taken from interrupts so it must always be taken with irq disabling primitives. Some primitives already verify this. blk_start_queue() is called under this lock, so interrupts must be disabled. Also document this requirement clearly in blk_init_queue(), where the queue spinlock is set. Signed-off-by: Paolo 'Blaisorblade' Giarrusso Signed-off-by: Andrew Morton Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 465b54312c59..17c42ddd31db 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -1663,6 +1663,8 @@ static void blk_unplug_timeout(unsigned long data) **/ void blk_start_queue(request_queue_t *q) { + WARN_ON(!irqs_disabled()); + clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); /* @@ -1878,7 +1880,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node); * get dealt with eventually. * * The queue spin lock must be held while manipulating the requests on the - * request queue. + * request queue; this lock will be taken also from interrupt context, so irq + * disabling is needed for it. * * Function returns a pointer to the initialized request queue, or NULL if * it didn't succeed. -- cgit v1.2.3 From acf421755593f7d7bd9352d57eda796c6eb4fa43 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Mon, 12 Jun 2006 14:20:58 +0200 Subject: [PATCH] remove dead code from elevator switching We already drop the refcount in elevator_exit(), and as we're setting 'e' to NULL, we'll never take that branch anyway. Finally, as 'e' is a local var that isn't referenced afterwards, setting it to NULL is pointless. 
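[ Editor's note: the failure path in question, condensed and reconstructed from the diff below purely to spell out the reasoning -- this is not new kernel code: ]

	fail_register:
		elevator_exit(e);		/* already drops the kobject reference */
		e = NULL;			/* local only; never read again */
		q->elevator = old_elevator;
		elv_register_queue(q);
		clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
		if (e)				/* always false after 'e = NULL' above */
			kobject_put(&e->kobj);	/* dead code; a double-put if ever reached */
		return 0;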
Signed-off-by: Dave Jones Signed-off-by: Jens Axboe --- block/elevator.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'block') diff --git a/block/elevator.c b/block/elevator.c index a0afdd317cef..d00b283f31d2 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -850,12 +850,9 @@ fail_register: * one again (along with re-adding the sysfs dir) */ elevator_exit(e); - e = NULL; q->elevator = old_elevator; elv_register_queue(q); clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); - if (e) - kobject_put(&e->kobj); return 0; } -- cgit v1.2.3 From 271f18f102c789f59644bb6c53a69da1df72b2f4 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 13 Jun 2006 08:08:38 +0200 Subject: [PATCH] cfq-iosched: Don't set the queue batching limits We cannot update them if the user changes nr_requests, so don't set them in the first place. The gains are pretty questionable as well. The batching loss has been shown to decrease throughput. Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 48 +++--------------------------------------------- 1 file changed, 3 insertions(+), 45 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index e2e6ad0a158e..c88f161d3fb3 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -123,8 +123,6 @@ struct cfq_data { */ struct hlist_head *crq_hash; - unsigned int max_queued; - mempool_t *crq_pool; int rq_in_driver; @@ -1910,7 +1908,6 @@ static inline int __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct task_struct *task, int rw) { -#if 1 if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) && !cfq_cfqq_must_alloc_slice(cfqq)) { cfq_mark_cfqq_must_alloc_slice(cfqq); @@ -1918,39 +1915,6 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, } return ELV_MQUEUE_MAY; -#else - if (!cfqq || task->flags & PF_MEMALLOC) - return ELV_MQUEUE_MAY; - if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) { - if (cfq_cfqq_wait_request(cfqq)) - return ELV_MQUEUE_MUST; - - /* - * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we - * can quickly flood the queue with writes from a single task - */ - if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) { - cfq_mark_cfqq_must_alloc_slice(cfqq); - return ELV_MQUEUE_MUST; - } - - return ELV_MQUEUE_MAY; - } - if (cfq_class_idle(cfqq)) - return ELV_MQUEUE_NO; - if (cfqq->allocated[rw] >= cfqd->max_queued) { - struct io_context *ioc = get_io_context(GFP_ATOMIC); - int ret = ELV_MQUEUE_NO; - - if (ioc && ioc->nr_batch_requests) - ret = ELV_MQUEUE_MAY; - - put_io_context(ioc); - return ret; - } - - return ELV_MQUEUE_MAY; -#endif } static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio) @@ -1979,16 +1943,13 @@ static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio) static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq) { struct cfq_data *cfqd = q->elevator->elevator_data; - struct request_list *rl = &q->rq; - if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) { + if (unlikely(cfqd->rq_starved)) { + struct request_list *rl = &q->rq; + smp_mb(); if (waitqueue_active(&rl->wait[READ])) wake_up(&rl->wait[READ]); - } - - if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) { - smp_mb(); if (waitqueue_active(&rl->wait[WRITE])) wake_up(&rl->wait[WRITE]); } @@ -2278,9 +2239,6 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e) cfqd->queue = q; - cfqd->max_queued = q->nr_requests / 4; - q->nr_batching = cfq_queued; - init_timer(&cfqd->idle_slice_timer); cfqd->idle_slice_timer.function = 
cfq_idle_slice_timer; cfqd->idle_slice_timer.data = (unsigned long) cfqd; -- cgit v1.2.3 From b31dc66a54ad986b6b73bdc49c8efc17cbad1833 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 13 Jun 2006 08:26:10 +0200 Subject: [PATCH] Kill PF_SYNCWRITE flag A process flag to indicate whether we are doing sync io is incredibly ugly. It also causes performance problems when one does a lot of async io and then proceeds to sync it. Part of the io will go out as async, and the other part as sync. This causes a disconnect between the previously submitted io and the synced io. For io schedulers such as CFQ, this leads to lost merges and suboptimal scheduling behaviour. Remove PF_SYNCWRITE completely from the fsync/msync paths, and let the O_DIRECT path just directly indicate that the writes are sync by using WRITE_SYNC instead. Signed-off-by: Jens Axboe --- block/as-iosched.c | 2 +- block/cfq-iosched.c | 4 +--- block/ll_rw_blk.c | 3 +++ drivers/usb/gadget/file_storage.c | 2 -- fs/buffer.c | 2 -- fs/direct-io.c | 18 ++++++++---------- fs/fs-writeback.c | 2 -- include/linux/blkdev.h | 2 ++ include/linux/sched.h | 11 +++++------ mm/msync.c | 3 --- 10 files changed, 20 insertions(+), 29 deletions(-) (limited to 'block') diff --git a/block/as-iosched.c b/block/as-iosched.c index 9b13d72ffefa..56c99fa037df 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -1339,7 +1339,7 @@ static void as_add_request(request_queue_t *q, struct request *rq) arq->state = AS_RQ_NEW; if (rq_data_dir(arq->request) == READ - || current->flags&PF_SYNCWRITE) + || (arq->request->flags & REQ_RW_SYNC)) arq->is_sync = 1; else arq->is_sync = 0; diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index c88f161d3fb3..4c4e9cc3ae26 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -277,8 +277,6 @@ static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsi static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *); static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask); -#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) - /* * lots of deadline iosched dupes, can be abstracted later... 
*/ @@ -334,7 +332,7 @@ static int cfq_queue_empty(request_queue_t *q) static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) { - if (rw == READ || process_sync(task)) + if (rw == READ || rw == WRITE_SYNC) return task->pid; return CFQ_KEY_ASYNC; diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 17c42ddd31db..2270bb451385 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -2827,6 +2827,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio) if (unlikely(bio_barrier(bio))) req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE); + if (bio_sync(bio)) + req->flags |= REQ_RW_SYNC; + req->errors = 0; req->hard_sector = req->sector = bio->bi_sector; req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio); diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c index 6f887478b148..a43dc908ac59 100644 --- a/drivers/usb/gadget/file_storage.c +++ b/drivers/usb/gadget/file_storage.c @@ -1906,7 +1906,6 @@ static int fsync_sub(struct lun *curlun) inode = filp->f_dentry->d_inode; mutex_lock(&inode->i_mutex); - current->flags |= PF_SYNCWRITE; rc = filemap_fdatawrite(inode->i_mapping); err = filp->f_op->fsync(filp, filp->f_dentry, 1); if (!rc) @@ -1914,7 +1913,6 @@ static int fsync_sub(struct lun *curlun) err = filemap_fdatawait(inode->i_mapping); if (!rc) rc = err; - current->flags &= ~PF_SYNCWRITE; mutex_unlock(&inode->i_mutex); VLDBG(curlun, "fdatasync -> %d\n", rc); return rc; diff --git a/fs/buffer.c b/fs/buffer.c index 23f1f3a68077..373bb6292bdc 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -331,7 +331,6 @@ long do_fsync(struct file *file, int datasync) goto out; } - current->flags |= PF_SYNCWRITE; ret = filemap_fdatawrite(mapping); /* @@ -346,7 +345,6 @@ long do_fsync(struct file *file, int datasync) err = filemap_fdatawait(mapping); if (!ret) ret = err; - current->flags &= ~PF_SYNCWRITE; out: return ret; } diff --git a/fs/direct-io.c b/fs/direct-io.c index b05d1b218776..538fb0418fba 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -162,7 +162,7 @@ static int dio_refill_pages(struct dio *dio) NULL); /* vmas */ up_read(&current->mm->mmap_sem); - if (ret < 0 && dio->blocks_available && (dio->rw == WRITE)) { + if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) { struct page *page = ZERO_PAGE(dio->curr_user_address); /* * A memory fault, but the filesystem has some outstanding @@ -535,7 +535,7 @@ static int get_more_blocks(struct dio *dio) map_bh->b_state = 0; map_bh->b_size = fs_count << dio->inode->i_blkbits; - create = dio->rw == WRITE; + create = dio->rw & WRITE; if (dio->lock_type == DIO_LOCKING) { if (dio->block_in_file < (i_size_read(dio->inode) >> dio->blkbits)) @@ -867,7 +867,7 @@ do_holes: loff_t i_size_aligned; /* AKPM: eargh, -ENOTBLK is a hack */ - if (dio->rw == WRITE) { + if (dio->rw & WRITE) { page_cache_release(page); return -ENOTBLK; } @@ -1045,7 +1045,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, } } /* end iovec loop */ - if (ret == -ENOTBLK && rw == WRITE) { + if (ret == -ENOTBLK && (rw & WRITE)) { /* * The remaining part of the request will be * be handled by buffered I/O when we return @@ -1089,7 +1089,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, if (dio->is_async) { int should_wait = 0; - if (dio->result < dio->size && rw == WRITE) { + if (dio->result < dio->size && (rw & WRITE)) { dio->waiter = current; should_wait = 1; } @@ -1142,7 +1142,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, ret = transferred; /* We could have also come here 
on an AIO file extend */ - if (!is_sync_kiocb(iocb) && rw == WRITE && + if (!is_sync_kiocb(iocb) && (rw & WRITE) && ret >= 0 && dio->result == dio->size) /* * For AIO writes where we have completed the @@ -1194,7 +1194,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, int acquire_i_mutex = 0; if (rw & WRITE) - current->flags |= PF_SYNCWRITE; + rw = WRITE_SYNC; if (bdev) bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev)); @@ -1270,7 +1270,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, * even for AIO, we need to wait for i/o to complete before * returning in this case. */ - dio->is_async = !is_sync_kiocb(iocb) && !((rw == WRITE) && + dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) && (end > i_size_read(inode))); retval = direct_io_worker(rw, iocb, inode, iov, offset, @@ -1284,8 +1284,6 @@ out: mutex_unlock(&inode->i_mutex); else if (acquire_i_mutex) mutex_lock(&inode->i_mutex); - if (rw & WRITE) - current->flags &= ~PF_SYNCWRITE; return retval; } EXPORT_SYMBOL(__blockdev_direct_IO); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 6db95cf3aaa2..031b27a4bc9a 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -623,7 +623,6 @@ int generic_osync_inode(struct inode *inode, struct address_space *mapping, int int need_write_inode_now = 0; int err2; - current->flags |= PF_SYNCWRITE; if (what & OSYNC_DATA) err = filemap_fdatawrite(mapping); if (what & (OSYNC_METADATA|OSYNC_DATA)) { @@ -636,7 +635,6 @@ int generic_osync_inode(struct inode *inode, struct address_space *mapping, int if (!err) err = err2; } - current->flags &= ~PF_SYNCWRITE; spin_lock(&inode_lock); if ((inode->i_state & I_DIRTY) && diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3457e7b97363..482a21d67627 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -241,6 +241,7 @@ enum rq_flag_bits { __REQ_PM_RESUME, /* resume request */ __REQ_PM_SHUTDOWN, /* shutdown request */ __REQ_ORDERED_COLOR, /* is before or after barrier */ + __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ __REQ_NR_BITS, /* stops here */ }; @@ -270,6 +271,7 @@ enum rq_flag_bits { #define REQ_PM_RESUME (1 << __REQ_PM_RESUME) #define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN) #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) +#define REQ_RW_SYNC (1 << __REQ_RW_SYNC) /* * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME diff --git a/include/linux/sched.h b/include/linux/sched.h index a9d23c7d1b25..38b4791e6a5d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -941,12 +941,11 @@ static inline void put_task_struct(struct task_struct *t) #define PF_KSWAPD 0x00040000 /* I am kswapd */ #define PF_SWAPOFF 0x00080000 /* I am in swapoff */ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ -#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */ -#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */ -#define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */ -#define PF_SWAPWRITE 0x01000000 /* Allowed to write to swap */ -#define PF_SPREAD_PAGE 0x04000000 /* Spread page cache over cpuset */ -#define PF_SPREAD_SLAB 0x08000000 /* Spread some slab caches over cpuset */ +#define PF_BORROWED_MM 0x00200000 /* I am a kthread doing use_mm */ +#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ +#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ +#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ +#define PF_SPREAD_SLAB 0x02000000 /* Spread some 
slab caches over cpuset */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ /* diff --git a/mm/msync.c b/mm/msync.c index bc6c95376366..d083544df21b 100644 --- a/mm/msync.c +++ b/mm/msync.c @@ -170,8 +170,6 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags) * just ignore them, but return -ENOMEM at the end. */ down_read(&current->mm->mmap_sem); - if (flags & MS_SYNC) - current->flags |= PF_SYNCWRITE; vma = find_vma(current->mm, start); if (!vma) { error = -ENOMEM; @@ -228,7 +226,6 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags) } } while (vma && !done); out_unlock: - current->flags &= ~PF_SYNCWRITE; up_read(&current->mm->mmap_sem); out: return error; -- cgit v1.2.3 From b17fd9bceb99610f6dc7998c9a4ed6b71520be2b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 19 Jun 2006 10:06:48 +0200 Subject: [PATCH] Make CFQ the default IO scheduler Signed-off-by: Jens Axboe --- block/Kconfig.iosched | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index f3b7753aac99..48d090e266fc 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -40,7 +40,7 @@ config IOSCHED_CFQ choice prompt "Default I/O scheduler" - default DEFAULT_AS + default DEFAULT_CFQ help Select the I/O scheduler which will be used by default for all block devices. -- cgit v1.2.3 From 35e6077cb16f93517ba5a51ba849b186d2474d60 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 14 Jun 2006 09:10:45 +0200 Subject: [PATCH] cfq-iosched: correctly set ioprio on both targets Patch originally from Vasily Tarasov. If process 2 changes the io priority of process 1 via the sys_ioprio_set system call (as ionice does), cfq_init_prio_data() applies the priority of process 2 (current) to the queue of process 1 and clears the flag that marks an ioprio change, so process 1 then runs with the priority of process 2. Instead of calling cfq_init_prio_data() on an io-priority change, only mark the queue as having a changed priority; every time a new request arrives, the cfq scheduler checks the flag and automatically switches the queue to the new priority. Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 4c4e9cc3ae26..84b75f88c279 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1388,10 +1388,9 @@ static inline void changed_ioprio(struct cfq_io_context *cic) } } cfqq = cic->cfqq[SYNC]; - if (cfqq) { + if (cfqq) cfq_mark_cfqq_prio_changed(cfqq); - cfq_init_prio_data(cfqq); - } + spin_unlock(cfqd->queue->queue_lock); } } -- cgit v1.2.3 From caaa5f9f0a75d1dc5e812e69afdbb8720e077fd3 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 16 Jun 2006 11:23:00 +0200 Subject: [PATCH] cfq-iosched: many performance fixes This is a collection of patches that greatly improve CFQ performance in some circumstances. - Change the idling logic to only kick in after a request is done and we are deciding what to do. Before, the idling included the request service time, so it was hard to adjust. Now it's true think/idle time. - Take advantage of TCQ/NCQ/queueing for seeky sync workloads, but keep it in control for sync and sequential (or close to) workloads; a condensed sketch of this heuristic follows below. - Expire queues immediately and move on to other busy queues, if we are not going to idle after the current one finishes. - Don't rearm idle timer if there are no busy queues. Just leave the system idle. 
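[ Editor's note: a condensed sketch of the seeky-workload heuristic, adapted from the patch's cfq_update_idle_window() in the diff that follows -- treat it as illustration, not the patch itself. CIC_SEEKY() classifies a process as seeky once its mean seek distance exceeds 128 KiB: ]

	#define CIC_SEEKY(cic)	((cic)->seek_mean > (128 * 1024))

	static void cfq_update_idle_window(struct cfq_data *cfqd,
					   struct cfq_queue *cfqq,
					   struct cfq_io_context *cic)
	{
		int enable_idle = cfq_cfqq_idle_window(cfqq);

		/*
		 * Never idle for a seeky process on a tagged (TCQ/NCQ)
		 * device: keeping the hardware queue busy beats waiting.
		 */
		if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
		    (cfqd->hw_tag && CIC_SEEKY(cic)))
			enable_idle = 0;
		else if (sample_valid(cic->ttime_samples)) {
			/* idle only when the measured think time is short */
			if (cic->ttime_mean > cfqd->cfq_slice_idle)
				enable_idle = 0;
			else
				enable_idle = 1;
		}

		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}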
Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 116 ++++++++++++++++++++++++++++++++++------------------ 1 file changed, 76 insertions(+), 40 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 84b75f88c279..13c4793fdf5f 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -26,7 +26,7 @@ static const int cfq_back_penalty = 2; /* penalty of a backwards seek */ static const int cfq_slice_sync = HZ / 10; static int cfq_slice_async = HZ / 25; static const int cfq_slice_async_rq = 2; -static int cfq_slice_idle = HZ / 70; +static int cfq_slice_idle = HZ / 125; #define CFQ_IDLE_GRACE (HZ / 10) #define CFQ_SLICE_SCALE (5) @@ -906,6 +906,8 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) return cfqq; } +#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024)) + static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) { @@ -939,7 +941,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) * fair distribution of slice time for a process doing back-to-back * seeks. so allow a little bit of time for him to submit a new rq */ - if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072) + if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) sl = 2; mod_timer(&cfqd->idle_slice_timer, jiffies + sl); @@ -1038,8 +1040,10 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) */ if (!RB_EMPTY(&cfqq->sort_list)) goto keep_queue; - else if (cfq_cfqq_class_sync(cfqq) && - time_before(now, cfqq->slice_end)) { + else if (cfq_cfqq_dispatched(cfqq)) { + cfqq = NULL; + goto keep_queue; + } else if (cfq_cfqq_class_sync(cfqq)) { if (cfq_arm_slice_timer(cfqd, cfqq)) return NULL; } @@ -1088,8 +1092,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, } while (dispatched < max_dispatch); /* - * if slice end isn't set yet, set it. if at least one request was - * sync, use the sync time slice value + * if slice end isn't set yet, set it. */ if (!cfqq->slice_end) cfq_set_prio_slice(cfqd, cfqq); @@ -1100,7 +1103,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, */ if ((!cfq_cfqq_sync(cfqq) && cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) || - cfq_class_idle(cfqq)) + cfq_class_idle(cfqq) || + !cfq_cfqq_idle_window(cfqq)) cfq_slice_expired(cfqd, 0); return dispatched; @@ -1109,10 +1113,11 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, static int cfq_forced_dispatch_cfqqs(struct list_head *list) { - int dispatched = 0; struct cfq_queue *cfqq, *next; struct cfq_rq *crq; + int dispatched; + dispatched = 0; list_for_each_entry_safe(cfqq, next, list, cfq_list) { while ((crq = cfqq->next_crq)) { cfq_dispatch_insert(cfqq->cfqd->queue, crq); @@ -1120,6 +1125,7 @@ cfq_forced_dispatch_cfqqs(struct list_head *list) } BUG_ON(!list_empty(&cfqq->fifo)); } + return dispatched; } @@ -1146,7 +1152,8 @@ static int cfq_dispatch_requests(request_queue_t *q, int force) { struct cfq_data *cfqd = q->elevator->elevator_data; - struct cfq_queue *cfqq; + struct cfq_queue *cfqq, *prev_cfqq; + int dispatched; if (!cfqd->busy_queues) return 0; @@ -1154,10 +1161,17 @@ cfq_dispatch_requests(request_queue_t *q, int force) if (unlikely(force)) return cfq_forced_dispatch(cfqd); - cfqq = cfq_select_queue(cfqd); - if (cfqq) { + dispatched = 0; + prev_cfqq = NULL; + while ((cfqq = cfq_select_queue(cfqd)) != NULL) { int max_dispatch; + /* + * Don't repeat dispatch from the previous queue. 
+ */ + if (prev_cfqq == cfqq) + break; + cfq_clear_cfqq_must_dispatch(cfqq); cfq_clear_cfqq_wait_request(cfqq); del_timer(&cfqd->idle_slice_timer); @@ -1166,10 +1180,19 @@ cfq_dispatch_requests(request_queue_t *q, int force) if (cfq_class_idle(cfqq)) max_dispatch = 1; - return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); + dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); + + /* + * If the dispatch cfqq has idling enabled and is still + * the active queue, break out. + */ + if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue) + break; + + prev_cfqq = cfqq; } - return 0; + return dispatched; } /* @@ -1375,24 +1398,28 @@ static inline void changed_ioprio(struct cfq_io_context *cic) { struct cfq_data *cfqd = cic->key; struct cfq_queue *cfqq; - if (cfqd) { - spin_lock(cfqd->queue->queue_lock); - cfqq = cic->cfqq[ASYNC]; - if (cfqq) { - struct cfq_queue *new_cfqq; - new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, - cic->ioc->task, GFP_ATOMIC); - if (new_cfqq) { - cic->cfqq[ASYNC] = new_cfqq; - cfq_put_queue(cfqq); - } - } - cfqq = cic->cfqq[SYNC]; - if (cfqq) - cfq_mark_cfqq_prio_changed(cfqq); - spin_unlock(cfqd->queue->queue_lock); + if (unlikely(!cfqd)) + return; + + spin_lock(cfqd->queue->queue_lock); + + cfqq = cic->cfqq[ASYNC]; + if (cfqq) { + struct cfq_queue *new_cfqq; + new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task, + GFP_ATOMIC); + if (new_cfqq) { + cic->cfqq[ASYNC] = new_cfqq; + cfq_put_queue(cfqq); + } } + + cfqq = cic->cfqq[SYNC]; + if (cfqq) + cfq_mark_cfqq_prio_changed(cfqq); + + spin_unlock(cfqd->queue->queue_lock); } /* @@ -1461,8 +1488,7 @@ retry: * set ->slice_left to allow preemption for a new process */ cfqq->slice_left = 2 * cfqd->cfq_slice_idle; - if (!cfqd->hw_tag) - cfq_mark_cfqq_idle_window(cfqq); + cfq_mark_cfqq_idle_window(cfqq); cfq_mark_cfqq_prio_changed(cfqq); cfq_init_prio_data(cfqq); } @@ -1653,7 +1679,8 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, { int enable_idle = cfq_cfqq_idle_window(cfqq); - if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag) + if (!cic->ioc->task || !cfqd->cfq_slice_idle || + (cfqd->hw_tag && CIC_SEEKY(cic))) enable_idle = 0; else if (sample_valid(cic->ttime_samples)) { if (cic->ttime_mean > cfqd->cfq_slice_idle) @@ -1683,7 +1710,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, return 0; if (!cfqq) - return 1; + return 0; if (cfq_class_idle(cfqq)) return 1; @@ -1715,7 +1742,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2; cfqq->slice_end = cfqq->slice_left + jiffies; - __cfq_slice_expired(cfqd, cfqq, 1); + cfq_slice_expired(cfqd, 1); __cfq_set_active_queue(cfqd, cfqq); } @@ -1834,11 +1861,23 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq) cfqq->service_last = now; cfq_resort_rr_list(cfqq, 0); } - cfq_schedule_dispatch(cfqd); } - if (cfq_crq_is_sync(crq)) + if (sync) crq->io_context->last_end_request = now; + + /* + * If this is the active queue, check if it needs to be expired, + * or if we want to idle in case it has no pending requests. 
+ */ + if (cfqd->active_queue == cfqq) { + if (time_after(now, cfqq->slice_end)) + cfq_slice_expired(cfqd, 0); + else if (sync && RB_EMPTY(&cfqq->sort_list)) { + if (!cfq_arm_slice_timer(cfqd, cfqq)) + cfq_schedule_dispatch(cfqd); + } + } } static struct request * @@ -2106,11 +2145,8 @@ static void cfq_idle_slice_timer(unsigned long data) * only expire and reinvoke request handler, if there are * other queues with pending requests */ - if (!cfqd->busy_queues) { - cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end); - add_timer(&cfqd->idle_slice_timer); + if (!cfqd->busy_queues) goto out_cont; - } /* * not expired and it has a request pending, let it dispatch -- cgit v1.2.3 From fd61af0384014ca29428ace7c17a978b755aeddd Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 16 Jun 2006 15:35:39 +0200 Subject: [PATCH] cfq-iosched: rq update fixes - Remember to set ->last_sector so that the cfq_choose_req() logic works correctly. - Remove redundant call to cfq_choose_req() Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 13c4793fdf5f..940364edf2b9 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -952,11 +952,15 @@ static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq) { struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_queue *cfqq = crq->cfq_queue; + struct request *rq; cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq); cfq_remove_request(crq->request); cfqq->on_dispatch[cfq_crq_is_sync(crq)]++; elv_dispatch_sort(q, crq->request); + + rq = list_entry(q->queue_head.prev, struct request, queuelist); + cfqd->last_sector = rq->sector + rq->nr_sectors; } /* @@ -1767,11 +1771,7 @@ static void cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct cfq_rq *crq) { - struct cfq_io_context *cic; - - cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); - - cic = crq->io_context; + struct cfq_io_context *cic = crq->io_context; /* * we never wait for an async request and we don't allow preemption -- cgit v1.2.3 From dd67d051529387f6e44d22d1d5540ef281965fdd Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 21 Jun 2006 09:36:18 +0200 Subject: [PATCH] rbtree: support functions used by the io schedulers They all duplicate macros to check for an empty root and/or node, and to clear a node. So put those in rbtree.h. 
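[ Editor's note: the three helpers this patch adds to include/linux/rbtree.h, quoted from the diff below: ]

	#define RB_EMPTY_ROOT(root)	((root)->rb_node == NULL)
	#define RB_EMPTY_NODE(node)	(rb_parent(node) != node)
	#define RB_CLEAR_NODE(node)	(rb_set_parent(node, node))

[ A cleared node aims its parent pointer at itself, a state no node linked into a tree can be in, so a single pointer comparison distinguishes on-tree from off-tree nodes. Note that RB_EMPTY_NODE() keeps the polarity of the ON_RB() macro it replaces: as used here it is true while the node *is* linked into a tree; later kernels inverted the test to match the name. ]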
Signed-off-by: Jens Axboe --- block/as-iosched.c | 17 +++++++---------- block/cfq-iosched.c | 22 ++++++++-------------- block/deadline-iosched.c | 13 +++++-------- include/linux/rbtree.h | 4 ++++ 4 files changed, 24 insertions(+), 32 deletions(-) (limited to 'block') diff --git a/block/as-iosched.c b/block/as-iosched.c index 56c99fa037df..1ec5df466708 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -347,9 +347,6 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset) /* * rb tree support functions */ -#define RB_EMPTY(root) ((root)->rb_node == NULL) -#define ON_RB(node) (rb_parent(node) != node) -#define RB_CLEAR(node) (rb_set_parent(node, node)) #define rb_entry_arq(node) rb_entry((node), struct as_rq, rb_node) #define ARQ_RB_ROOT(ad, arq) (&(ad)->sort_list[(arq)->is_sync]) #define rq_rb_key(rq) (rq)->sector @@ -418,13 +415,13 @@ static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq) static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq) { - if (!ON_RB(&arq->rb_node)) { + if (!RB_EMPTY_NODE(&arq->rb_node)) { WARN_ON(1); return; } rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq)); - RB_CLEAR(&arq->rb_node); + RB_CLEAR_NODE(&arq->rb_node); } static struct request * @@ -545,7 +542,7 @@ static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last) struct rb_node *rbprev = rb_prev(&last->rb_node); struct as_rq *arq_next, *arq_prev; - BUG_ON(!ON_RB(&last->rb_node)); + BUG_ON(!RB_EMPTY_NODE(&last->rb_node)); if (rbprev) arq_prev = rb_entry_arq(rbprev); @@ -1122,7 +1119,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) struct request *rq = arq->request; const int data_dir = arq->is_sync; - BUG_ON(!ON_RB(&arq->rb_node)); + BUG_ON(!RB_EMPTY_NODE(&arq->rb_node)); as_antic_stop(ad); ad->antic_status = ANTIC_OFF; @@ -1247,7 +1244,7 @@ static int as_dispatch_request(request_queue_t *q, int force) */ if (reads) { - BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC])); + BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC])); if (writes && ad->batch_data_dir == REQ_SYNC) /* @@ -1271,7 +1268,7 @@ static int as_dispatch_request(request_queue_t *q, int force) if (writes) { dispatch_writes: - BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC])); + BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC])); if (ad->batch_data_dir == REQ_SYNC) { ad->changed_batch = 1; @@ -1591,7 +1588,7 @@ static int as_set_request(request_queue_t *q, struct request *rq, if (arq) { memset(arq, 0, sizeof(*arq)); - RB_CLEAR(&arq->rb_node); + RB_CLEAR_NODE(&arq->rb_node); arq->request = rq; arq->state = AS_RQ_PRESCHED; arq->io_context = NULL; diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 940364edf2b9..e25223e147a2 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -60,11 +60,6 @@ static DEFINE_SPINLOCK(cfq_exit_lock); /* * rb-tree defines */ -#define RB_EMPTY(node) ((node)->rb_node == NULL) -#define RB_CLEAR(node) do { \ - memset(node, 0, sizeof(*node)); \ -} while (0) -#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL) #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) #define rq_rb_key(rq) (rq)->sector @@ -559,7 +554,7 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq) rb_erase(&crq->rb_node, &cfqq->sort_list); - if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list)) + if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) cfq_del_cfqq_rr(cfqd, cfqq); } @@ -914,7 +909,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) struct cfq_io_context *cic; unsigned long sl; - 
WARN_ON(!RB_EMPTY(&cfqq->sort_list)); + WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); WARN_ON(cfqq != cfqd->active_queue); /* @@ -1042,7 +1037,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) * if queue has requests, dispatch one. if not, check if * enough slice is left to wait for one */ - if (!RB_EMPTY(&cfqq->sort_list)) + if (!RB_EMPTY_ROOT(&cfqq->sort_list)) goto keep_queue; else if (cfq_cfqq_dispatched(cfqq)) { cfqq = NULL; @@ -1066,7 +1061,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, { int dispatched = 0; - BUG_ON(RB_EMPTY(&cfqq->sort_list)); + BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); do { struct cfq_rq *crq; @@ -1090,7 +1085,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfqd->active_cic = crq->io_context; } - if (RB_EMPTY(&cfqq->sort_list)) + if (RB_EMPTY_ROOT(&cfqq->sort_list)) break; } while (dispatched < max_dispatch); @@ -1480,7 +1475,6 @@ retry: INIT_HLIST_NODE(&cfqq->cfq_hash); INIT_LIST_HEAD(&cfqq->cfq_list); - RB_CLEAR_ROOT(&cfqq->sort_list); INIT_LIST_HEAD(&cfqq->fifo); cfqq->key = key; @@ -1873,7 +1867,7 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq) if (cfqd->active_queue == cfqq) { if (time_after(now, cfqq->slice_end)) cfq_slice_expired(cfqd, 0); - else if (sync && RB_EMPTY(&cfqq->sort_list)) { + else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) { if (!cfq_arm_slice_timer(cfqd, cfqq)) cfq_schedule_dispatch(cfqd); } @@ -2059,7 +2053,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, crq = mempool_alloc(cfqd->crq_pool, gfp_mask); if (crq) { - RB_CLEAR(&crq->rb_node); + RB_CLEAR_NODE(&crq->rb_node); crq->rb_key = 0; crq->request = rq; INIT_HLIST_NODE(&crq->hash); @@ -2151,7 +2145,7 @@ static void cfq_idle_slice_timer(unsigned long data) /* * not expired and it has a request pending, let it dispatch */ - if (!RB_EMPTY(&cfqq->sort_list)) { + if (!RB_EMPTY_ROOT(&cfqq->sort_list)) { cfq_mark_cfqq_must_dispatch(cfqq); goto out_kick; } diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index e5bccaaed563..4469dd84623c 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -159,9 +159,6 @@ deadline_find_drq_hash(struct deadline_data *dd, sector_t offset) /* * rb tree support functions */ -#define RB_EMPTY(root) ((root)->rb_node == NULL) -#define ON_RB(node) (rb_parent(node) != node) -#define RB_CLEAR(node) (rb_set_parent(node, node)) #define rb_entry_drq(node) rb_entry((node), struct deadline_rq, rb_node) #define DRQ_RB_ROOT(dd, drq) (&(dd)->sort_list[rq_data_dir((drq)->request)]) #define rq_rb_key(rq) (rq)->sector @@ -220,9 +217,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq) dd->next_drq[data_dir] = rb_entry_drq(rbnext); } - BUG_ON(!ON_RB(&drq->rb_node)); + BUG_ON(!RB_EMPTY_NODE(&drq->rb_node)); rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq)); - RB_CLEAR(&drq->rb_node); + RB_CLEAR_NODE(&drq->rb_node); } static struct request * @@ -496,7 +493,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force) */ if (reads) { - BUG_ON(RB_EMPTY(&dd->sort_list[READ])); + BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ])); if (writes && (dd->starved++ >= dd->writes_starved)) goto dispatch_writes; @@ -512,7 +509,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force) if (writes) { dispatch_writes: - BUG_ON(RB_EMPTY(&dd->sort_list[WRITE])); + BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE])); dd->starved = 0; @@ -668,7 +665,7 @@ deadline_set_request(request_queue_t *q, struct request 
*rq, struct bio *bio, drq = mempool_alloc(dd->drq_pool, gfp_mask); if (drq) { memset(drq, 0, sizeof(*drq)); - RB_CLEAR(&drq->rb_node); + RB_CLEAR_NODE(&drq->rb_node); drq->request = rq; INIT_HLIST_NODE(&drq->hash); diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index f37006f21664..8d5382e62c08 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -132,6 +132,10 @@ static inline void rb_set_color(struct rb_node *rb, int color) #define RB_ROOT (struct rb_root) { NULL, } #define rb_entry(ptr, type, member) container_of(ptr, type, member) +#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) +#define RB_EMPTY_NODE(node) (rb_parent(node) != node) +#define RB_CLEAR_NODE(node) (rb_set_parent(node, node)) + extern void rb_insert_color(struct rb_node *, struct rb_root *); extern void rb_erase(struct rb_node *, struct rb_root *); -- cgit v1.2.3 From 8269730b389d4793348d521140f9c76fb1828249 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 21 Jun 2006 14:48:09 +0200 Subject: [BLOCK] Fix bounce limit address check Do a safer check for when to enable DMA. Currently we enable ISA DMA for cases that do not need it, resulting in OOM conditions when ZONE_DMA runs out of space. Signed-off-by: Jens Axboe --- block/ll_rw_blk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 2270bb451385..0603ab2f3692 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -638,7 +638,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr) /* Assume anything <= 4GB can be handled by IOMMU. Actually some IOMMUs can handle everything, but I don't know of a way to test this here. */ - if (bounce_pfn < (0xffffffff>>PAGE_SHIFT)) + if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) dma = 1; q->bounce_pfn = max_low_pfn; #else -- cgit v1.2.3
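[ Editor's note: a worked example of the bounce-limit check, with assumed numbers for illustration. Take a 64-bit machine with 1GB of RAM (so BLK_BOUNCE_HIGH == 0x40000000) and 4KB pages, and a device with a 2GB DMA mask:

	bounce_pfn = 0x7fffffff >> 12 = 0x7ffff

	old test: 0x7ffff < (0xffffffff >> 12) == 0xfffff	-> dma = 1,
		  so the device's I/O gets bounced through the small ISA
		  ZONE_DMA even though it can reach every page actually
		  present, eventually exhausting that zone (the OOM
		  described above)
	new test: 0x7ffff < (min(0xffffffff, 0x40000000) >> 12)
		  == 0x40000					-> dma = 0,
		  no bouncing needed

With the fix, the ISA pool is only used when the device genuinely cannot address all of low memory. ]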