author    David Woodhouse <dwmw2@infradead.org>    2006-05-03 13:30:35 +0100
committer David Woodhouse <dwmw2@infradead.org>    2006-05-03 13:30:35 +0100
commit    edc4ff7c08e9885c40e60c4fb39fa42cc91a0602 (patch)
tree      f375d28043dd4457428a841167dc93d760ba9a46 /block
parent    cbb9a56177b16294ed347ba7fcb1c66c8adb5dc4 (diff)
parent    e17df688f7064dae1417ce425dd1e4b71d24d63b (diff)
download  linux-edc4ff7c08e9885c40e60c4fb39fa42cc91a0602.tar.bz2
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'block')
-rw-r--r--  block/as-iosched.c   |  5
-rw-r--r--  block/cfq-iosched.c  | 53
-rw-r--r--  block/elevator.c     |  2
-rw-r--r--  block/ll_rw_blk.c    |  6
4 files changed, 46 insertions(+), 20 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 296708ceceb2..e25a5d79ab27 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1844,9 +1844,10 @@ static void __exit as_exit(void)
 	DECLARE_COMPLETION(all_gone);
 	elv_unregister(&iosched_as);
 	ioc_gone = &all_gone;
-	barrier();
+	/* ioc_gone's update must be visible before reading ioc_count */
+	smp_wmb();
 	if (atomic_read(&ioc_count))
-		complete(ioc_gone);
+		wait_for_completion(ioc_gone);
 	synchronize_rcu();
 	kmem_cache_destroy(arq_pool);
 }
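
The as_exit() hunk fixes two distinct bugs. First, barrier() only constrains the compiler, so it is upgraded to smp_wmb() to order the ioc_gone store against later accesses on SMP. Second, complete() is replaced by wait_for_completion(): the exiting module must wait for the last io_context reference to drop, not signal the completion itself. A minimal kernel-style sketch of the intended pairing follows; the put-side helper (put_cic_sketch) and its placement are illustrative assumptions, not code from this tree, where the dec-and-test sits in the scheduler's io-context freeing path.

/* 2.6-era headers assumed for this sketch */
#include <linux/completion.h>
#include <asm/atomic.h>
#include <asm/system.h>

static struct completion *ioc_gone;
static atomic_t ioc_count = ATOMIC_INIT(0);

/* Release side (sketch): the last reference dropper signals the waiter. */
static void put_cic_sketch(void)
{
	if (atomic_dec_and_test(&ioc_count) && ioc_gone)
		complete(ioc_gone);	/* wakes the module-exit path */
}

/* Exit side, mirroring the hunk: publish ioc_gone, then test the count. */
static void exit_sketch(void)
{
	DECLARE_COMPLETION(all_gone);

	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (atomic_read(&ioc_count))
		wait_for_completion(ioc_gone);	/* sleep until count hits zero */
}

Without the barrier, the final put could run with the ioc_gone store not yet visible, see NULL, and never call complete(), leaving the exit path waiting forever; with the old complete() instead of wait_for_completion(), the exit path signalled an event nobody waited on and then destroyed the slab cache while objects were still live.
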
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 67d446de0227..2540dfaa3e38 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1472,19 +1472,37 @@ out:
 	return cfqq;
 }
 
+static void
+cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
+{
+	read_lock(&cfq_exit_lock);
+	rb_erase(&cic->rb_node, &ioc->cic_root);
+	read_unlock(&cfq_exit_lock);
+	kmem_cache_free(cfq_ioc_pool, cic);
+	atomic_dec(&ioc_count);
+}
+
 static struct cfq_io_context *
 cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
-	struct rb_node *n = ioc->cic_root.rb_node;
+	struct rb_node *n;
 	struct cfq_io_context *cic;
-	void *key = cfqd;
+	void *k, *key = cfqd;
 
+restart:
+	n = ioc->cic_root.rb_node;
 	while (n) {
 		cic = rb_entry(n, struct cfq_io_context, rb_node);
+		/* ->key must be copied to avoid race with cfq_exit_queue() */
+		k = cic->key;
+		if (unlikely(!k)) {
+			cfq_drop_dead_cic(ioc, cic);
+			goto restart;
+		}
 
-		if (key < cic->key)
+		if (key < k)
			n = n->rb_left;
-		else if (key > cic->key)
+		else if (key > k)
 			n = n->rb_right;
 		else
 			return cic;
@@ -1497,29 +1515,37 @@ static inline void
 cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 	     struct cfq_io_context *cic)
 {
-	struct rb_node **p = &ioc->cic_root.rb_node;
-	struct rb_node *parent = NULL;
+	struct rb_node **p;
+	struct rb_node *parent;
 	struct cfq_io_context *__cic;
-
-	read_lock(&cfq_exit_lock);
+	void *k;
 
 	cic->ioc = ioc;
 	cic->key = cfqd;
 
 	ioc->set_ioprio = cfq_ioc_set_ioprio;
-
+restart:
+	parent = NULL;
+	p = &ioc->cic_root.rb_node;
 	while (*p) {
 		parent = *p;
 		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
+		/* ->key must be copied to avoid race with cfq_exit_queue() */
+		k = __cic->key;
+		if (unlikely(!k)) {
+			cfq_drop_dead_cic(ioc, __cic);
+			goto restart;
+		}
 
-		if (cic->key < __cic->key)
+		if (cic->key < k)
 			p = &(*p)->rb_left;
-		else if (cic->key > __cic->key)
+		else if (cic->key > k)
 			p = &(*p)->rb_right;
 		else
 			BUG();
 	}
 
+	read_lock(&cfq_exit_lock);
 	rb_link_node(&cic->rb_node, parent, p);
 	rb_insert_color(&cic->rb_node, &ioc->cic_root);
 	list_add(&cic->queue_list, &cfqd->cic_list);
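
Both cfq hunks apply the same discipline against cfq_exit_queue(), which can NULL a cic's ->key at any moment: load ->key once into a local, test that snapshot, and never re-read the field. A NULL snapshot marks a dead node, which is erased under cfq_exit_lock before the walk restarts from the root; rb_erase() rebalances the tree, so resuming mid-walk would be unsafe. A condensed, self-contained sketch of this copy-then-test traversal, using generic names rather than the tree's types:

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/compiler.h>

struct knode {
	struct rb_node	rb;
	void		*key;	/* an exit path may set this to NULL */
};

static DEFINE_RWLOCK(exit_lock);	/* stand-in for cfq_exit_lock */

static struct knode *knode_lookup(struct rb_root *root, void *key)
{
	struct rb_node *n;

restart:
	n = root->rb_node;
	while (n) {
		struct knode *e = rb_entry(n, struct knode, rb);
		void *k = e->key;	/* snapshot once; racer may clear it */

		if (unlikely(!k)) {
			/* dead node: unlink it, then rescan from the top */
			read_lock(&exit_lock);
			rb_erase(&e->rb, root);
			read_unlock(&exit_lock);
			goto restart;
		}
		if (key < k)
			n = n->rb_left;
		else if (key > k)
			n = n->rb_right;
		else
			return e;
	}
	return NULL;
}

Taking the read side of the lock around rb_erase() mirrors the patch and looks inverted; it works in CFQ only because the tree's owner is the sole read_lock holder while cfq_exit_queue() takes write_lock to exclude it, so do not treat it as a general pattern. Note also why cfq_cic_link() must drop the dead node it found in the tree (__cic) rather than the cic being inserted: erasing a node that was never linked would corrupt the tree, and the restart would keep finding the same dead entry forever.
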
@@ -2439,9 +2465,10 @@ static void __exit cfq_exit(void)
 	DECLARE_COMPLETION(all_gone);
 	elv_unregister(&iosched_cfq);
 	ioc_gone = &all_gone;
-	barrier();
+	/* ioc_gone's update must be visible before reading ioc_count */
+	smp_wmb();
 	if (atomic_read(&ioc_count))
-		complete(ioc_gone);
+		wait_for_completion(ioc_gone);
 	synchronize_rcu();
 	cfq_slab_kill();
 }
diff --git a/block/elevator.c b/block/elevator.c
index 0d6be03d929e..29825792cbd5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -895,10 +895,8 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 EXPORT_SYMBOL(elv_dispatch_sort);
 EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
-EXPORT_SYMBOL(elv_requeue_request);
 EXPORT_SYMBOL(elv_next_request);
 EXPORT_SYMBOL(elv_dequeue_request);
 EXPORT_SYMBOL(elv_queue_empty);
-EXPORT_SYMBOL(elv_completed_request);
 EXPORT_SYMBOL(elevator_exit);
 EXPORT_SYMBOL(elevator_init);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e112d1a5dab6..e5041a02e21f 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1554,7 +1554,7 @@ void blk_plug_device(request_queue_t *q)
 	 * don't plug a stopped queue, it must be paired with blk_start_queue()
 	 * which will restart the queueing
 	 */
-	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+	if (blk_queue_stopped(q))
 		return;
 
 	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
@@ -1587,7 +1587,7 @@ EXPORT_SYMBOL(blk_remove_plug);
  */
 void __generic_unplug_device(request_queue_t *q)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
+	if (unlikely(blk_queue_stopped(q)))
 		return;
 
 	if (!blk_remove_plug(q))
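
The two blk_queue_stopped() hunks are a readability cleanup rather than a behavior change: the open-coded test_bit() on q->queue_flags is replaced with the existing accessor from include/linux/blkdev.h, which in kernels of this vintage is a thin macro along the lines of:

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)

Using the accessor keeps every queue-flag test behind one name, so any later change to how the flag is stored only has to touch the header.
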
@@ -3385,7 +3385,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata blk_cpu_notifier = {
+static struct notifier_block blk_cpu_notifier = {
 	.notifier_call	= blk_cpu_notify,
 };
 