author	Tejun Heo <tj@kernel.org>	2011-03-04 19:09:02 +0100
committer	Tejun Heo <tj@kernel.org>	2011-03-04 19:09:02 +0100
commit	e83a46bbb1d4c03defd733a64b727632a40059ad (patch)
tree	c4bc4822b2d3af1bf38095f531adc0a2aac054a5 /block
parent	da527770007fce8e4541947d47918248286da875 (diff)
parent	fd51469fb68b987032e46297e0a4fe9020063c20 (diff)
Merge branch 'for-linus' of ../linux-2.6-block into block-for-2.6.39/core
This merge creates two sets of conflicts. One is simple context conflicts caused by the removal of throtl_scheduled_delayed_work() in for-linus and the removal of throtl_shutdown_timer_wq() in for-2.6.39/core.

The other is caused by commit 255bb490c8 (block: blk-flush shouldn't call directly into q->request_fn() __blk_run_queue()) in for-linus clashing with the FLUSH reimplementation in for-2.6.39/core. The conflict isn't trivial but the resolution is straightforward:

* __blk_run_queue() calls in flush_end_io() and flush_data_end_io() should be made with @force_kblockd set to %true.

* elv_insert() in blk_kick_flush() should use %ELEVATOR_INSERT_REQUEUE.

Both changes avoid invoking ->request_fn() directly from the request completion path and closely match the changes in commit 255bb490c8.

Signed-off-by: Tejun Heo <tj@kernel.org>
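To make the first rule concrete, here is a minimal sketch (not part of the patch; the driver hook my_end_io() is hypothetical) of how a completion handler is expected to kick the queue once @force_kblockd exists:

  /*
   * Hypothetical completion hook.  From a request completion path,
   * __blk_run_queue() must not recurse into ->request_fn(); passing
   * force_kblockd == true punts the dispatch to the kblockd
   * workqueue instead.
   */
  static void my_end_io(struct request *rq, int error)
  {
  	struct request_queue *q = rq->q;

  	/* safe from completion context: always goes through kblockd */
  	__blk_run_queue(q, true);
  }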
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	18
-rw-r--r--	block/blk-flush.c	18
-rw-r--r--	block/blk-lib.c	2
-rw-r--r--	block/blk-throttle.c	29
-rw-r--r--	block/cfq-iosched.c	6
-rw-r--r--	block/elevator.c	4
-rw-r--r--	block/genhd.c	2
-rw-r--r--	block/ioctl.c	8
8 files changed, 49 insertions(+), 38 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index accff29ad674..74d496ccf4d7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -342,7 +342,7 @@ void blk_start_queue(struct request_queue *q)
WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
}
EXPORT_SYMBOL(blk_start_queue);
@@ -396,13 +396,14 @@ EXPORT_SYMBOL(blk_sync_queue);
/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
*
* Description:
* See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled.
*
*/
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
blk_remove_plug(q);
@@ -416,7 +417,7 @@ void __blk_run_queue(struct request_queue *q)
* Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there.
*/
- if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+ if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
q->request_fn(q);
queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
@@ -439,7 +440,7 @@ void blk_run_queue(struct request_queue *q)
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
@@ -1085,7 +1086,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
drive_stat_acct(rq, 1);
__elv_add_request(q, rq, where, 0);
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
@@ -2642,13 +2643,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
-int kblockd_schedule_delayed_work(struct request_queue *q,
- struct delayed_work *dwork, unsigned long delay)
-{
- return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *
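With kblockd_schedule_delayed_work() removed above, delayed work has to go onto a workqueue the caller owns; the blk-throttle hunks below open-code exactly this with a new kthrotld workqueue. A minimal sketch of the replacement pattern, with hypothetical names (my_wq, my_module_init, my_schedule):

  #include <linux/workqueue.h>

  static struct workqueue_struct *my_wq;	/* hypothetical module-local queue */

  static int __init my_module_init(void)
  {
  	/*
  	 * WQ_MEM_RECLAIM provides a rescuer thread so this queue keeps
  	 * making progress under memory pressure, a requirement for
  	 * anything sitting on the block I/O path.
  	 */
  	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
  	if (!my_wq)
  		return -ENOMEM;
  	return 0;
  }

  /* where kblockd_schedule_delayed_work(q, dwork, delay) used to be: */
  static void my_schedule(struct delayed_work *dwork, unsigned long delay)
  {
  	queue_delayed_work(my_wq, dwork, delay);
  }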
diff --git a/block/blk-flush.c b/block/blk-flush.c
index a867e3f524f3..0bd8c9c5d6e5 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -212,9 +212,14 @@ static void flush_end_io(struct request *flush_rq, int error)
queued |= blk_flush_complete_seq(rq, seq, error);
}
- /* after populating an empty queue, kick it to avoid stall */
+ /*
+ * Moving a request silently to empty queue_head may stall the
+ * queue. Kick the queue in those cases. This function is called
+ * from request completion path and calling directly into
+ * request_fn may confuse the driver. Always use kblockd.
+ */
if (queued && was_empty)
- __blk_run_queue(q);
+ __blk_run_queue(q, true);
}
/**
@@ -257,7 +262,7 @@ static bool blk_kick_flush(struct request_queue *q)
q->flush_rq.end_io = flush_end_io;
q->flush_pending_idx ^= 1;
- elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_FRONT);
+ elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE);
return true;
}
@@ -266,9 +271,12 @@ static void flush_data_end_io(struct request *rq, int error)
struct request_queue *q = rq->q;
bool was_empty = elv_queue_empty(q);
- /* after populating an empty queue, kick it to avoid stall */
+ /*
+ * After populating an empty queue, kick it to avoid stall. Read
+ * the comment in flush_end_io().
+ */
if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error) && was_empty)
- __blk_run_queue(q);
+ __blk_run_queue(q, true);
}
/**
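Why ELEVATOR_INSERT_REQUEUE rather than ELEVATOR_INSERT_FRONT in blk_kick_flush()? A simplified sketch of the relevant elv_insert() cases from this era (paraphrased, not a verbatim quote): both insert at the head of queue_head, but REQUEUE suppresses the unplug, so the insertion cannot call straight into ->request_fn() from the flush completion path.

  switch (where) {
  case ELEVATOR_INSERT_REQUEUE:
  	/* like FRONT, but never unplug: no direct ->request_fn() call */
  	unplug_it = 0;
  	/* fall through */
  case ELEVATOR_INSERT_FRONT:
  	rq->cmd_flags |= REQ_SOFTBARRIER;
  	list_add(&rq->queuelist, &q->queue_head);
  	break;
  }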
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 1a320d2406b0..eec78becb355 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
}
/**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout - generate number of zero filed write bios
* @bdev: blockdev to issue
* @sector: start sector
* @nr_sects: number of sectors to write
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index c0f623742165..061dee66e2a6 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */
+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+ unsigned long delay);
+
struct throtl_rb_root {
struct rb_root rb;
struct rb_node *left;
@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
update_min_dispatch_time(st);
if (time_before_eq(st->min_disptime, jiffies))
- throtl_schedule_delayed_work(td->queue, 0);
+ throtl_schedule_delayed_work(td, 0);
else
- throtl_schedule_delayed_work(td->queue,
- (st->min_disptime - jiffies));
+ throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}
static inline void
@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
}
/* Call with queue lock held */
-void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
+static void
+throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
- struct throtl_data *td = q->td;
struct delayed_work *dwork = &td->throtl_work;
if (total_nr_queued(td) > 0) {
@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
* Cancel that and schedule a new one.
*/
__cancel_delayed_work(dwork);
- kblockd_schedule_delayed_work(q, dwork, delay);
+ queue_delayed_work(kthrotld_workqueue, dwork, delay);
throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
delay, jiffies);
}
}
-EXPORT_SYMBOL(throtl_schedule_delayed_work);
static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
smp_mb__after_atomic_inc();
/* Schedule a work now to process the limit change */
- throtl_schedule_delayed_work(td->queue, 0);
+ throtl_schedule_delayed_work(td, 0);
}
static void throtl_update_blkio_group_write_bps(void *key,
@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc();
- throtl_schedule_delayed_work(td->queue, 0);
+ throtl_schedule_delayed_work(td, 0);
}
static void throtl_update_blkio_group_read_iops(void *key,
@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc();
- throtl_schedule_delayed_work(td->queue, 0);
+ throtl_schedule_delayed_work(td, 0);
}
static void throtl_update_blkio_group_write_iops(void *key,
@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc();
- throtl_schedule_delayed_work(td->queue, 0);
+ throtl_schedule_delayed_work(td, 0);
}
static void throtl_shutdown_wq(struct request_queue *q)
@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
static int __init throtl_init(void)
{
+ kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+ if (!kthrotld_workqueue)
+ panic("Failed to create kthrotld\n");
+
blkio_policy_register(&blkio_policy_throtl);
return 0;
}
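The "Call with queue lock held" comment above works only because __cancel_delayed_work() is the non-sleeping cancel. A sketch of the reschedule pattern under the lock (the caller shown is hypothetical; td, throtl_work and kthrotld_workqueue are names from the patch):

  /*
   * __cancel_delayed_work() never sleeps, so a pending work item can
   * be cancelled and requeued with a new delay while q->queue_lock is
   * held; cancel_delayed_work_sync() would be illegal here.
   */
  spin_lock_irq(q->queue_lock);
  if (total_nr_queued(td) > 0) {
  	__cancel_delayed_work(&td->throtl_work);
  	queue_delayed_work(kthrotld_workqueue, &td->throtl_work, delay);
  }
  spin_unlock_irq(q->queue_lock);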
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3202c7e87fb3..89dc745c7d94 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3324,7 +3324,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
- __blk_run_queue(cfqd->queue);
+ __blk_run_queue(cfqd->queue, false);
} else {
cfq_blkiocg_update_idle_time_stats(
&cfqq->cfqg->blkg);
@@ -3339,7 +3339,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* this new queue is RT and the current one is BE
*/
cfq_preempt_queue(cfqd, cfqq);
- __blk_run_queue(cfqd->queue);
+ __blk_run_queue(cfqd->queue, false);
}
}
@@ -3700,7 +3700,7 @@ static void cfq_kick_queue(struct work_struct *work)
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
- __blk_run_queue(cfqd->queue);
+ __blk_run_queue(cfqd->queue, false);
spin_unlock_irq(q->queue_lock);
}
diff --git a/block/elevator.c b/block/elevator.c
index f98e92edc937..fabf3675c913 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
*/
elv_drain_elevator(q);
while (q->rq.elvpriv) {
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
* with anything. There's no point in delaying queue
* processing.
*/
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
break;
case ELEVATOR_INSERT_SORT:
diff --git a/block/genhd.c b/block/genhd.c
index 73d85a8e3c85..3e2b57b55e38 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
struct block_device *bdev = bdget_disk(disk, partno);
if (bdev) {
fsync_bdev(bdev);
- res = __invalidate_device(bdev);
+ res = __invalidate_device(bdev, true);
bdput(bdev);
}
return res;
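For context on the hunk above: __invalidate_device() grew a second argument in this cycle. If memory serves, the signature in fs/block_dev.c is:

  /* kill_dirty == true asks for dirty buffers to be discarded as
   * well, which is what invalidate_partition() wants here */
  int __invalidate_device(struct block_device *bdev, bool kill_dirty);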
diff --git a/block/ioctl.c b/block/ioctl.c
index 9049d460fa89..1124cd297263 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EINVAL;
if (get_user(n, (int __user *) arg))
return -EFAULT;
- if (!(mode & FMODE_EXCL) &&
- blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
- return -EBUSY;
+ if (!(mode & FMODE_EXCL)) {
+ bdgrab(bdev);
+ if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+ return -EBUSY;
+ }
ret = set_blocksize(bdev, n);
if (!(mode & FMODE_EXCL))
blkdev_put(bdev, mode | FMODE_EXCL);
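The BLKBSZSET fix above hinges on blkdev_get() consuming a bdev reference even when it fails. An annotated sketch of the new pairing (mirrors the hunk; the comments are mine, not from the patch):

  if (!(mode & FMODE_EXCL)) {
  	bdgrab(bdev);		/* extra ref: blkdev_get() eats one even on failure */
  	if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
  		return -EBUSY;	/* our extra ref has already been dropped */
  }

  ret = set_blocksize(bdev, n);

  if (!(mode & FMODE_EXCL))
  	blkdev_put(bdev, mode | FMODE_EXCL);	/* drops exclusivity + the extra ref */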