From 2397a15aff35b5b4eed732ce81fda5a9d15053f9 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Wed, 31 Aug 2016 15:18:11 -0700
Subject: dm rq: factor out dm_mq_stop_queue()

Also, check that the blk-mq request_queue isn't already stopped.

Signed-off-by: Bart Van Assche
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-rq.c | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1ca7463e8bb2..2f605f62e47d 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -98,18 +98,30 @@ static void dm_old_stop_queue(struct request_queue *q)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
+static void dm_mq_stop_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	if (blk_queue_stopped(q)) {
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		return;
+	}
+
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/* Avoid that requeuing could restart the queue. */
+	blk_mq_cancel_requeue_work(q);
+	blk_mq_stop_hw_queues(q);
+}
+
 void dm_stop_queue(struct request_queue *q)
 {
 	if (!q->mq_ops)
 		dm_old_stop_queue(q);
-	else {
-		spin_lock_irq(q->queue_lock);
-		queue_flag_set(QUEUE_FLAG_STOPPED, q);
-		spin_unlock_irq(q->queue_lock);
-
-		blk_mq_cancel_requeue_work(q);
-		blk_mq_stop_hw_queues(q);
-	}
+	else
+		dm_mq_stop_queue(q);
 }
 
 static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
--
cgit v1.2.3

From 9dbeaeabacb26260d1621fe58f0f6fdedc8860d4 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Thu, 1 Sep 2016 11:59:33 -0400
Subject: dm rq: take request_queue lock while clearing QUEUE_FLAG_STOPPED

Every call of queue_flag_clear_unlocked() after block device
initialization has finished is wrong if blk_cleanup_queue() can be
called concurrently.  Convert queue_flag_clear_unlocked() into
queue_flag_clear() and protect it by the block layer queue lock.

Also, factor out dm_mq_start_queue().

Reported-by: Bart Van Assche
Signed-off-by: Mike Snitzer
Cc: stable@vger.kernel.org
---
 drivers/md/dm-rq.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 2f605f62e47d..bd3ba97d44a2 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -73,15 +73,24 @@ static void dm_old_start_queue(struct request_queue *q)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
+static void dm_mq_start_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	blk_mq_start_stopped_hw_queues(q, true);
+	blk_mq_kick_requeue_list(q);
+}
+
 void dm_start_queue(struct request_queue *q)
 {
 	if (!q->mq_ops)
 		dm_old_start_queue(q);
-	else {
-		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
-		blk_mq_start_stopped_hw_queues(q, true);
-		blk_mq_kick_requeue_list(q);
-	}
+	else
+		dm_mq_start_queue(q);
 }
 
 static void dm_old_stop_queue(struct request_queue *q)
--
cgit v1.2.3
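A note on the two helpers above: dm_mq_stop_queue() is idempotent (it returns early when QUEUE_FLAG_STOPPED is already set under q->queue_lock), and dm_mq_start_queue() undoes it. A minimal caller sketch, not part of the series; quiesce_example() is an invented name:

static void quiesce_example(struct request_queue *q)
{
	/* Safe to call twice: the second call sees QUEUE_FLAG_STOPPED
	 * under q->queue_lock and returns without touching hw queues. */
	dm_stop_queue(q);
	dm_stop_queue(q);

	/* Clears QUEUE_FLAG_STOPPED under the queue lock, restarts the
	 * stopped hw queues and kicks the requeue_list. */
	dm_start_queue(q);
}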
From c533f249a166142df4294ec38fa5dcd1903f0400 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Wed, 31 Aug 2016 15:17:24 -0700
Subject: dm rq: simplify dm_old_stop_queue()

This patch does not change any functionality.

Signed-off-by: Bart Van Assche
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-rq.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index bd3ba97d44a2..0d301d5a4d0b 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -98,12 +98,8 @@ static void dm_old_stop_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (blk_queue_stopped(q)) {
-		spin_unlock_irqrestore(q->queue_lock, flags);
-		return;
-	}
-
-	blk_stop_queue(q);
+	if (!blk_queue_stopped(q))
+		blk_stop_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
--
cgit v1.2.3
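The simplification above is the usual fold of a locked early return into a guarded call; blk_stop_queue() must be called with q->queue_lock held, which both versions respect. In schematic form (lock()/unlock() stand in for spin_lock_irqsave()/spin_unlock_irqrestore(); illustrative, not kernel code):

/* before: two unlock sites */
lock();
if (stopped) {
	unlock();
	return;
}
blk_stop_queue(q);
unlock();

/* after: one unlock site, same behavior */
lock();
if (!stopped)
	blk_stop_queue(q);
unlock();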
From a8ac51e4ab97765838ae6a07d6ff7f7bfaaa0ea3 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Fri, 9 Sep 2016 19:24:57 -0400
Subject: dm rq: add DM_MAPIO_DELAY_REQUEUE to delay requeue of blk-mq requests

Otherwise blk-mq will immediately dispatch requests that are requeued
via a BLK_MQ_RQ_QUEUE_BUSY return from blk_mq_ops .queue_rq.

Delayed requeue is implemented using blk_mq_delay_kick_requeue_list()
with a delay of 5 secs.  In the context of DM multipath (all paths
down) it doesn't make any sense to requeue more quickly.

Signed-off-by: Mike Snitzer
---
 drivers/md/dm-rq.c            | 32 ++++++++++++++++++--------------
 include/linux/device-mapper.h |  1 +
 2 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 0d301d5a4d0b..dbced7b15931 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -336,20 +336,21 @@ static void dm_old_requeue_request(struct request *rq)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void dm_mq_requeue_request(struct request *rq)
+static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
 {
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
 	blk_mq_requeue_request(rq);
+
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (!blk_queue_stopped(q))
-		blk_mq_kick_requeue_list(q);
+		blk_mq_delay_kick_requeue_list(q, msecs);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 static void dm_requeue_original_request(struct mapped_device *md,
-					struct request *rq)
+					struct request *rq, bool delay_requeue)
 {
 	int rw = rq_data_dir(rq);
 
@@ -359,7 +360,7 @@ static void dm_requeue_original_request(struct mapped_device *md,
 	if (!rq->q->mq_ops)
 		dm_old_requeue_request(rq);
 	else
-		dm_mq_requeue_request(rq);
+		dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
 
 	rq_completed(md, rw, false);
 }
@@ -389,7 +390,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
 		return;
 	else if (r == DM_ENDIO_REQUEUE)
 		/* The target wants to requeue the I/O */
-		dm_requeue_original_request(tio->md, tio->orig);
+		dm_requeue_original_request(tio->md, tio->orig, false);
 	else {
 		DMWARN("unimplemented target endio return value: %d", r);
 		BUG();
@@ -629,8 +630,8 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
 
 /*
  * Returns:
- * 0                : the request has been processed
- * DM_MAPIO_REQUEUE : the original request needs to be requeued
+ * DM_MAPIO_*       : the request has been processed as indicated
+ * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
  * < 0              : the request was completed due to failure
  */
 static int map_request(struct dm_rq_target_io *tio, struct request *rq,
@@ -643,6 +644,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 	if (tio->clone) {
 		clone = tio->clone;
 		r = ti->type->map_rq(ti, clone, &tio->info);
+		if (r == DM_MAPIO_DELAY_REQUEUE)
+			return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
 	} else {
 		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
 		if (r < 0) {
@@ -650,9 +653,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 			dm_kill_unmapped_request(rq, r);
 			return r;
 		}
-		if (r != DM_MAPIO_REMAPPED)
-			return r;
-		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+		if (r == DM_MAPIO_REMAPPED &&
+		    setup_clone(clone, rq, tio, GFP_ATOMIC)) {
 			/* -ENOMEM */
 			ti->type->release_clone_rq(clone);
 			return DM_MAPIO_REQUEUE;
@@ -671,7 +673,10 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 		break;
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
-		dm_requeue_original_request(md, tio->orig);
+		break;
+	case DM_MAPIO_DELAY_REQUEUE:
+		/* The target wants to requeue the I/O after a delay */
+		dm_requeue_original_request(md, tio->orig, true);
 		break;
 	default:
 		if (r > 0) {
@@ -681,10 +686,9 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 
 		/* The target wants to complete the I/O */
 		dm_kill_unmapped_request(rq, r);
-		return r;
 	}
 
-	return 0;
+	return r;
 }
 
 static void dm_start_request(struct mapped_device *md, struct request *orig)
@@ -727,7 +731,7 @@ static void map_tio_request(struct kthread_work *work)
 	struct mapped_device *md = tio->md;
 
 	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-		dm_requeue_original_request(md, rq);
+		dm_requeue_original_request(md, rq, false);
 }
 
 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 91acfce74a22..ef7962e84444 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -590,6 +590,7 @@ extern struct ratelimit_state dm_ratelimit_state;
 #define DM_MAPIO_SUBMITTED	0
 #define DM_MAPIO_REMAPPED	1
 #define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
+#define DM_MAPIO_DELAY_REQUEUE	3
 
 #define dm_sector_div64(x, y)( \
 { \
--
cgit v1.2.3
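For context, here is how a request-based target could opt into the new delayed requeue. This is a hypothetical .map_rq written against the dm_map_request_fn signature of this era; the mp_example_* names and helpers are invented, not from the tree:

static int mp_example_map_rq(struct dm_target *ti, struct request *clone,
			     union map_info *map_context)
{
	struct mp_example *m = ti->private;

	if (!mp_example_has_usable_path(m))
		/* All paths down: DM_MAPIO_DELAY_REQUEUE makes dm-rq
		 * requeue via blk_mq_delay_kick_requeue_list() with a
		 * 5 second delay instead of redispatching immediately. */
		return DM_MAPIO_DELAY_REQUEUE;

	mp_example_assign_path(m, clone, map_context);
	return DM_MAPIO_REMAPPED;
}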
From fbc39b4ca3bed38c6d62c658af2157d2ec9efa03 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Tue, 13 Sep 2016 12:16:14 -0400
Subject: dm rq: reduce arguments passed to map_request() and dm_requeue_original_request()

Signed-off-by: Mike Snitzer
Reviewed-by: Hannes Reinecke
---
 drivers/md/dm-rq.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index dbced7b15931..8eefc0ad7a59 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -349,9 +349,10 @@ static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void dm_requeue_original_request(struct mapped_device *md,
-					struct request *rq, bool delay_requeue)
+static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
 {
+	struct mapped_device *md = tio->md;
+	struct request *rq = tio->orig;
 	int rw = rq_data_dir(rq);
 
 	rq_end_stats(md, rq);
@@ -390,7 +391,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
 		return;
 	else if (r == DM_ENDIO_REQUEUE)
 		/* The target wants to requeue the I/O */
-		dm_requeue_original_request(tio->md, tio->orig, false);
+		dm_requeue_original_request(tio, false);
 	else {
 		DMWARN("unimplemented target endio return value: %d", r);
 		BUG();
@@ -634,11 +635,12 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
  * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
  * < 0              : the request was completed due to failure
  */
-static int map_request(struct dm_rq_target_io *tio, struct request *rq,
-		       struct mapped_device *md)
+static int map_request(struct dm_rq_target_io *tio)
 {
 	int r;
 	struct dm_target *ti = tio->ti;
+	struct mapped_device *md = tio->md;
+	struct request *rq = tio->orig;
 	struct request *clone = NULL;
 
 	if (tio->clone) {
@@ -676,7 +678,7 @@ static int map_request(struct dm_rq_target_io *tio)
 		break;
 	case DM_MAPIO_DELAY_REQUEUE:
 		/* The target wants to requeue the I/O after a delay */
-		dm_requeue_original_request(md, tio->orig, true);
+		dm_requeue_original_request(tio, true);
 		break;
 	default:
 		if (r > 0) {
@@ -727,11 +729,9 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
-	struct request *rq = tio->orig;
-	struct mapped_device *md = tio->md;
 
-	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-		dm_requeue_original_request(md, rq, false);
+	if (map_request(tio) == DM_MAPIO_REQUEUE)
+		dm_requeue_original_request(tio, false);
 }
 
 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
@@ -917,7 +917,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	tio->ti = ti;
 
 	/* Direct call is fine since .queue_rq allows allocations */
-	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+	if (map_request(tio) == DM_MAPIO_REQUEUE) {
 		/* Undo dm_start_request() before requeuing */
 		rq_end_stats(md, rq);
 		rq_completed(md, rq_data_dir(rq), false);
--
cgit v1.2.3
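The argument reduction works because struct dm_rq_target_io already carries the mapped_device and the original request, so both can be derived from the tio. An abridged sketch of the struct (from dm-rq.h of this era, trimmed from memory to the fields used here):

struct dm_rq_target_io {
	struct mapped_device *md;	/* replaces the md argument */
	struct dm_target *ti;
	struct request *orig, *clone;	/* orig replaces the rq argument */
	struct kthread_work work;
	/* ... */
};

With that, map_request(tio, rq, md) collapses to map_request(tio), and dm_requeue_original_request() likewise takes only the tio.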
From e0c107526960d1348cfe21f12bcfb3348fd7e8ab Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Wed, 14 Sep 2016 10:36:39 -0400
Subject: dm rq: introduce dm_mq_kick_requeue_list()

Make it possible for a request-based target to kick the DM device's
blk-mq request_queue's requeue_list.

Signed-off-by: Mike Snitzer
Reviewed-by: Hannes Reinecke
---
 drivers/md/dm-rq.c | 17 +++++++++++++----
 drivers/md/dm-rq.h |  2 ++
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 8eefc0ad7a59..877b8f33620e 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -336,19 +336,28 @@ static void dm_old_requeue_request(struct request *rq)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
+static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
 {
-	struct request_queue *q = rq->q;
 	unsigned long flags;
 
-	blk_mq_requeue_request(rq);
-
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (!blk_queue_stopped(q))
 		blk_mq_delay_kick_requeue_list(q, msecs);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
+void dm_mq_kick_requeue_list(struct mapped_device *md)
+{
+	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
+}
+EXPORT_SYMBOL(dm_mq_kick_requeue_list);
+
+static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
+{
+	blk_mq_requeue_request(rq);
+	__dm_mq_kick_requeue_list(rq->q, msecs);
+}
+
 static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
 {
 	struct mapped_device *md = tio->md;
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index 9e6f0a3773d4..4da06cae7bad 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -55,6 +55,8 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md);
 void dm_start_queue(struct request_queue *q);
 void dm_stop_queue(struct request_queue *q);
 
+void dm_mq_kick_requeue_list(struct mapped_device *md);
+
 unsigned dm_get_reserved_rq_based_ios(void);
 
 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
--
cgit v1.2.3
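A sketch of target-side usage (hypothetical; mp_example_path_restored() is an invented name, not from this series): when an event such as a restored path makes previously requeued requests serviceable again, the target can kick the DM device's requeue_list so those requests do not sit out the full requeue delay:

static void mp_example_path_restored(struct dm_target *ti)
{
	/* dm_table_get_md() maps the target's table back to its
	 * mapped_device, the handle dm_mq_kick_requeue_list() wants. */
	struct mapped_device *md = dm_table_get_md(ti->table);

	dm_mq_kick_requeue_list(md);
}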