author	Linus Torvalds <torvalds@linux-foundation.org>	2014-03-07 09:59:44 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-03-07 09:59:44 -0800
commit	2a75184d52f41db1f54ad9eee7fa8b3ad820f4ac (patch)
tree	126192eee76510426ee08b9f3a2197ff766a0d50 /include
parent	8ab47d3ec77d94ad9a6bb01efd696e1e34cfe80d (diff)
parent	739c3eea711a255df5ed1246face0a4dce5e589f (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Small collection of fixes for 3.14-rc. It contains:

   - Three minor updates to blk-mq from Christoph.

   - Reduce number of unaligned (< 4kb) in-flight writes on mtip32xx to
     two.  From Micron.

   - Make the blk-mq CPU notify spinlock raw, since it can't be a
     sleeper spinlock on RT.  From Mike Galbraith.

   - Drop now bogus BUG_ON() for bio iteration with blk integrity.
     From Nic Bellinger.

   - Properly propagate the SYNC flag on requests.  From Shaohua"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: add REQ_SYNC early
  rt,blk,mq: Make blk_mq_cpu_notify_lock a raw spinlock
  bio-integrity: Drop bio_integrity_verify BUG_ON in post bip->bip_iter world
  blk-mq: support partial I/O completions
  blk-mq: merge blk_mq_insert_request and blk_mq_run_request
  blk-mq: remove blk_mq_alloc_rq
  mtip32xx: Reduce the number of unaligned writes to 2
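The RT fix above converts blk_mq_cpu_notify_lock to a raw_spinlock_t: on PREEMPT_RT, an ordinary spinlock_t becomes a sleeping lock, which is forbidden in the non-preemptible CPU notifier path. A minimal sketch of that pattern follows; only the lock name comes from the patch title, the surrounding function is illustrative:

	#include <linux/spinlock.h>

	/* On PREEMPT_RT, spinlock_t is a sleeping lock; a lock taken
	 * from CPU-hotplug notifier context must stay a true spinning
	 * lock, hence raw_spinlock_t. */
	static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);

	/* Illustrative caller (assumption: not the actual blk-mq code). */
	static void example_cpu_notify(void)
	{
		raw_spin_lock(&blk_mq_cpu_notify_lock);
		/* walk the notifier list; sleeping is not allowed here */
		raw_spin_unlock(&blk_mq_cpu_notify_lock);
	}

raw_spin_lock() never sleeps regardless of the preemption model, which is exactly what notifier context requires.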
Diffstat (limited to 'include')
-rw-r--r--	include/linux/blk-mq.h	11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 18ba8a627f46..2ff2e8d982be 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -121,8 +121,7 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-void blk_mq_insert_request(struct request_queue *, struct request *,
-		bool, bool);
+void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
@@ -134,7 +133,13 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_ind
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
 void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
-void blk_mq_end_io(struct request *rq, int error);
+bool blk_mq_end_io_partial(struct request *rq, int error,
+		unsigned int nr_bytes);
+static inline void blk_mq_end_io(struct request *rq, int error)
+{
+	bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));
+	BUG_ON(!done);
+}
 void blk_mq_complete_request(struct request *rq);
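As the new inline wrapper shows, blk_mq_end_io_partial() returns true while the request still has bytes outstanding and false once it is fully completed; blk_mq_end_io() passes blk_rq_bytes(rq) and asserts full completion. A minimal driver-side sketch of the partial-completion flow, assuming an illustrative completion handler (the function and parameter names here are hypothetical, not from the patch):

	/* Hypothetical handler: account for one finished chunk of a
	 * request, possibly leaving the rest in flight. */
	static void example_complete_chunk(struct request *rq, int error,
					   unsigned int bytes_done)
	{
		/* Returns true while part of the request remains. */
		if (blk_mq_end_io_partial(rq, error, bytes_done))
			return;		/* more chunks still outstanding */

		/* Returning false means the block layer completed the
		 * whole request, exactly as blk_mq_end_io() would have;
		 * rq must not be touched after this point. */
	}

This lets drivers such as mtip32xx complete a request piecewise instead of all at once, while existing callers of blk_mq_end_io() keep their old all-or-nothing semantics.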