From 37e58237a16b94fcd2c2d1b7e9c6e1ca661c231b Mon Sep 17 00:00:00 2001
From: Ming Lin
Date: Tue, 22 Mar 2016 00:24:44 -0700
Subject: block: add offset in blk_add_request_payload()

We could kmalloc() the payload, so we need the offset within the page.

Signed-off-by: Ming Lin
Reviewed-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 block/blk-core.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'block')

diff --git a/block/blk-core.c b/block/blk-core.c
index b60537b2c35b..c50227796a26 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1523,6 +1523,7 @@ EXPORT_SYMBOL(blk_put_request);
  * blk_add_request_payload - add a payload to a request
  * @rq: request to update
  * @page: page backing the payload
+ * @offset: offset in page
  * @len: length of the payload.
  *
  * This allows to later add a payload to an already submitted request by
@@ -1533,12 +1534,12 @@ EXPORT_SYMBOL(blk_put_request);
  * discard requests should ever use it.
  */
 void blk_add_request_payload(struct request *rq, struct page *page,
-		unsigned int len)
+		int offset, unsigned int len)
 {
 	struct bio *bio = rq->bio;
 
 	bio->bi_io_vec->bv_page = page;
-	bio->bi_io_vec->bv_offset = 0;
+	bio->bi_io_vec->bv_offset = offset;
 	bio->bi_io_vec->bv_len = len;
 
 	bio->bi_iter.bi_size = len;
-- cgit v1.2.3

From e0489487ec9cd79ee1fa0dc5d3789c08b0e51a2c Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Thu, 10 Mar 2016 13:58:46 +0200
Subject: blk-mq: Export tagset iter function

It's useful to iterate over all the active tags in cases where we
need to fail all the queues' IO.

Signed-off-by: Sagi Grimberg
[hch: carefully check for valid tagsets]
Reviewed-by: Christoph Hellwig
Reviewed-by: Johannes Thumshirn
Signed-off-by: Jens Axboe
---
 block/blk-mq-tag.c     | 12 ++++++++++++
 include/linux/blk-mq.h |  2 ++
 2 files changed, 14 insertions(+)

(limited to 'block')

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index abdbb47405cb..2fd04286f103 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -474,6 +474,18 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 }
 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+		busy_tag_iter_fn *fn, void *priv)
+{
+	int i;
+
+	for (i = 0; i < tagset->nr_hw_queues; i++) {
+		if (tagset->tags && tagset->tags[i])
+			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
+	}
+}
+EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
+
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv)
 {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9ac9799b702b..c808fec1ce44 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -240,6 +240,8 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 		void *priv);
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+		busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_mq_freeze_queue_start(struct request_queue *q);
-- cgit v1.2.3

From 93e9d8e836cb1a9a58b33eb6643bf061c6119ef2 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 12 Apr 2016 12:32:46 -0600
Subject: block: add ability to flag write back caching on a device

Add an internal helper and flag for setting whether a queue has
write back caching, or write through (or none).
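As a sketch of the intended use (hypothetical driver code, not part of
this patch), a driver would report its cache configuration from its
setup path with the new helper:

	/* example only: queue fronts a write back cache, FUA writes work */
	blk_queue_write_cache(q, true, true);

	/* example only: write through (or no) cache, no FUA */
	blk_queue_write_cache(q, false, false);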
Add a sysfs file to show this as well, and make it changeable from
user space.

This will replace the (awkward) blk_queue_flush() interface that
drivers currently use to inform the block layer of write cache state
and capabilities.

Signed-off-by: Jens Axboe
Reviewed-by: Christoph Hellwig
---
 Documentation/block/queue-sysfs.txt |  9 +++++++++
 block/blk-settings.c                | 26 +++++++++++++++++++++++++
 block/blk-sysfs.c                   | 39 +++++++++++++++++++++++++++++++++++++
 include/linux/blkdev.h              |  3 +++
 4 files changed, 77 insertions(+)

(limited to 'block')

diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index e5d914845be6..dce25d848d92 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -141,6 +141,15 @@ control of this block device to that new IO scheduler. Note that writing
 an IO scheduler name to this file will attempt to load that IO scheduler
 module, if it isn't already present in the system.
 
+write_cache (RW)
+----------------
+When read, this file will display whether the device has write back
+caching enabled or not. It will return "write back" for the former
+case, and "write through" for the latter. Writing to this file can
+change the kernel's view of the device, but it doesn't alter the
+device state. This means that it might not be safe to toggle the
+setting from "write back" to "write through", since that will also
+eliminate cache flushes issued by the kernel.
 
 Jens Axboe , February 2009
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 331e4eee0dda..c903bee43cf8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -846,6 +846,32 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q:		the request queue for the device
+ * @wc:		write back cache on or off
+ * @fua:	device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+	spin_lock_irq(q->queue_lock);
+	if (wc) {
+		queue_flag_set(QUEUE_FLAG_WC, q);
+		q->flush_flags = REQ_FLUSH;
+	} else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	if (fua) {
+		if (wc)
+			q->flush_flags |= REQ_FUA;
+		queue_flag_set(QUEUE_FLAG_FUA, q);
+	} else
+		queue_flag_clear(QUEUE_FLAG_FUA, q);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 995b58d46ed1..99205965f559 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -347,6 +347,38 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	return ret;
 }
 
+static ssize_t queue_wc_show(struct request_queue *q, char *page)
+{
+	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+		return sprintf(page, "write back\n");
+
+	return sprintf(page, "write through\n");
+}
+
+static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+			      size_t count)
+{
+	int set = -1;
+
+	if (!strncmp(page, "write back", 10))
+		set = 1;
+	else if (!strncmp(page, "write through", 13) ||
+		 !strncmp(page, "none", 4))
+		set = 0;
+
+	if (set == -1)
+		return -EINVAL;
+
+	spin_lock_irq(q->queue_lock);
+	if (set)
+		queue_flag_set(QUEUE_FLAG_WC, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return count;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -478,6 +510,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
 	.store = queue_poll_store,
 };
 
+static struct queue_sysfs_entry queue_wc_entry = {
+	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_wc_show,
+	.store = queue_wc_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -503,6 +541,7 @@ static struct attribute *default_attrs[] = {
 	&queue_iostats_entry.attr,
 	&queue_random_entry.attr,
 	&queue_poll_entry.attr,
+	&queue_wc_entry.attr,
 	NULL,
 };
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bbaa76757018..ba72687c5654 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -491,6 +491,8 @@ struct request_queue {
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
 #define QUEUE_FLAG_POLL        22	/* IO polling enabled if set */
+#define QUEUE_FLAG_WC          23	/* Write back caching */
+#define QUEUE_FLAG_FUA         24	/* device supports FUA writes */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -1009,6 +1011,7 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
+extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_rq_map_sg(struct request_queue *, struct request *,
 			 struct scatterlist *);
-- cgit v1.2.3

From 9082e87bfbf83579b97e3bfc45d81f3e50da2177 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sat, 16 Apr 2016 14:55:27 -0400
Subject: block: remove struct bio_batch

It can be replaced with a combination of bio_chain and submit_bio_wait.
Signed-off-by: Christoph Hellwig
Signed-off-by: Ming Lin
Signed-off-by: Sagi Grimberg
Reviewed-by: Ming Lei
Signed-off-by: Jens Axboe
---
 block/blk-lib.c | 118 +++++++++++++------------------------------------------
 1 file changed, 27 insertions(+), 91 deletions(-)

(limited to 'block')

diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9ebf65379556..700d248cbde5 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -9,21 +9,17 @@
 
 #include "blk.h"
 
-struct bio_batch {
-	atomic_t		done;
-	int			error;
-	struct completion	*wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+		gfp_t gfp)
 {
-	struct bio_batch *bb = bio->bi_private;
+	struct bio *new = bio_alloc(gfp, nr_pages);
+
+	if (bio) {
+		bio_chain(bio, new);
+		submit_bio(rw, bio);
+	}
 
-	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
-		bb->error = bio->bi_error;
-	if (atomic_dec_and_test(&bb->done))
-		complete(bb->wait);
-	bio_put(bio);
+	return new;
 }
 
 /**
@@ -40,13 +36,11 @@ static void bio_batch_end_io(struct bio *bio)
 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
 	unsigned int granularity;
 	int alignment;
-	struct bio_batch bb;
-	struct bio *bio;
+	struct bio *bio = NULL;
 	int ret = 0;
 	struct blk_plug plug;
 
@@ -66,25 +60,15 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		type |= REQ_SECURE;
 	}
 
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
 	blk_start_plug(&plug);
 	while (nr_sects) {
 		unsigned int req_sects;
 		sector_t end_sect, tmp;
 
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
 		/* Make sure bi_size doesn't overflow */
 		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
 
-		/*
+		/**
 		 * If splitting a request, and the next starting sector would be
 		 * misaligned, stop the discard at the previous aligned sector.
 		 */
@@ -98,18 +82,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			req_sects = end_sect - sector;
 		}
 
+		bio = next_bio(bio, type, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
 
 		bio->bi_iter.bi_size = req_sects << 9;
 		nr_sects -= req_sects;
 		sector = end_sect;
 
-		atomic_inc(&bb.done);
-		submit_bio(type, bio);
-
 		/*
 		 * We can loop for a long time in here, if someone does
 		 * full device discards (like mkfs). Be nice and allow
@@ -118,15 +98,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 */
 		cond_resched();
 	}
+	if (bio)
+		ret = submit_bio_wait(type, bio);
 	blk_finish_plug(&plug);
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 
@@ -145,11 +121,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 				sector_t nr_sects, gfp_t gfp_mask,
 				struct page *page)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	unsigned int max_write_same_sectors;
-	struct bio_batch bb;
-	struct bio *bio;
+	struct bio *bio = NULL;
 	int ret = 0;
 
 	if (!q)
@@ -158,21 +132,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
 	max_write_same_sectors = UINT_MAX >> 9;
 
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
 	while (nr_sects) {
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
 		bio->bi_vcnt = 1;
 		bio->bi_io_vec->bv_page = page;
 		bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +149,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}
-
-		atomic_inc(&bb.done);
-		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+	return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
 
@@ -216,28 +172,15 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 				  sector_t nr_sects, gfp_t gfp_mask)
 {
 	int ret;
-	struct bio *bio;
-	struct bio_batch bb;
+	struct bio *bio = NULL;
 	unsigned int sz;
-	DECLARE_COMPLETION_ONSTACK(wait);
 
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
-	ret = 0;
 	while (nr_sects != 0) {
-		bio = bio_alloc(gfp_mask,
-				min(nr_sects, (sector_t)BIO_MAX_PAGES));
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, WRITE,
+				min(nr_sects, (sector_t)BIO_MAX_PAGES),
+				gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev = bdev;
-		bio->bi_end_io = bio_batch_end_io;
-		bio->bi_private = &bb;
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +190,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			if (ret < (sz << 9))
 				break;
 		}
-		ret = 0;
-		atomic_inc(&bb.done);
-		submit_bio(WRITE, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		return submit_bio_wait(WRITE, bio);
+	return 0;
 }
 
 /**
-- cgit v1.2.3

From 38f252553300ee1d3346a5273e95fe1dd60ca50a Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sat, 16 Apr 2016 14:55:28 -0400
Subject: block: add __blkdev_issue_discard

This is a version of blkdev_issue_discard which doesn't wait for the
I/O to complete, but instead allows the caller to submit the final bio
and/or chain it to others.
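A caller that wants to drive the bio chain itself can use it along the
following lines (a minimal sketch; the plugging and flag handling of
the real blkdev_issue_discard() wrapper below are omitted):

	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
			REQ_WRITE | REQ_DISCARD, &bio);
	if (!ret && bio)
		ret = submit_bio_wait(REQ_WRITE | REQ_DISCARD, bio);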
Signed-off-by: Christoph Hellwig
Signed-off-by: Ming Lin
Signed-off-by: Sagi Grimberg
Reviewed-by: Ming Lei
Signed-off-by: Jens Axboe
---
 block/blk-lib.c        | 63 +++++++++++++++++++++++++++++---------------------
 include/linux/blkdev.h |  2 ++
 2 files changed, 39 insertions(+), 26 deletions(-)

(limited to 'block')

diff --git a/block/blk-lib.c b/block/blk-lib.c
index 700d248cbde5..ccbce2b2ea05 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -22,45 +22,25 @@ static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
 	return new;
 }
 
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev:	blockdev to issue discard for
- * @sector:	start sector
- * @nr_sects:	number of sectors to discard
- * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @flags:	BLKDEV_IFL_* flags to control behaviour
- *
- * Description:
- *    Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-	int type = REQ_WRITE | REQ_DISCARD;
+	struct bio *bio = *biop;
 	unsigned int granularity;
 	int alignment;
-	struct bio *bio = NULL;
-	int ret = 0;
-	struct blk_plug plug;
 
 	if (!q)
 		return -ENXIO;
-
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
+	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+		return -EOPNOTSUPP;
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
 	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
 
-	if (flags & BLKDEV_DISCARD_SECURE) {
-		if (!blk_queue_secdiscard(q))
-			return -EOPNOTSUPP;
-		type |= REQ_SECURE;
-	}
-
-	blk_start_plug(&plug);
 	while (nr_sects) {
 		unsigned int req_sects;
 		sector_t end_sect, tmp;
@@ -98,7 +78,38 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 */
 		cond_resched();
 	}
-	if (bio)
+
+	*biop = bio;
+	return 0;
+}
+EXPORT_SYMBOL(__blkdev_issue_discard);
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev:	blockdev to issue discard for
+ * @sector:	start sector
+ * @nr_sects:	number of sectors to discard
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @flags:	BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ *    Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+	int type = REQ_WRITE | REQ_DISCARD;
+	struct bio *bio = NULL;
+	struct blk_plug plug;
+	int ret;
+
+	if (flags & BLKDEV_DISCARD_SECURE)
+		type |= REQ_SECURE;
+
+	blk_start_plug(&plug);
+	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+			&bio);
+	if (!ret && bio)
 		ret = submit_bio_wait(type, bio);
 	blk_finish_plug(&plug);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ba72687c5654..b79131acf6c0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1131,6 +1131,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
-- cgit v1.2.3

From a21f2a3ec62abe2e06500d6550659a0ff5624fbb Mon Sep 17 00:00:00 2001
From: Michael Callahan
Date: Tue, 3 May 2016 11:12:49 -0400
Subject: block: Minor blk_account_io_start usage cleanup

blk_account_io_start does not need to be wrapped with blk_do_io_stat,
as it already checks for that condition.

Signed-off-by: Michael Callahan
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'block')

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1699baf39b78..0c2ed831c926 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1122,8 +1122,7 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 {
 	init_request_from_bio(rq, bio);
 
-	if (blk_do_io_stat(rq))
-		blk_account_io_start(rq, 1);
+	blk_account_io_start(rq, 1);
 }
 
 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
-- cgit v1.2.3

From bbd848e0fade51ae51dab86a0683069cef89953f Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Thu, 5 May 2016 11:54:21 -0400
Subject: block: reinstate early return of -EOPNOTSUPP from blkdev_issue_discard

Commit 38f25255330 ("block: add __blkdev_issue_discard") incorrectly
disallowed the early return of -EOPNOTSUPP if the device doesn't support
discard (or secure discard). This early return of -EOPNOTSUPP has
always been part of the blkdev_issue_discard() interface, so there isn't
a good reason to break that behaviour -- especially when it can be
easily reinstated.

The nuance of allowing an early return of -EOPNOTSUPP while disallowing
a late return of -EOPNOTSUPP is this: if the overall device never
advertised support for discards and one is issued to the device, it is
beneficial to inform the caller that discards are not supported via
-EOPNOTSUPP. But if a device advertises discard support, it means that
at least a subset of the device does have discard support -- yet it
could be that discards issued to some regions of a stacked device will
not be supported. In that case the late return of -EOPNOTSUPP must be
disallowed.
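Put differently, the contract a caller sees after this fix is (sketch
only, hypothetical caller code):

	int ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);

	if (ret == -EOPNOTSUPP) {
		/* queue never advertised discard: early return, still visible */
	} else if (ret) {
		/*
		 * a real I/O error; a late -EOPNOTSUPP coming back from
		 * submit_bio_wait() has already been masked to 0 inside
		 * the helper
		 */
	}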
Fixes: 38f25255330 ("block: add __blkdev_issue_discard")
Signed-off-by: Mike Snitzer
Signed-off-by: Jens Axboe
---
 block/blk-lib.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'block')

diff --git a/block/blk-lib.c b/block/blk-lib.c
index ccbce2b2ea05..23d7f301a196 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -109,11 +109,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	blk_start_plug(&plug);
 	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
 			&bio);
-	if (!ret && bio)
+	if (!ret && bio) {
 		ret = submit_bio_wait(type, bio);
+		if (ret == -EOPNOTSUPP)
+			ret = 0;
+	}
 	blk_finish_plug(&plug);
 
-	return ret != -EOPNOTSUPP ? ret : 0;
+	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
-- cgit v1.2.3

From 0ef5a50c1658d4d96a44f145bcb92ff3310c75b1 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Thu, 5 May 2016 11:54:22 -0400
Subject: block: make bio_inc_remaining() interface accessible again

Commit 326e1dbb57 ("block: remove management of bi_remaining when
restoring original bi_end_io") made bio_inc_remaining() private to
bio.c because the only use-case that made sense was confined to the
bio_chain() interface.

Since that time DM thinp went on to use bio_chain() in its relatively
complex implementation of async discard support. That implementation,
even when converted over to use the new async __blkdev_issue_discard()
interface, depends on deferred completion of the original discard bio --
which is most appropriately implemented using bio_inc_remaining().

DM thinp foolishly duplicated bio_inc_remaining(), local to dm-thin.c
as __bio_inc_remaining(), so re-exporting bio_inc_remaining() allows us
to put an end to that foolishness.

All said, bio_inc_remaining() should really only be used in conjunction
with bio_chain(). It isn't intended for generic bio reference counting.

Signed-off-by: Mike Snitzer
Acked-by: Joe Thornber
Signed-off-by: Jens Axboe
---
 block/bio.c         | 11 -----------
 include/linux/bio.h | 11 +++++++++++
 2 files changed, 11 insertions(+), 11 deletions(-)

(limited to 'block')

diff --git a/block/bio.c b/block/bio.c
index 807d25e466ec..0e4aa42bc30d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -311,17 +311,6 @@ static void bio_chain_endio(struct bio *bio)
 	bio_endio(__bio_chain_endio(bio));
 }
 
-/*
- * Increment chain count for the bio. Make sure the CHAIN flag update
- * is visible before the raised count.
- */
-static inline void bio_inc_remaining(struct bio *bio)
-{
-	bio_set_flag(bio, BIO_CHAIN);
-	smp_mb__before_atomic();
-	atomic_inc(&bio->__bi_remaining);
-}
-
 /**
  * bio_chain - chain bio completions
  * @bio: the target bio
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 6b7481f62218..9faebf7f9a33 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -702,6 +702,17 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
 	return bio;
 }
 
+/*
+ * Increment chain count for the bio. Make sure the CHAIN flag update
+ * is visible before the raised count.
+ */
+static inline void bio_inc_remaining(struct bio *bio)
+{
+	bio_set_flag(bio, BIO_CHAIN);
+	smp_mb__before_atomic();
+	atomic_inc(&bio->__bi_remaining);
+}
+
 /*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
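To make the sanctioned pairing concrete, a minimal sketch (hypothetical
driver code loosely modeled on the DM thinp use-case described above,
not taken from dm-thin.c):

	/*
	 * Take an extra completion reference on @parent so its end_io
	 * doesn't run until we explicitly complete it ourselves.
	 */
	bio_inc_remaining(parent);
	bio_chain(discard, parent);
	submit_bio(REQ_WRITE | REQ_DISCARD, discard);

	/* ... later, once our own deferred processing has finished: */
	bio_endio(parent);	/* pairs with the bio_inc_remaining() above */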
-- cgit v1.2.3

From 59fa0224cfea31dde596e29555de94c961b139f9 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Mon, 9 May 2016 17:22:15 -0700
Subject: blk-throttle: don't parse cgroup path if trace isn't enabled

If tracing isn't enabled, parsing the cgroup path just wastes CPU cycles.

Signed-off-by: Shaohua Li
Signed-off-by: Jens Axboe
---
 block/blk-throttle.c         | 5 ++---
 include/linux/blktrace_api.h | 9 +++++++++
 2 files changed, 11 insertions(+), 3 deletions(-)

(limited to 'block')

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2149a1ddbacf..47a3e540631a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -211,15 +211,14 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
  *
  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
  * throtl_grp; otherwise, just "throtl".
- *
- * TODO: this should be made a function and name formatting should happen
- * after testing whether blktrace is enabled.
  */
 #define throtl_log(sq, fmt, args...)	do {				\
 	struct throtl_grp *__tg = sq_to_tg((sq));			\
 	struct throtl_data *__td = sq_to_td((sq));			\
 									\
 	(void)__td;							\
+	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
+		break;							\
 	if ((__tg)) {							\
 		char __pbuf[128];					\
 									\
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index afc1343df3c7..0f3172b8b225 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -57,6 +57,14 @@ void __trace_note_message(struct blk_trace *, const char *fmt, ...);
 	} while (0)
 #define BLK_TN_MAX_MSG		128
 
+static inline bool blk_trace_note_message_enabled(struct request_queue *q)
+{
+	struct blk_trace *bt = q->blk_trace;
+	if (likely(!bt))
+		return false;
+	return bt->act_mask & BLK_TC_NOTIFY;
+}
+
 extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
 				void *data, size_t len);
 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
@@ -79,6 +87,7 @@ extern struct attribute_group blk_trace_attr_group;
 # define blk_trace_remove(q)			(-ENOTTY)
 # define blk_add_trace_msg(q, fmt, ...)		do { } while (0)
 # define blk_trace_remove_sysfs(dev)		do { } while (0)
+# define blk_trace_note_message_enabled(q)	(false)
 static inline int blk_trace_init_sysfs(struct device *dev)
 {
 	return 0;
-- cgit v1.2.3

From b3a834b1596ac668df206aa2bb1f191c31f5f5e4 Mon Sep 17 00:00:00 2001
From: Bartlomiej Zolnierkiewicz
Date: Mon, 16 May 2016 09:54:47 -0600
Subject: blk-mq: fix undefined behaviour in order_to_size()

When the this_order variable in blk_mq_init_rq_map() becomes zero, the
code incorrectly decrements the variable and passes the result to the
order_to_size() helper, causing undefined behaviour:

 UBSAN: Undefined behaviour in block/blk-mq.c:1459:27
 shift exponent 4294967295 is too large for 32-bit type 'unsigned int'
 CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.6.0-rc6-00072-g33656a1 #22

Fix the code by checking that this_order is non-zero before decrementing
it and passing the result to order_to_size().
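For reference, order_to_size() boils down to a page-size shift, roughly
(a sketch of the helper, assuming the blk-mq.c of this era):

	static size_t order_to_size(unsigned int order)
	{
		return (size_t)PAGE_SIZE << order;	/* order == (unsigned)-1 here: UB */
	}

With the old operand order, order_to_size(this_order - 1) was still
evaluated once this_order reached zero, passing 4294967295 as the shift
exponent; checking this_order first short-circuits before the call.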
Reported-by: Meelis Roos
Fixes: 320ae51feed5 ("blk-mq: new multi-queue block IO queueing mechanism")
Signed-off-by: Bartlomiej Zolnierkiewicz
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'block')

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0c2ed831c926..7df9c9263b21 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1495,7 +1495,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		int to_do;
 		void *p;
 
-		while (left < order_to_size(this_order - 1) && this_order)
+		while (this_order && left < order_to_size(this_order - 1))
 			this_order--;
 
 		do {
-- cgit v1.2.3