author     Linus Torvalds <torvalds@linux-foundation.org>   2016-05-17 15:29:49 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-05-17 15:29:49 -0700
commit     a4d1dbed0e27030b3c3ca2d1d5c33a1b45bc53d2 (patch)
tree       6f92002ba36efbee3adc1e7b2a0d4b0621c1f1a6 /block/blk-settings.c
parent     c2e7b207058d4ff6a9010430763fb561f307eb67 (diff)
parent     b3a834b1596ac668df206aa2bb1f191c31f5f5e4 (diff)
Merge branch 'for-4.7/core' of git://git.kernel.dk/linux-block
Pull core block layer updates from Jens Axboe:
 "This is the core block IO changes for this merge window. Nothing
  earth shattering in here, it's mostly just fixes. In detail:

   - Fix for a long-standing issue where wrong ordering in blk-mq caused
     order_to_size() to spew a warning. From Bart.

   - Async discard support from Christoph. Basically just splitting our
     sync interface into a submit + wait part.

   - Add a cleaner interface for flagging whether a device has a write
     back cache or not. We've previously overloaded blk_queue_flush()
     with this, but let's make it more explicit. Drivers cleaned up and
     updated in the drivers pull request. From me.

   - Fix for a double check for whether IO accounting is enabled or not.
     From Michael Callahan.

   - Fix for the async discard from Mike Snitzer, reinstating the early
     EOPNOTSUPP return if the device doesn't support discards.

   - Also from Mike, export bio_inc_remaining() so dm can drop its
     private copy of it.

   - From Ming Lin, add support for passing in an offset for request
     payloads.

   - Tag function export from Sagi, which will be used in NVMe in the
     drivers pull.

   - Two blktrace related fixes from Shaohua.

   - Propagate NOMERGE flag when making a request from a bio, also from
     Shaohua.

   - An optimization to not parse cgroup paths in blk-throttle, if we
     don't need to. From Shaohua"

* 'for-4.7/core' of git://git.kernel.dk/linux-block:
  blk-mq: fix undefined behaviour in order_to_size()
  blk-throttle: don't parse cgroup path if trace isn't enabled
  blktrace: add missed mask name
  blktrace: delete garbage for message trace
  block: make bio_inc_remaining() interface accessible again
  block: reinstate early return of -EOPNOTSUPP from blkdev_issue_discard
  block: Minor blk_account_io_start usage cleanup
  block: add __blkdev_issue_discard
  block: remove struct bio_batch
  block: copy NOMERGE flag from bio to request
  block: add ability to flag write back caching on a device
  blk-mq: Export tagset iter function
  block: add offset in blk_add_request_payload()
  writeback: Fix performance regression in wb_over_bg_thresh()
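As a usage sketch for the write back cache flag described above (not part of this pull request itself): a driver calls blk_queue_write_cache(), whose signature appears in the diff below, instead of overloading blk_queue_flush(). The mydrv_setup_cache() name and the has_wb_cache/supports_fua parameters are hypothetical placeholders for whatever the driver already knows about its hardware.

#include <linux/blkdev.h>

/*
 * Hypothetical driver hook: tell the block layer whether the device has
 * a volatile write back cache and whether it supports FUA writes. Only
 * blk_queue_write_cache() comes from this series; the surrounding names
 * are illustrative.
 */
static void mydrv_setup_cache(struct request_queue *q,
			      bool has_wb_cache, bool supports_fua)
{
	/* Replaces the earlier blk_queue_flush(q, REQ_FLUSH | REQ_FUA) idiom */
	blk_queue_write_cache(q, has_wb_cache, supports_fua);
}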
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--   block/blk-settings.c   26
1 files changed, 26 insertions, 0 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 331e4eee0dda..c903bee43cf8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -846,6 +846,32 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q: the request queue for the device
+ * @wc: write back cache on or off
+ * @fua: device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+	spin_lock_irq(q->queue_lock);
+	if (wc) {
+		queue_flag_set(QUEUE_FLAG_WC, q);
+		q->flush_flags = REQ_FLUSH;
+	} else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	if (fua) {
+		if (wc)
+			q->flush_flags |= REQ_FUA;
+		queue_flag_set(QUEUE_FLAG_FUA, q);
+	} else
+		queue_flag_clear(QUEUE_FLAG_FUA, q);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;