path: root/drivers/md/dm.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2017-05-03 10:31:20 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-05-03 10:31:20 -0700
commit    d35a878ae1c50977b55e352fd46e36e35add72a0 (patch)
tree      7cd4e0ec418c6f3be365e56ee3c49bab218cd608 /drivers/md/dm.c
parent    e5021876c91dc3894b2174cca8fa797f8e29e7b9 (diff)
parent    390020ad2af9ca04844c4f3b1f299ad8746d84c8 (diff)
download  linux-d35a878ae1c50977b55e352fd46e36e35add72a0.tar.bz2
Merge tag 'for-4.12/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:

 - A major update for DM cache that reduces the latency for deciding
   whether blocks should migrate to/from the cache. The bio-prison-v2
   interface supports this improvement by enabling direct dispatch of
   work to workqueues rather than having to delay the actual work
   dispatch to the DM cache core. So the dm-cache policies are much
   more nimble by being able to drive IO as they see fit. One
   immediate benefit from the improved latency is a cache that should
   be much more adaptive to changing workloads.

 - Add a new DM integrity target that emulates a block device that
   has additional per-sector tags that can be used for storing
   integrity information.

 - Add a new authenticated encryption feature to the DM crypt target
   that builds on the capabilities provided by the DM integrity
   target.

 - Add MD interface for switching the raid4/5/6 journal mode and
   update the DM raid target to use it to enable raid4/5/6 journal
   write-back support.

 - Switch the DM verity target over to using the asynchronous hash
   crypto API (this helps work better with architectures that have
   access to off-CPU algorithm providers, which should reduce CPU
   utilization).

 - Various request-based DM and DM multipath fixes and improvements
   from Bart and Christoph.

 - A DM thinp target fix for a bio structure leak that occurs for
   each discard IFF discard passdown is enabled.

 - A fix for a possible deadlock in DM bufio and a fix to re-check
   the new buffer allocation watermark in the face of competing admin
   changes to the 'max_cache_size_bytes' tunable.

 - A couple DM core cleanups.

* tag 'for-4.12/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (50 commits)
  dm bufio: check new buffer allocation watermark every 30 seconds
  dm bufio: avoid a possible ABBA deadlock
  dm mpath: make it easier to detect unintended I/O request flushes
  dm mpath: cleanup QUEUE_IF_NO_PATH bit manipulation by introducing assign_bit()
  dm mpath: micro-optimize the hot path relative to MPATHF_QUEUE_IF_NO_PATH
  dm: introduce enum dm_queue_mode to cleanup related code
  dm mpath: verify __pg_init_all_paths locking assumptions at runtime
  dm: verify suspend_locking assumptions at runtime
  dm block manager: remove an unused argument from dm_block_manager_create()
  dm rq: check blk_mq_register_dev() return value in dm_mq_init_request_queue()
  dm mpath: delay requeuing while path initialization is in progress
  dm mpath: avoid that path removal can trigger an infinite loop
  dm mpath: split and rename activate_path() to prepare for its expanded use
  dm ioctl: prevent stack leak in dm ioctl call
  dm integrity: use previously calculated log2 of sectors_per_block
  dm integrity: use hex2bin instead of open-coded variant
  dm crypt: replace custom implementation of hex2bin()
  dm crypt: remove obsolete references to per-CPU state
  dm verity: switch to using asynchronous hash crypto API
  dm crypt: use WQ_HIGHPRI for the IO and crypt workqueues
  ...
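Several of the mpath cleanups in the shortlog above are small API substitutions; the assign_bit() helper, for instance, collapses the recurring "set the bit if the condition holds, otherwise clear it" pattern into one call. A minimal, non-atomic sketch of the idea (the kernel helper operates on unsigned long bitmaps with atomic set_bit()/clear_bit(); this standalone C rendering is illustrative only):

	#include <stdbool.h>

	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	/* Set or clear bit 'nr' in the bitmap at 'addr' based on 'value'.
	 * Replaces the open-coded pattern:
	 *     if (value) set_bit(nr, addr); else clear_bit(nr, addr);
	 */
	static inline void assign_bit(unsigned int nr, unsigned long *addr, bool value)
	{
		unsigned long mask = 1UL << (nr % BITS_PER_LONG);

		if (value)
			addr[nr / BITS_PER_LONG] |= mask;
		else
			addr[nr / BITS_PER_LONG] &= ~mask;
	}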
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c  35
1 file changed, 26 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8bf397729bbd..268edf402bbb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1104,8 +1104,18 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
__bio_clone_fast(clone, bio);
- if (bio_integrity(bio)) {
- int r = bio_integrity_clone(clone, bio, GFP_NOIO);
+ if (unlikely(bio_integrity(bio) != NULL)) {
+ int r;
+
+ if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
+ !dm_target_passes_integrity(tio->ti->type))) {
+ DMWARN("%s: the target %s doesn't support integrity data.",
+ dm_device_name(tio->io->md),
+ tio->ti->type->name);
+ return -EIO;
+ }
+
+ r = bio_integrity_clone(clone, bio, GFP_NOIO);
if (r < 0)
return r;
}
@@ -1113,7 +1123,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
clone->bi_iter.bi_size = to_bytes(len);
- if (bio_integrity(bio))
+ if (unlikely(bio_integrity(bio) != NULL))
bio_integrity_trim(clone, 0, len);
return 0;
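The two hunks above make clone_bio() reject integrity-carrying bios with -EIO unless the target declares that it either consumes integrity metadata itself or passes it through. A target makes that declaration via feature flags in its target_type; a hedged sketch (DM_TARGET_PASSES_INTEGRITY is the flag this series checks through dm_target_passes_integrity(); everything named example_* is hypothetical):

	static struct target_type example_target = {
		.name     = "example",           /* hypothetical target */
		.version  = {1, 0, 0},
		.module   = THIS_MODULE,
		/* Declare that integrity metadata is passed through to the
		 * underlying device, so clone_bio() will not return -EIO. */
		.features = DM_TARGET_PASSES_INTEGRITY,
		.ctr      = example_ctr,         /* hypothetical constructor */
		.map      = example_map,         /* hypothetical map function */
	};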
@@ -1715,6 +1725,8 @@ static void event_callback(void *context)
*/
static void __set_size(struct mapped_device *md, sector_t size)
{
+ lockdep_assert_held(&md->suspend_lock);
+
set_capacity(md->disk, size);
i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
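The lockdep_assert_held() added above turns __set_size()'s implicit locking contract into something the kernel can enforce: with lockdep enabled it warns at runtime if the caller does not hold md->suspend_lock, and it compiles away on non-debug builds. The same pattern in isolation (struct and function names here are hypothetical):

	struct example_dev {
		struct mutex lock;
		sector_t size;
	};

	/* Caller must hold dev->lock; lockdep_assert_held() enforces at
	 * runtime what a "caller must hold" comment can only suggest. */
	static void example_set_size(struct example_dev *dev, sector_t size)
	{
		lockdep_assert_held(&dev->lock);
		dev->size = size;
	}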
@@ -1822,13 +1834,13 @@ void dm_unlock_md_type(struct mapped_device *md)
mutex_unlock(&md->type_lock);
}
-void dm_set_md_type(struct mapped_device *md, unsigned type)
+void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
BUG_ON(!mutex_is_locked(&md->type_lock));
md->type = type;
}
-unsigned dm_get_md_type(struct mapped_device *md)
+enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
return md->type;
}
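The switch from a bare unsigned to enum dm_queue_mode gives queue-mode values a real type, so the compiler can flag mismatched assignments and non-exhaustive switch statements. For reference, the enum as this series introduces it in include/linux/device-mapper.h (values reproduced from memory of the 4.12 tree; double-check against your kernel headers):

	enum dm_queue_mode {
		DM_TYPE_NONE		 = 0,
		DM_TYPE_BIO_BASED	 = 1,
		DM_TYPE_REQUEST_BASED	 = 2,
		DM_TYPE_MQ_REQUEST_BASED = 3,
		DM_TYPE_DAX_BIO_BASED	 = 4,
	};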
@@ -1855,7 +1867,7 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
- unsigned type = dm_get_md_type(md);
+ enum dm_queue_mode type = dm_get_md_type(md);
switch (type) {
case DM_TYPE_REQUEST_BASED:
@@ -1886,6 +1898,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
if (type == DM_TYPE_DAX_BIO_BASED)
queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
break;
+ case DM_TYPE_NONE:
+ WARN_ON_ONCE(true);
+ break;
}
return 0;
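Handling DM_TYPE_NONE explicitly with WARN_ON_ONCE(), rather than hiding it behind a default: label, pays off at compile time: once the switch covers every enumerator and has no default, GCC's -Wswitch warns whenever a new dm_queue_mode value is added but left unhandled. A standalone illustration of that compiler-assisted exhaustiveness:

	enum mode { MODE_A, MODE_B };

	static int handle(enum mode m)
	{
		switch (m) {
		case MODE_A:
			return 1;
		case MODE_B:
			return 2;
		/* No 'default:' on purpose -- remove the MODE_B case and
		 * building with -Wswitch (part of -Wall) warns that
		 * enumeration value 'MODE_B' is not handled in the switch. */
		}
		return 0;
	}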
@@ -2164,8 +2179,6 @@ static void unlock_fs(struct mapped_device *md)
* If __dm_suspend returns 0, the device is completely quiescent
* now. There is no request-processing activity. All new requests
* are being added to md->deferred list.
- *
- * Caller must hold md->suspend_lock
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
unsigned suspend_flags, long task_state,
@@ -2183,6 +2196,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
*/
if (noflush)
set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
+ else
+ pr_debug("%s: suspending with flush\n", dm_device_name(md));
/*
* This gets reverted if there's an error later and the targets
@@ -2381,6 +2396,8 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
{
struct dm_table *map = NULL;
+ lockdep_assert_held(&md->suspend_lock);
+
if (md->internal_suspend_count++)
return; /* nested internal suspend */
@@ -2571,7 +2588,7 @@ int dm_noflush_suspending(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
unsigned integrity, unsigned per_io_data_size)
{
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);