author    Linus Torvalds <torvalds@linux-foundation.org>  2017-05-05 19:31:06 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-05-05 19:31:06 -0700
commit    2eecf3a49f1ff24c6116c954dd74e83f227fc716 (patch)
tree      c7d51d35106fffb2bea53b7b1cbc7c5a78a4f233 /drivers
parent    53ef7d0e208fa38c3f63d287e0c3ab174f1e1235 (diff)
parent    10add84e276432d9dd8044679a1028dd4084117e (diff)
Merge tag 'for-4.12/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:

 - DM cache metadata fixes to short-circuit operations that require the
   metadata not be in 'fail_io' mode. Otherwise crashes are possible.

 - a DM cache fix to address the inability to adapt to continuous IO
   that happened to also reflect a changing working set (which required
   old blocks be demoted before the new working set could be promoted)

 - a DM cache smq policy cleanup that fell out from reviewing the above

 - fix the Kconfig help text for CONFIG_DM_INTEGRITY

* tag 'for-4.12/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache metadata: fail operations if fail_io mode has been established
  dm integrity: improve the Kconfig help text for DM_INTEGRITY
  dm cache policy smq: cleanup free_target_met() and clean_target_met()
  dm cache policy smq: allow demotions to happen even during continuous IO
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/Kconfig                15
-rw-r--r--  drivers/md/dm-cache-metadata.c    12
-rw-r--r--  drivers/md/dm-cache-policy-smq.c  30
3 files changed, 39 insertions(+), 18 deletions(-)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 349ff8813401..906103c168ea 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -503,13 +503,24 @@ config DM_LOG_WRITES
If unsure, say N.
config DM_INTEGRITY
- tristate "Integrity target"
+ tristate "Integrity target support"
depends on BLK_DEV_DM
select BLK_DEV_INTEGRITY
select DM_BUFIO
select CRYPTO
select ASYNC_XOR
---help---
- This is the integrity target.
+ This device-mapper target emulates a block device that has
+ additional per-sector tags that can be used for storing
+ integrity information.
+
+ This integrity target is used with the dm-crypt target to
+ provide authenticated disk encryption or it can be used
+ standalone.
+
+ To compile this code as a module, choose M here: the module will
+ be called dm-integrity.
+
+ If unsure, say N.
endif # MD
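The new help text describes dm-integrity as a block device with additional per-sector tags. As a rough conceptual sketch only (the struct below is invented for illustration; it is not the dm-integrity on-disk format or any kernel API), a tagged device can be pictured as pairing each data sector with a small opaque tag that an upper layer such as dm-crypt fills with authentication data:

/* Purely illustrative model of "a block device with per-sector tags";
 * the real dm-integrity metadata layout is different. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE 512
#define TAG_SIZE    16   /* invented tag size for this example */

struct tagged_sector {
	uint8_t data[SECTOR_SIZE];
	uint8_t tag[TAG_SIZE];   /* e.g. an authentication tag written by dm-crypt */
};

int main(void)
{
	struct tagged_sector s;

	memset(s.data, 0xab, sizeof(s.data));
	memset(s.tag, 0, sizeof(s.tag));   /* upper layer would store its tag here */
	printf("sector payload %zu bytes + %zu tag bytes\n",
	       sizeof(s.data), sizeof(s.tag));
	return 0;
}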
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 8568dbd50ba4..4a4e9c75fc4c 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1624,17 +1624,19 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
{
- int r;
+ int r = -EINVAL;
flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
clear_clean_shutdown);
WRITE_LOCK(cmd);
+ if (cmd->fail_io)
+ goto out;
+
r = __commit_transaction(cmd, mutator);
if (r)
goto out;
r = __begin_transaction(cmd);
-
out:
WRITE_UNLOCK(cmd);
return r;
@@ -1646,7 +1648,8 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
int r = -EINVAL;
READ_LOCK(cmd);
- r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+ if (!cmd->fail_io)
+ r = dm_sm_get_nr_free(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;
@@ -1658,7 +1661,8 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
int r = -EINVAL;
READ_LOCK(cmd);
- r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+ if (!cmd->fail_io)
+ r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;
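Each of the accessors above follows the same guard: take the metadata lock, bail out before touching the space map once fail_io has been established, and fall through to the unlock path. A minimal user-space sketch of that pattern, assuming a pthread rwlock in place of the kernel's READ_LOCK/WRITE_LOCK macros and with invented struct and function names standing in for the dm-cache ones:

/* Stand-alone model of the fail_io short-circuit; not kernel code. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cache_metadata {
	pthread_rwlock_t lock;
	bool fail_io;          /* set once a metadata I/O error is seen */
	uint64_t nr_free;      /* stands in for the space-map counter */
};

/* Mirrors the shape of dm_cache_get_free_metadata_block_count():
 * default to -EINVAL and only consult the counter when fail_io is clear. */
static int get_free_block_count(struct cache_metadata *cmd, uint64_t *result)
{
	int r = -EINVAL;

	pthread_rwlock_rdlock(&cmd->lock);
	if (!cmd->fail_io) {
		*result = cmd->nr_free;
		r = 0;
	}
	pthread_rwlock_unlock(&cmd->lock);

	return r;
}

int main(void)
{
	struct cache_metadata cmd = {
		.lock = PTHREAD_RWLOCK_INITIALIZER,
		.fail_io = false,
		.nr_free = 128,
	};
	uint64_t free_blocks;

	printf("normal path:  r=%d\n", get_free_block_count(&cmd, &free_blocks));
	cmd.fail_io = true;
	printf("fail_io path: r=%d (-EINVAL)\n",
	       get_free_block_count(&cmd, &free_blocks));
	return 0;
}

Initialising r to -EINVAL up front, as dm_cache_commit() now does, keeps the early-exit path from returning an uninitialised value.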
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index e0c40aec5e96..72479bd61e11 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1120,28 +1120,30 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
* Cache entries may not be populated. So we cannot rely on the
* size of the clean queue.
*/
- unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+ unsigned nr_clean;
- if (idle)
+ if (idle) {
/*
* We'd like to clean everything.
*/
return q_size(&mq->dirty) == 0u;
- else
- return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
- percent_to_target(mq, CLEAN_TARGET);
+ }
+
+ nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+ return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
+ percent_to_target(mq, CLEAN_TARGET);
}
static bool free_target_met(struct smq_policy *mq, bool idle)
{
- unsigned nr_free = from_cblock(mq->cache_size) -
- mq->cache_alloc.nr_allocated;
+ unsigned nr_free;
- if (idle)
- return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
- percent_to_target(mq, FREE_TARGET);
- else
+ if (!idle)
return true;
+
+ nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
+ return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
+ percent_to_target(mq, FREE_TARGET);
}
/*----------------------------------------------------------------*/
@@ -1214,7 +1216,11 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
return;
if (allocator_empty(&mq->cache_alloc)) {
- if (!free_target_met(mq, false))
+ /*
+ * We always claim to be 'idle' to ensure some demotions happen
+ * with continuous loads.
+ */
+ if (!free_target_met(mq, true))
queue_demotion(mq);
return;
}
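The last two hunks work together: free_target_met() now only enforces the free target when the caller says the cache is idle, and queue_promotion() always claims to be idle when the allocator is empty, so demotions keep being queued under continuous IO. A stand-alone sketch of just that arithmetic, with invented constants and simplified signatures standing in for percent_to_target() and the btracker counters:

/* Toy model of the smq free-target check; not the kernel implementation. */
#include <stdbool.h>
#include <stdio.h>

#define CACHE_SIZE          1000u
#define FREE_TARGET_PERCENT 2u     /* invented value for this example */

static unsigned percent_to_target(unsigned percent)
{
	return CACHE_SIZE * percent / 100u;
}

/* Mirrors the rewritten free_target_met(): under load (!idle) the target is
 * not enforced, but a caller can force the check by claiming to be idle. */
static bool free_target_met(unsigned nr_allocated, unsigned demotions_queued,
			    bool idle)
{
	unsigned nr_free;

	if (!idle)
		return true;

	nr_free = CACHE_SIZE - nr_allocated;
	return (nr_free + demotions_queued) >=
	       percent_to_target(FREE_TARGET_PERCENT);
}

int main(void)
{
	/* Fully allocated cache, nothing queued for demotion yet. */
	unsigned nr_allocated = CACHE_SIZE, demotions_queued = 0;

	/* Before the fix, queue_promotion() passed idle=false here, so the
	 * target always looked "met" and no demotion was ever queued. */
	printf("idle=false: met=%d\n",
	       free_target_met(nr_allocated, demotions_queued, false));

	/* After the fix it passes idle=true, so a full cache fails the check
	 * and a demotion gets queued even under continuous IO. */
	printf("idle=true:  met=%d\n",
	       free_target_met(nr_allocated, demotions_queued, true));
	return 0;
}

Compiled and run, the first call reports the target as met (the old behaviour, so no demotion), while the second reports it unmet, which is what now triggers queue_demotion() in the real policy.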