Diffstat (limited to 'drivers/md/dm-cache-policy-smq.c')
 drivers/md/dm-cache-policy-smq.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index e0c40aec5e96..72479bd61e11 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1120,28 +1120,30 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
* Cache entries may not be populated. So we cannot rely on the
* size of the clean queue.
*/
- unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+	unsigned nr_clean;

-	if (idle)
+ if (idle) {
/*
* We'd like to clean everything.
*/
return q_size(&mq->dirty) == 0u;
- else
- return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
- percent_to_target(mq, CLEAN_TARGET);
+ }
+
+ nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+ return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
+ percent_to_target(mq, CLEAN_TARGET);
}

static bool free_target_met(struct smq_policy *mq, bool idle)
{
- unsigned nr_free = from_cblock(mq->cache_size) -
- mq->cache_alloc.nr_allocated;
+	unsigned nr_free;

-	if (!idle)
- return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
- percent_to_target(mq, FREE_TARGET);
- else
+ if (!idle)
return true;
+
+ nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
+ return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
+ percent_to_target(mq, FREE_TARGET);
}

/*----------------------------------------------------------------*/
@@ -1214,7 +1216,11 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
	return;

	if (allocator_empty(&mq->cache_alloc)) {
- if (!free_target_met(mq, false))
+ /*
+ * We always claim to be 'idle' to ensure some demotions happen
+ * with continuous loads.
+ */
+ if (!free_target_met(mq, true))
queue_demotion(mq);
return;
}
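
Below is a minimal user-space sketch of how the two predicates read after
this patch, and why queue_promotion() now passes 'true' for idle. This is
not kernel code: 'struct toy_policy', its counters, and the 25% targets are
stand-ins for the smq internals (q_size(), btracker_nr_*_queued(),
mq->cache_alloc.nr_allocated) and for the real CLEAN_TARGET/FREE_TARGET
values, which may differ.

#include <stdbool.h>
#include <stdio.h>

#define CLEAN_TARGET 25u	/* percentages are assumed, not the kernel's */
#define FREE_TARGET 25u

struct toy_policy {
	unsigned cache_size;		/* total cache blocks */
	unsigned nr_dirty;		/* stands in for q_size(&mq->dirty) */
	unsigned nr_allocated;		/* mq->cache_alloc.nr_allocated */
	unsigned queued_demotions;	/* btracker_nr_demotions_queued() */
	unsigned queued_writebacks;	/* btracker_nr_writebacks_queued() */
};

static unsigned percent_to_target(struct toy_policy *p, unsigned percent)
{
	return p->cache_size * percent / 100u;
}

static bool clean_target_met(struct toy_policy *p, bool idle)
{
	unsigned nr_clean;

	if (idle) {
		/* We'd like to clean everything. */
		return p->nr_dirty == 0u;
	}

	nr_clean = p->cache_size - p->nr_dirty;
	return (nr_clean + p->queued_writebacks) >=
		percent_to_target(p, CLEAN_TARGET);
}

static bool free_target_met(struct toy_policy *p, bool idle)
{
	unsigned nr_free;

	if (!idle)
		return true;

	nr_free = p->cache_size - p->nr_allocated;
	return (nr_free + p->queued_demotions) >=
		percent_to_target(p, FREE_TARGET);
}

int main(void)
{
	/* A fully allocated cache under continuous load. */
	struct toy_policy p = {
		.cache_size = 1000,
		.nr_dirty = 100,
		.nr_allocated = 1000,
	};

	/* With idle == false the check short-circuits to true, so the old
	 * free_target_met(mq, false) call in queue_promotion() never
	 * triggered a demotion. */
	printf("busy: free target met = %d\n", free_target_met(&p, false));

	/* With idle == true (what queue_promotion() passes now) the real
	 * target is checked, so demotions get queued until 25% of the
	 * cache is free. */
	printf("idle: free target met = %d\n", free_target_met(&p, true));

	printf("busy: clean target met = %d\n", clean_target_met(&p, false));
	return 0;
}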