path: root/drivers/md
author    Joe Thornber <ejt@redhat.com>    2017-05-11 05:07:34 -0400
committer Mike Snitzer <snitzer@redhat.com>    2017-05-14 21:54:32 -0400
commit    a8cd1eba6135e086109e2b94bf96deb17456ede8 (patch)
tree      4c77d6484c743c7cbd8b0138e336cb229b7194bf /drivers/md
parent    072792dcdfc8d5f91a26050e5665285f50afebf5 (diff)
download  linux-a8cd1eba6135e086109e2b94bf96deb17456ede8.tar.bz2
dm cache policy smq: only demote entries in bottom half of the clean multiqueue
Heavy IO load may mean there are very few clean blocks in the cache, and we
risk demoting entries that get hit a lot.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--    drivers/md/dm-cache-policy-smq.c    2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 72479bd61e11..a177559f2049 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1190,7 +1190,7 @@ static void queue_demotion(struct smq_policy *mq)
 	if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
 		return;
 
-	e = q_peek(&mq->clean, mq->clean.nr_levels, true);
+	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
 	if (!e) {
 		if (!clean_target_met(mq, false))
 			queue_writeback(mq);
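
For context, the smq policy's clean multiqueue orders entries by hotness: level 0 holds the coldest entries, higher levels hold entries hit more recently, and q_peek() scans levels from the bottom up to a caller-supplied maximum. The standalone sketch below is only an illustration of that idea, not the real dm-cache code: the struct layout, NR_LEVELS constant and the two-argument q_peek() are invented for the example and drop the real function's sentinel handling. It shows why capping the scan at nr_levels / 2 refuses to pick a demotion victim from the hot top half.

#include <stddef.h>
#include <stdio.h>

#define NR_LEVELS 8

struct entry {
	unsigned block;
	struct entry *next;
};

struct queue {
	unsigned nr_levels;
	struct entry *levels[NR_LEVELS];	/* level 0 = coldest, top = hottest */
};

/* Simplified q_peek(): return the first entry found in levels [0, max_level). */
static struct entry *q_peek(struct queue *q, unsigned max_level)
{
	unsigned level;

	for (level = 0; level < max_level && level < q->nr_levels; level++)
		if (q->levels[level])
			return q->levels[level];

	return NULL;
}

int main(void)
{
	struct queue clean = { .nr_levels = NR_LEVELS };
	struct entry hot = { .block = 42 };

	/* Heavy IO: the only clean entry left is a hot one in the top level. */
	clean.levels[NR_LEVELS - 1] = &hot;

	/* Old behaviour: scanning every level picks the hot block as a victim. */
	printf("all levels : %s\n",
	       q_peek(&clean, clean.nr_levels) ? "demotes hot block" : "no victim");

	/* Patched behaviour: only the cold bottom half is searched, so no victim
	 * is found and the caller queues writeback instead. */
	printf("bottom half: %s\n",
	       q_peek(&clean, clean.nr_levels / 2) ? "demotes hot block" : "no victim");

	return 0;
}

With the cap in place, a queue holding only hot clean entries yields no victim, so the caller falls through to the !e branch and queues writeback rather than evicting a frequently hit block, which is the behaviour the patch description is after.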