author		SeongJae Park <sj@kernel.org>	2021-11-05 13:47:20 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-11-06 13:30:45 -0700
commit		50585192bc2ef9309d32dabdbb5e735679f4f128 (patch)
tree		8faab246f2e20b909f1e6420a4f88d0dc1336296 /mm/damon
parent		2b8a248d5873343aa16f6c5ede30517693995f13 (diff)
download	linux-50585192bc2ef9309d32dabdbb5e735679f4f128.tar.bz2
mm/damon/schemes: skip already charged targets and regions
If DAMOS has stopped applying an action in the middle of a group of memory
regions due to its size quota, it starts the work again from the beginning
of the address space in the next charge window.  If there is a huge memory
region at the beginning of the address space and it always fulfills the
scheme's target data access pattern, the action will be applied to only
that region.

This mitigates the case by skipping, at the beginning of the next charge
window, the memory regions that were already charged in the current window.

Link: https://lkml.kernel.org/r/20211019150731.16699-4-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Cc: Amit Shah <amit@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: David Woodhouse <dwmw@amazon.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Leonard Foerster <foersleo@amazon.de>
Cc: Marco Elver <elver@google.com>
Cc: Markus Boehme <markubo@amazon.de>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
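For intuition, here is a toy user-space sketch (not kernel code; the region
layout, quota value, and the `resume` variable are made up for illustration)
of how remembering where the last charge window stopped spreads a per-window
size quota across the address space. Without `resume`, every window would
re-charge the same [0, 40) span of the big head region, which is exactly the
starvation the message above describes.

	#include <stdio.h>

	struct region { unsigned long start, end; };

	int main(void)
	{
		/* Three regions covering [0, 200); quota of 40 units per window. */
		struct region regions[] = { { 0, 100 }, { 100, 140 }, { 140, 200 } };
		int nr = sizeof(regions) / sizeof(regions[0]);
		unsigned long quota = 40;
		unsigned long resume = 0;	/* plays the role of charge_addr_from */

		for (int window = 0; window < 3; window++) {
			unsigned long charged = 0;

			printf("window %d:", window);
			for (int i = 0; i < nr && charged < quota; i++) {
				unsigned long start, sz;

				if (regions[i].end <= resume)
					continue;	/* charged in an earlier window */
				start = regions[i].start > resume ?
						regions[i].start : resume;
				sz = regions[i].end - start;
				if (sz > quota - charged)
					sz = quota - charged;
				printf(" [%lu, %lu)", start, start + sz);
				charged += sz;
				resume = start + sz;
			}
			if (resume >= regions[nr - 1].end)
				resume = 0;	/* whole space covered: wrap around */
			printf("\n");
		}
		return 0;
	}

Running this prints "[0, 40)", then "[40, 80)", then "[80, 100) [100, 120)":
each window picks up where the previous one left off instead of restarting
at address zero.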
Diffstat (limited to 'mm/damon')
-rw-r--r--	mm/damon/core.c	37

1 file changed, 37 insertions, 0 deletions
diff --git a/mm/damon/core.c b/mm/damon/core.c
index cce14a0d5c72..693b75bc3450 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -111,6 +111,8 @@ struct damos *damon_new_scheme(
 	scheme->quota.reset_interval = quota->reset_interval;
 	scheme->quota.charged_sz = 0;
 	scheme->quota.charged_from = 0;
+	scheme->quota.charge_target_from = NULL;
+	scheme->quota.charge_addr_from = 0;
 
 	return scheme;
 }
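The two fields initialized above are added to struct damos_quota in
include/linux/damon.h, which this diffstat (limited to 'mm/damon') does not
show. A rough sketch of the struct as of this point in the series, with all
members inferred from the code in this and the preceding patch (types and
comments are assumptions, not verbatim from the header):

	struct damos_quota {
		unsigned long sz;		/* maximum bytes to apply per window */
		unsigned long reset_interval;	/* charge window length in ms */

	/* private: for charging the quota */
		unsigned long charged_sz;	/* bytes charged in this window */
		unsigned long charged_from;	/* when this window started */
		struct damon_target *charge_target_from;	/* target to resume from */
		unsigned long charge_addr_from;	/* address to resume charging from */
	};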
@@ -553,6 +555,37 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
 		if (quota->sz && quota->charged_sz >= quota->sz)
 			continue;
 
+		/* Skip previously charged regions */
+		if (quota->charge_target_from) {
+			if (t != quota->charge_target_from)
+				continue;
+			if (r == damon_last_region(t)) {
+				quota->charge_target_from = NULL;
+				quota->charge_addr_from = 0;
+				continue;
+			}
+			if (quota->charge_addr_from &&
+					r->ar.end <= quota->charge_addr_from)
+				continue;
+
+			if (quota->charge_addr_from && r->ar.start <
+					quota->charge_addr_from) {
+				sz = ALIGN_DOWN(quota->charge_addr_from -
+						r->ar.start, DAMON_MIN_REGION);
+				if (!sz) {
+					if (r->ar.end - r->ar.start <=
+							DAMON_MIN_REGION)
+						continue;
+					sz = DAMON_MIN_REGION;
+				}
+				damon_split_region_at(c, t, r, sz);
+				r = damon_next_region(r);
+				sz = r->ar.end - r->ar.start;
+			}
+			quota->charge_target_from = NULL;
+			quota->charge_addr_from = 0;
+		}
+
 		/* Check the target regions condition */
 		if (sz < s->min_sz_region || s->max_sz_region < sz)
 			continue;
@@ -573,6 +606,10 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
 			}
 			c->primitive.apply_scheme(c, t, r, s);
 			quota->charged_sz += sz;
+			if (quota->sz && quota->charged_sz >= quota->sz) {
+				quota->charge_target_from = t;
+				quota->charge_addr_from = r->ar.end + 1;
+			}
 		}
 		if (s->action != DAMOS_STAT)
 			r->age = 0;
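The ALIGN_DOWN() in the second hunk keeps the split point aligned to
DAMON_MIN_REGION when a resume address falls inside a region. A small
stand-alone example of the arithmetic; the macro definitions below are
simplified stand-ins, not the kernel's, and DAMON_MIN_REGION is assumed
to be 4096:

	#include <stdio.h>

	#define DAMON_MIN_REGION	4096UL
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))	/* a: power of two */

	int main(void)
	{
		unsigned long start = 0;		/* r->ar.start */
		unsigned long charge_addr_from = 5000;	/* mid-page resume point */
		unsigned long sz = ALIGN_DOWN(charge_addr_from - start,
					      DAMON_MIN_REGION);

		/* 5000 rounds down to 4096: the region is split at start + 4096
		 * and charging resumes from the second half.  Had the resume
		 * point been less than one min region in, sz would be 0, and
		 * the patch instead splits off a single DAMON_MIN_REGION, or
		 * skips the region entirely when it is too small to split. */
		printf("aligned head size: %lu\n", sz);	/* prints 4096 */
		return 0;
	}

Splitting on a DAMON_MIN_REGION boundary keeps the leftover head a valid
region, so the charged prefix can be skipped without creating regions
smaller than DAMON's minimum granularity.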