author     Johannes Weiner <hannes@cmpxchg.org>            2019-11-30 17:55:49 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-12-01 12:59:07 -0800
commit     0f6a5cff43d3bcd6aa54c9af267737249d02aa21 (patch)
tree       7e4a15f6e5cd4360bc705cc5e4287d59ace0b19e /mm
parent     afaf07a65ddbdd70871cc3b81463f2a8f3884b6f (diff)
download   linux-0f6a5cff43d3bcd6aa54c9af267737249d02aa21.tar.bz2
mm: vmscan: split shrink_node() into node part and memcgs part
shrink_node() is getting long and unwieldy; split out the memcg bits.
The updated shrink_node() handles the generic (node) reclaim aspects:
- global vmpressure notifications
- writeback and congestion throttling
- reclaim/compaction management
- kswapd giving up on unreclaimable nodes
It then calls a new shrink_node_memcgs(), which handles the cgroup specifics (a condensed sketch follows this list):
- the cgroup tree traversal
- memory.low considerations
- per-cgroup slab shrinking callbacks
- per-cgroup vmpressure notifications
[hannes@cmpxchg.org: rename "root" to "target_memcg", per Roman]
Link: http://lkml.kernel.org/r/20191025143640.GA386981@cmpxchg.org
Link: http://lkml.kernel.org/r/20191022144803.302233-8-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c  41
1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 98684c92f897..d35864850b43 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2722,26 +2722,18 @@ static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
                 (memcg && memcg_congested(pgdat, memcg));
 }
 
-static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 {
-        struct reclaim_state *reclaim_state = current->reclaim_state;
-        struct mem_cgroup *root = sc->target_mem_cgroup;
-        unsigned long nr_reclaimed, nr_scanned;
-        bool reclaimable = false;
+        struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
         struct mem_cgroup *memcg;
 
-again:
-        memset(&sc->nr, 0, sizeof(sc->nr));
-        nr_reclaimed = sc->nr_reclaimed;
-        nr_scanned = sc->nr_scanned;
-
-        memcg = mem_cgroup_iter(root, NULL, NULL);
+        memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
         do {
                 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
                 unsigned long reclaimed;
                 unsigned long scanned;
 
-                switch (mem_cgroup_protected(root, memcg)) {
+                switch (mem_cgroup_protected(target_memcg, memcg)) {
                 case MEMCG_PROT_MIN:
                         /*
                          * Hard protection.
@@ -2785,7 +2777,23 @@ again:
                            sc->nr_scanned - scanned,
                            sc->nr_reclaimed - reclaimed);
 
-        } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
+        } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
+}
+
+static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+{
+        struct reclaim_state *reclaim_state = current->reclaim_state;
+        struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
+        unsigned long nr_reclaimed, nr_scanned;
+        bool reclaimable = false;
+
+again:
+        memset(&sc->nr, 0, sizeof(sc->nr));
+
+        nr_reclaimed = sc->nr_reclaimed;
+        nr_scanned = sc->nr_scanned;
+
+        shrink_node_memcgs(pgdat, sc);
 
         if (reclaim_state) {
                 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -2793,7 +2801,7 @@ again:
         }
 
         /* Record the subtree's reclaim efficiency */
-        vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
+        vmpressure(sc->gfp_mask, target_memcg, true,
                    sc->nr_scanned - nr_scanned,
                    sc->nr_reclaimed - nr_reclaimed);
 
@@ -2849,7 +2857,7 @@ again:
          */
         if (cgroup_reclaim(sc) && writeback_throttling_sane(sc) &&
             sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
-                set_memcg_congestion(pgdat, root, true);
+                set_memcg_congestion(pgdat, target_memcg, true);
 
         /*
          * Stall direct reclaim for IO completions if underlying BDIs
@@ -2858,7 +2866,8 @@ again:
          * the LRU too quickly.
          */
         if (!sc->hibernation_mode && !current_is_kswapd() &&
-            current_may_throttle() && pgdat_memcg_congested(pgdat, root))
+            current_may_throttle() &&
+            pgdat_memcg_congested(pgdat, target_memcg))
                 wait_iff_congested(BLK_RW_ASYNC, HZ/10);
 
         if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,