author     Chris Down <chris@chrisdown.name>    2019-05-14 15:46:57 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2019-05-14 19:52:52 -0700
commit     871789d4af807d1e91a6299f12a67e06177ed420
tree       bf1243d96c02a9c6185fc116619dd401490dbd90 /mm/memcontrol.c
parent     b09e89366e1730ac13088706cec0f1335299eefb
mm, memcg: rename ambiguously named memory.stat counters and functions
I spent literally an hour trying to work out why an earlier version of my
memory.events aggregation code doesn't work properly, only to find out I
was calling memcg->events instead of memcg->memory_events, which is fairly
confusing.

This naming seems in need of reworking, so make it harder to do the wrong
thing by using vmevents instead of events, which makes it more clear that
these are vm counters rather than memcg-specific counters.

There are also a few other inconsistent names in both the percpu and
aggregated structs, so these are all cleaned up to be more coherent and
easy to understand.

This commit contains code cleanup only: there are no logic changes.

[akpm@linux-foundation.org: fix it for preceding changes]
Link: http://lkml.kernel.org/r/20190208224319.GA23801@chrisdown.name
Signed-off-by: Chris Down <chris@chrisdown.name>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Dennis Zhou <dennis@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
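For orientation, here is a minimal sketch of the naming this patch settles on.
The struct and field definitions live in include/linux/memcontrol.h, which is
not part of this file's diff, so the layout below is an abbreviated
reconstruction consistent with the hunks that follow (array-size constants
assumed from the same series), not the verbatim header:

/*
 * Abbreviated sketch, not the verbatim kernel definitions: the fields as
 * this patch renames them. The "vm" prefix marks generic VM counters;
 * memcg->memory_events (memcg-specific events such as OOM) is untouched
 * and now harder to confuse with the VM event counters.
 */
struct memcg_vmstats_percpu {			/* was: struct mem_cgroup_stat_cpu */
	long stat[MEMCG_NR_STAT];		/* was: count[] */
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup {
	/* ... */
	struct memcg_vmstats_percpu __percpu *vmstats_percpu;	/* was: stat_cpu */
	atomic_long_t vmstats[MEMCG_NR_STAT];			/* was: stat[] */
	atomic_long_t vmevents[NR_VM_EVENT_ITEMS];		/* was: events[] */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];	/* unchanged */
	/* ... */
};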
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--    mm/memcontrol.c    148
1 file changed, 76 insertions(+), 72 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 287933005e11..52a47f4e28c7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -690,7 +690,7 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
int event)
{
- return atomic_long_read(&memcg->events[event]);
+ return atomic_long_read(&memcg->vmevents[event]);
}
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
@@ -722,7 +722,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
nr_pages = -nr_pages; /* for event */
}
- __this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
+ __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
@@ -730,8 +730,8 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
{
unsigned long val, next;
- val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
- next = __this_cpu_read(memcg->stat_cpu->targets[target]);
+ val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
+ next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
/* from time_after() in jiffies.h */
if ((long)(next - val) < 0) {
switch (target) {
@@ -747,7 +747,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
default:
break;
}
- __this_cpu_write(memcg->stat_cpu->targets[target], next);
+ __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
return true;
}
return false;
@@ -2088,9 +2088,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
int nid;
long x;
- x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
+ x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
if (x)
- atomic_long_add(x, &memcg->stat[i]);
+ atomic_long_add(x, &memcg->vmstats[i]);
if (i >= NR_VM_NODE_STAT_ITEMS)
continue;
@@ -2108,9 +2108,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
long x;
- x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
+ x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
if (x)
- atomic_long_add(x, &memcg->events[i]);
+ atomic_long_add(x, &memcg->vmevents[i]);
}
}
@@ -2940,30 +2940,34 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
return retval;
}
-struct accumulated_stats {
- unsigned long stat[MEMCG_NR_STAT];
- unsigned long events[NR_VM_EVENT_ITEMS];
+struct accumulated_vmstats {
+ unsigned long vmstats[MEMCG_NR_STAT];
+ unsigned long vmevents[NR_VM_EVENT_ITEMS];
unsigned long lru_pages[NR_LRU_LISTS];
- const unsigned int *stats_array;
- const unsigned int *events_array;
- int stats_size;
- int events_size;
+
+ /* overrides for v1 */
+ const unsigned int *vmstats_array;
+ const unsigned int *vmevents_array;
+
+ int vmstats_size;
+ int vmevents_size;
};
-static void accumulate_memcg_tree(struct mem_cgroup *memcg,
- struct accumulated_stats *acc)
+static void accumulate_vmstats(struct mem_cgroup *memcg,
+ struct accumulated_vmstats *acc)
{
struct mem_cgroup *mi;
int i;
for_each_mem_cgroup_tree(mi, memcg) {
- for (i = 0; i < acc->stats_size; i++)
- acc->stat[i] += memcg_page_state(mi,
- acc->stats_array ? acc->stats_array[i] : i);
+ for (i = 0; i < acc->vmstats_size; i++)
+ acc->vmstats[i] += memcg_page_state(mi,
+ acc->vmstats_array ? acc->vmstats_array[i] : i);
- for (i = 0; i < acc->events_size; i++)
- acc->events[i] += memcg_sum_events(mi,
- acc->events_array ? acc->events_array[i] : i);
+ for (i = 0; i < acc->vmevents_size; i++)
+ acc->vmevents[i] += memcg_sum_events(mi,
+ acc->vmevents_array
+ ? acc->vmevents_array[i] : i);
for (i = 0; i < NR_LRU_LISTS; i++)
acc->lru_pages[i] += memcg_page_state(mi,
@@ -3414,7 +3418,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
unsigned long memory, memsw;
struct mem_cgroup *mi;
unsigned int i;
- struct accumulated_stats acc;
+ struct accumulated_vmstats acc;
BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
@@ -3449,22 +3453,22 @@ static int memcg_stat_show(struct seq_file *m, void *v)
(u64)memsw * PAGE_SIZE);
memset(&acc, 0, sizeof(acc));
- acc.stats_size = ARRAY_SIZE(memcg1_stats);
- acc.stats_array = memcg1_stats;
- acc.events_size = ARRAY_SIZE(memcg1_events);
- acc.events_array = memcg1_events;
- accumulate_memcg_tree(memcg, &acc);
+ acc.vmstats_size = ARRAY_SIZE(memcg1_stats);
+ acc.vmstats_array = memcg1_stats;
+ acc.vmevents_size = ARRAY_SIZE(memcg1_events);
+ acc.vmevents_array = memcg1_events;
+ accumulate_vmstats(memcg, &acc);
for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
continue;
seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
- (u64)acc.stat[i] * PAGE_SIZE);
+ (u64)acc.vmstats[i] * PAGE_SIZE);
}
for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
seq_printf(m, "total_%s %llu\n", memcg1_event_names[i],
- (u64)acc.events[i]);
+ (u64)acc.vmevents[i]);
for (i = 0; i < NR_LRU_LISTS; i++)
seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i],
@@ -3901,11 +3905,11 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
*/
static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
{
- long x = atomic_long_read(&memcg->stat[idx]);
+ long x = atomic_long_read(&memcg->vmstats[idx]);
int cpu;
for_each_online_cpu(cpu)
- x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
+ x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
if (x < 0)
x = 0;
return x;
@@ -4445,7 +4449,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
for_each_node(node)
free_mem_cgroup_per_node_info(memcg, node);
- free_percpu(memcg->stat_cpu);
+ free_percpu(memcg->vmstats_percpu);
kfree(memcg);
}
@@ -4474,8 +4478,8 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
if (memcg->id.id < 0)
goto fail;
- memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu);
- if (!memcg->stat_cpu)
+ memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
+ if (!memcg->vmstats_percpu)
goto fail;
for_each_node(node)
@@ -5561,7 +5565,7 @@ static int memory_events_show(struct seq_file *m, void *v)
static int memory_stat_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
- struct accumulated_stats acc;
+ struct accumulated_vmstats acc;
int i;
/*
@@ -5576,30 +5580,30 @@ static int memory_stat_show(struct seq_file *m, void *v)
*/
memset(&acc, 0, sizeof(acc));
- acc.stats_size = MEMCG_NR_STAT;
- acc.events_size = NR_VM_EVENT_ITEMS;
- accumulate_memcg_tree(memcg, &acc);
+ acc.vmstats_size = MEMCG_NR_STAT;
+ acc.vmevents_size = NR_VM_EVENT_ITEMS;
+ accumulate_vmstats(memcg, &acc);
seq_printf(m, "anon %llu\n",
- (u64)acc.stat[MEMCG_RSS] * PAGE_SIZE);
+ (u64)acc.vmstats[MEMCG_RSS] * PAGE_SIZE);
seq_printf(m, "file %llu\n",
- (u64)acc.stat[MEMCG_CACHE] * PAGE_SIZE);
+ (u64)acc.vmstats[MEMCG_CACHE] * PAGE_SIZE);
seq_printf(m, "kernel_stack %llu\n",
- (u64)acc.stat[MEMCG_KERNEL_STACK_KB] * 1024);
+ (u64)acc.vmstats[MEMCG_KERNEL_STACK_KB] * 1024);
seq_printf(m, "slab %llu\n",
- (u64)(acc.stat[NR_SLAB_RECLAIMABLE] +
- acc.stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
+ (u64)(acc.vmstats[NR_SLAB_RECLAIMABLE] +
+ acc.vmstats[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
seq_printf(m, "sock %llu\n",
- (u64)acc.stat[MEMCG_SOCK] * PAGE_SIZE);
+ (u64)acc.vmstats[MEMCG_SOCK] * PAGE_SIZE);
seq_printf(m, "shmem %llu\n",
- (u64)acc.stat[NR_SHMEM] * PAGE_SIZE);
+ (u64)acc.vmstats[NR_SHMEM] * PAGE_SIZE);
seq_printf(m, "file_mapped %llu\n",
- (u64)acc.stat[NR_FILE_MAPPED] * PAGE_SIZE);
+ (u64)acc.vmstats[NR_FILE_MAPPED] * PAGE_SIZE);
seq_printf(m, "file_dirty %llu\n",
- (u64)acc.stat[NR_FILE_DIRTY] * PAGE_SIZE);
+ (u64)acc.vmstats[NR_FILE_DIRTY] * PAGE_SIZE);
seq_printf(m, "file_writeback %llu\n",
- (u64)acc.stat[NR_WRITEBACK] * PAGE_SIZE);
+ (u64)acc.vmstats[NR_WRITEBACK] * PAGE_SIZE);
/*
* TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
@@ -5608,43 +5612,43 @@ static int memory_stat_show(struct seq_file *m, void *v)
* where the page->mem_cgroup is set up and stable.
*/
seq_printf(m, "anon_thp %llu\n",
- (u64)acc.stat[MEMCG_RSS_HUGE] * PAGE_SIZE);
+ (u64)acc.vmstats[MEMCG_RSS_HUGE] * PAGE_SIZE);
for (i = 0; i < NR_LRU_LISTS; i++)
seq_printf(m, "%s %llu\n", mem_cgroup_lru_names[i],
(u64)acc.lru_pages[i] * PAGE_SIZE);
seq_printf(m, "slab_reclaimable %llu\n",
- (u64)acc.stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
+ (u64)acc.vmstats[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
seq_printf(m, "slab_unreclaimable %llu\n",
- (u64)acc.stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
+ (u64)acc.vmstats[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
/* Accumulated memory events */
- seq_printf(m, "pgfault %lu\n", acc.events[PGFAULT]);
- seq_printf(m, "pgmajfault %lu\n", acc.events[PGMAJFAULT]);
+ seq_printf(m, "pgfault %lu\n", acc.vmevents[PGFAULT]);
+ seq_printf(m, "pgmajfault %lu\n", acc.vmevents[PGMAJFAULT]);
seq_printf(m, "workingset_refault %lu\n",
- acc.stat[WORKINGSET_REFAULT]);
+ acc.vmstats[WORKINGSET_REFAULT]);
seq_printf(m, "workingset_activate %lu\n",
- acc.stat[WORKINGSET_ACTIVATE]);
+ acc.vmstats[WORKINGSET_ACTIVATE]);
seq_printf(m, "workingset_nodereclaim %lu\n",
- acc.stat[WORKINGSET_NODERECLAIM]);
-
- seq_printf(m, "pgrefill %lu\n", acc.events[PGREFILL]);
- seq_printf(m, "pgscan %lu\n", acc.events[PGSCAN_KSWAPD] +
- acc.events[PGSCAN_DIRECT]);
- seq_printf(m, "pgsteal %lu\n", acc.events[PGSTEAL_KSWAPD] +
- acc.events[PGSTEAL_DIRECT]);
- seq_printf(m, "pgactivate %lu\n", acc.events[PGACTIVATE]);
- seq_printf(m, "pgdeactivate %lu\n", acc.events[PGDEACTIVATE]);
- seq_printf(m, "pglazyfree %lu\n", acc.events[PGLAZYFREE]);
- seq_printf(m, "pglazyfreed %lu\n", acc.events[PGLAZYFREED]);
+ acc.vmstats[WORKINGSET_NODERECLAIM]);
+
+ seq_printf(m, "pgrefill %lu\n", acc.vmevents[PGREFILL]);
+ seq_printf(m, "pgscan %lu\n", acc.vmevents[PGSCAN_KSWAPD] +
+ acc.vmevents[PGSCAN_DIRECT]);
+ seq_printf(m, "pgsteal %lu\n", acc.vmevents[PGSTEAL_KSWAPD] +
+ acc.vmevents[PGSTEAL_DIRECT]);
+ seq_printf(m, "pgactivate %lu\n", acc.vmevents[PGACTIVATE]);
+ seq_printf(m, "pgdeactivate %lu\n", acc.vmevents[PGDEACTIVATE]);
+ seq_printf(m, "pglazyfree %lu\n", acc.vmevents[PGLAZYFREE]);
+ seq_printf(m, "pglazyfreed %lu\n", acc.vmevents[PGLAZYFREED]);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- seq_printf(m, "thp_fault_alloc %lu\n", acc.events[THP_FAULT_ALLOC]);
+ seq_printf(m, "thp_fault_alloc %lu\n", acc.vmevents[THP_FAULT_ALLOC]);
seq_printf(m, "thp_collapse_alloc %lu\n",
- acc.events[THP_COLLAPSE_ALLOC]);
+ acc.vmevents[THP_COLLAPSE_ALLOC]);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
return 0;
@@ -6080,7 +6084,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
- __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
+ __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
local_irq_restore(flags);
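Taken together, the renamed fields implement one pattern: hot paths bump
CPU-local counters, and the per-CPU deltas are folded into atomic aggregates
either when a CPU goes offline (memcg_hotplug_cpu_dead() above) or on demand
for an exact read (memcg_exact_page_state() above). The following is a
standalone userspace sketch of that pattern with hypothetical names; plain
arrays stand in for kernel percpu allocations, and it is single-threaded for
brevity, whereas the kernel's this_cpu_xchg() makes the drain safe against
concurrent hot-path updates:

/*
 * Standalone userspace sketch (hypothetical names, not kernel code) of the
 * counter scheme the renamed fields implement: hot paths bump a per-CPU
 * delta, and teardown/readers fold those deltas into one atomic aggregate.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS   4
#define NSTATS  2

static long percpu_stat[NCPUS][NSTATS];  /* stand-in for memcg->vmstats_percpu */
static atomic_long aggregate[NSTATS];    /* stand-in for memcg->vmstats */

/* Hot path: cheap, CPU-local update, no shared-cacheline traffic. */
static void stat_add(int cpu, int idx, long delta)
{
	percpu_stat[cpu][idx] += delta;
}

/* "CPU dead" path: drain one CPU's deltas into the aggregate. */
static void fold_cpu(int cpu)
{
	for (int i = 0; i < NSTATS; i++) {
		long x = percpu_stat[cpu][i];
		percpu_stat[cpu][i] = 0;  /* the kernel uses this_cpu_xchg() */
		if (x)
			atomic_fetch_add(&aggregate[i], x);
	}
}

/* Exact read: aggregate plus every CPU's not-yet-folded delta, clamped. */
static long stat_exact(int idx)
{
	long x = atomic_load(&aggregate[idx]);
	for (int cpu = 0; cpu < NCPUS; cpu++)
		x += percpu_stat[cpu][idx];
	return x < 0 ? 0 : x;
}

int main(void)
{
	stat_add(0, 0, 5);
	stat_add(1, 0, -2);
	fold_cpu(0);
	printf("stat0 = %ld\n", stat_exact(0));  /* prints 3 */
	return 0;
}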