author | KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> | 2010-10-27 15:33:42 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-27 18:03:09 -0700 |
commit | 1489ebad8b5b20300562f634f279cb9c435fd90b (patch) | |
tree | 4bfa738b0733c11120705aaa37a45d87d1dd5534 /mm | |
parent | 711d3d2c9bc3fb7cb5116352fecdb5b4adb6db6e (diff) | |
download | linux-1489ebad8b5b20300562f634f279cb9c435fd90b.tar.bz2 | |
memcg: cpu hotplug aware quick account_move detection
An event counter, MEM_CGROUP_ON_MOVE, is used for a quick check of whether a
file stat update can be done asynchronously or not. Currently it is updated
with a per-cpu counter using for_each_possible_cpu().
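For context, the quick check itself only reads the local CPU's slot under rcu_read_lock(). Below is a minimal sketch of that reader side, following the shape of the checker in memcontrol.c of this era; treat the exact name and body as approximate rather than a verbatim quote:

```c
/*
 * Sketch of the reader-side fast path.  While some task's pages are being
 * moved between memcgs, every CPU's MEM_CGROUP_ON_MOVE slot is non-zero,
 * so a file-stat updater can decide with a single local read whether the
 * lockless (async) update is safe or the slow, locked path is required.
 */
static bool mem_cgroup_stealed(struct mem_cgroup *mem)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
}
```

Summing all per-cpu slots here would defeat the point of the fast path, which is why each CPU's own slot must reflect whether a move is in progress.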
This patch replaces for_each_possible_cpu() with for_each_online_cpu() and
adds the necessary synchronization logic for CPU hotplug.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memcontrol.c | 37 |
1 file changed, 30 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 31a1d3b71eee..52840adae62a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1132,11 +1132,14 @@ static unsigned int get_swappiness(struct mem_cgroup *memcg)
 static void mem_cgroup_start_move(struct mem_cgroup *mem)
 {
 	int cpu;
-	/* Because this is for moving account, reuse mc.lock */
-	spin_lock(&mc.lock);
-	for_each_possible_cpu(cpu)
+
+	get_online_cpus();
+	spin_lock(&mem->pcp_counter_lock);
+	for_each_online_cpu(cpu)
 		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
-	spin_unlock(&mc.lock);
+	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
+	spin_unlock(&mem->pcp_counter_lock);
+	put_online_cpus();
 
 	synchronize_rcu();
 }
@@ -1147,10 +1150,13 @@ static void mem_cgroup_end_move(struct mem_cgroup *mem)
 
 	if (!mem)
 		return;
-	spin_lock(&mc.lock);
-	for_each_possible_cpu(cpu)
+	get_online_cpus();
+	spin_lock(&mem->pcp_counter_lock);
+	for_each_online_cpu(cpu)
 		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
-	spin_unlock(&mc.lock);
+	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
+	spin_unlock(&mem->pcp_counter_lock);
+	put_online_cpus();
 }
 /*
  * 2 routines for checking "mem" is under move_account() or not.
@@ -1751,6 +1757,17 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
 		per_cpu(mem->stat->count[i], cpu) = 0;
 		mem->nocpu_base.count[i] += x;
 	}
+	/* need to clear ON_MOVE value, works as a kind of lock. */
+	per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
+	spin_unlock(&mem->pcp_counter_lock);
+}
+
+static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
+{
+	int idx = MEM_CGROUP_ON_MOVE;
+
+	spin_lock(&mem->pcp_counter_lock);
+	per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
 	spin_unlock(&mem->pcp_counter_lock);
 }
 
@@ -1762,6 +1779,12 @@ static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
 	struct memcg_stock_pcp *stock;
 	struct mem_cgroup *iter;
 
+	if ((action == CPU_ONLINE)) {
+		for_each_mem_cgroup_all(iter)
+			synchronize_mem_cgroup_on_move(iter, cpu);
+		return NOTIFY_OK;
+	}
+
 	if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN)
 		return NOTIFY_OK;
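For reference, the nocpu_base bookkeeping only pays off because readers fold it back in. Below is a minimal sketch of that read side, assuming the scheme introduced by the parent commit 711d3d2c9bc3; the helper name and the #ifdef placement are from memory and may not match the tree exactly:

```c
/*
 * Sketch: a statistics read sums the online CPUs' slots and then adds
 * nocpu_base, which accumulates values drained from CPUs that went
 * offline.  MEM_CGROUP_ON_MOVE is special: it is read per-CPU by the
 * fast check, so instead of being summed here, its nocpu_base value is
 * copied into a CPU's own slot when that CPU comes online.
 */
static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
				enum mem_cgroup_stat_index idx)
{
	s64 val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.count[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}
```

This is also why memcg_cpu_hotplug_callback handles CPU_ONLINE before the CPU_DEAD checks: a freshly onlined CPU must have its MEM_CGROUP_ON_MOVE slot seeded from nocpu_base before any task running on it consults the quick check.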