author     Tejun Heo <tj@kernel.org>    2014-09-02 14:46:05 -0400
committer  Tejun Heo <tj@kernel.org>    2014-09-02 14:46:05 -0400
commit     fe6bd8c3d28357174587c4fe895d10b00321b692
tree       a8425e061a985f998573d819a3092bc201c2a525  /mm/percpu.c
parent     b539b87fed37ffc16c89a6bc3beca2d7aed82e1c
percpu: rename pcpu_reclaim_work to pcpu_balance_work
pcpu_reclaim_work will also be used to populate chunks asynchronously.
Rename it to pcpu_balance_work in preparation. pcpu_reclaim() is
renamed to pcpu_balance_workfn() and some of its local variables are
renamed too.
This is a pure rename.
Signed-off-by: Tejun Heo <tj@kernel.org>
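For readers unfamiliar with the construct being renamed: a work item bound with DECLARE_WORK() runs its function later in process context, so a fast path can hand off expensive work with a single schedule_work() call. Below is a minimal sketch of that idiom, mirroring the pcpu_balance_work/pcpu_balance_workfn pairing; the example_* names are hypothetical and not part of this patch.

#include <linux/workqueue.h>

/* forward-declare the work function, then bind it to a work item */
static void example_balance_workfn(struct work_struct *work);
static DECLARE_WORK(example_balance_work, example_balance_workfn);

static void example_balance_workfn(struct work_struct *work)
{
        /* runs later in process context on a system workqueue,
         * where it is safe to take mutexes and sleep */
}

static void example_free_path(void)
{
        /* cheap to call from a hot path; if the work item is
         * already pending, schedule_work() is a no-op */
        schedule_work(&example_balance_work);
}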
Diffstat (limited to 'mm/percpu.c')
 mm/percpu.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 4f2d58760c9c..28a830590b4c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -168,9 +168,9 @@ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
  */
 static int pcpu_nr_empty_pop_pages;
 
-/* reclaim work to release fully free chunks, scheduled from free path */
-static void pcpu_reclaim(struct work_struct *work);
-static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
+/* balance work is used to populate or destroy chunks asynchronously */
+static void pcpu_balance_workfn(struct work_struct *work);
+static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
 
 static bool pcpu_addr_in_first_chunk(void *addr)
 {
@@ -1080,36 +1080,33 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 }
 
 /**
- * pcpu_reclaim - reclaim fully free chunks, workqueue function
+ * pcpu_balance_workfn - reclaim fully free chunks, workqueue function
  * @work: unused
  *
  * Reclaim all fully free chunks except for the first one.
- *
- * CONTEXT:
- * workqueue context.
  */
-static void pcpu_reclaim(struct work_struct *work)
+static void pcpu_balance_workfn(struct work_struct *work)
 {
-	LIST_HEAD(todo);
-	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
+	LIST_HEAD(to_free);
+	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
 	struct pcpu_chunk *chunk, *next;
 
 	mutex_lock(&pcpu_alloc_mutex);
 	spin_lock_irq(&pcpu_lock);
 
-	list_for_each_entry_safe(chunk, next, head, list) {
+	list_for_each_entry_safe(chunk, next, free_head, list) {
 		WARN_ON(chunk->immutable);
 
 		/* spare the first one */
-		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
+		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
 			continue;
 
-		list_move(&chunk->list, &todo);
+		list_move(&chunk->list, &to_free);
 	}
 
 	spin_unlock_irq(&pcpu_lock);
 
-	list_for_each_entry_safe(chunk, next, &todo, list) {
+	list_for_each_entry_safe(chunk, next, &to_free, list) {
 		int rs, re;
 
 		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
@@ -1163,7 +1160,7 @@ void free_percpu(void __percpu *ptr)
 
 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
 			if (pos != chunk) {
-				schedule_work(&pcpu_balance_work);
+				schedule_work(&pcpu_balance_work);
 				break;
 			}
 	}
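Beyond the rename, the body of pcpu_balance_workfn() is a good example of a common two-phase teardown idiom: unlink victims onto a private list while holding the spinlock, then do the expensive freeing after dropping it. A minimal sketch of that idiom follows, under assumed names (my_obj, my_active, my_lock, my_drain — none of these appear in the patch):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
        struct list_head list;
};

static LIST_HEAD(my_active);            /* protected by my_lock */
static DEFINE_SPINLOCK(my_lock);

static void my_drain(void)
{
        LIST_HEAD(to_free);             /* private, needs no locking */
        struct my_obj *obj, *next;

        /* phase 1: unlink victims under the spinlock, sparing one */
        spin_lock_irq(&my_lock);
        list_for_each_entry_safe(obj, next, &my_active, list) {
                if (obj == list_first_entry(&my_active, struct my_obj, list))
                        continue;       /* keep the first entry around */
                list_move(&obj->list, &to_free);
        }
        spin_unlock_irq(&my_lock);

        /* phase 2: free outside the spinlock, where functions that
         * may sleep can be called */
        list_for_each_entry_safe(obj, next, &to_free, list) {
                list_del(&obj->list);
                kfree(obj);
        }
}

Keeping the spinlock section down to pointer manipulation is what lets free_percpu() schedule this work from atomic context while the actual teardown sleeps under pcpu_alloc_mutex.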