path: root/mm/page_alloc.c
author		Aaron Lu <aaron.lu@intel.com>	2018-04-05 16:24:06 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-05 21:36:26 -0700
commit		77ba9062e43c7e4966d9ff3afd87dca86542f86a (patch)
tree		c9cf12435f9382e2929621e7aa92b48d664cb247 /mm/page_alloc.c
parent		bc3106b26cf6a6f214fd1a8538736afc39ae1b5c (diff)
download	linux-77ba9062e43c7e4966d9ff3afd87dca86542f86a.tar.bz2
mm/free_pcppages_bulk: update pcp->count inside
Matthew Wilcox found that all callers of free_pcppages_bulk() currently
update pcp->count immediately afterwards, so it is natural to do the
update inside free_pcppages_bulk() itself. No functional or performance
change is expected from this patch.

Link: http://lkml.kernel.org/r/20180301062845.26038-2-aaron.lu@intel.com
Signed-off-by: Aaron Lu <aaron.lu@intel.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kemi Wang <kemi.wang@intel.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
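The refactoring pattern is easy to see in miniature. The sketch below is
plain C with hypothetical names (pcp_list, free_bulk), not kernel code:
the bulk-free helper owns the counter update, so callers no longer adjust
the count themselves and cannot get it out of sync.

	#include <stdio.h>

	/* Stand-in for a per-cpu page list; hypothetical, not the
	 * kernel's struct per_cpu_pages. */
	struct pcp_list {
		int count;	/* number of cached items */
	};

	/* Before this pattern, every caller did "list->count -= n"
	 * after calling the helper; here the helper decrements the
	 * counter itself as it frees. */
	static void free_bulk(struct pcp_list *list, int n)
	{
		while (n-- > 0 && list->count > 0)
			list->count--;	/* bookkeeping lives with the free */
	}

	int main(void)
	{
		struct pcp_list pcp = { .count = 8 };

		free_bulk(&pcp, 3);	/* caller never touches pcp.count */
		printf("remaining: %d\n", pcp.count);	/* prints 5 */
		return 0;
	}

Decrementing once per freed item, rather than subtracting a batch size at
the end, also keeps the count accurate if the helper stops early, which is
why the patch places pcp->count-- right next to the list_del() in the loop.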
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	10
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 86b7f0430e02..08c195cdf161 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1112,6 +1112,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			page = list_last_entry(list, struct page, lru);
 			/* must delete as __free_one_page list manipulates */
 			list_del(&page->lru);
+			pcp->count--;
 
 			mt = get_pcppage_migratetype(page);
 			/* MIGRATE_ISOLATE page should not go to pcplists */
@@ -2495,10 +2496,8 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	local_irq_save(flags);
 	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
-	if (to_drain > 0) {
+	if (to_drain > 0)
 		free_pcppages_bulk(zone, to_drain, pcp);
-		pcp->count -= to_drain;
-	}
 	local_irq_restore(flags);
 }
 #endif
@@ -2520,10 +2519,8 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 	pset = per_cpu_ptr(zone->pageset, cpu);
 
 	pcp = &pset->pcp;
-	if (pcp->count) {
+	if (pcp->count)
 		free_pcppages_bulk(zone, pcp->count, pcp);
-		pcp->count = 0;
-	}
 	local_irq_restore(flags);
 }
@@ -2747,7 +2744,6 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = READ_ONCE(pcp->batch);
 		free_pcppages_bulk(zone, batch, pcp);
-		pcp->count -= batch;
 	}
 }