author    Vlastimil Babka <vbabka@suse.cz>    2020-12-14 19:10:53 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-12-15 12:13:43 -0800
commit    952eaf815925f106eb6b68346b3458a68bb18ec1 (patch)
tree      7c66b4fd4dfd320f88749320152b9d1029a2855c /mm
parent    5c3ad2eb7104754a36580079a2e4aed04a10631d (diff)
mm, page_alloc: cache pageset high and batch in struct zone
All per-cpu pagesets for a zone use the same high and batch values, which are duplicated there purely for performance (locality) reasons. This patch adds the same variables to struct zone as a shared copy.

This will be useful later for making it possible to disable pcplists temporarily, by setting the high value to 0 while remembering the values for restoring them later. But we also benefit immediately: when the newly recalculated values (after a sysctl change or memory online/offline) are unchanged from the previous ones, we can skip updating the pagesets of all possible CPUs.

Link: https://lkml.kernel.org/r/20201111092812.11329-6-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
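A minimal userspace C sketch of the caching idea, using simplified stand-in types (struct zone_like, NR_CPUS and set_high_and_batch are invented here for illustration; the real code operates on struct zone and struct per_cpu_pageset in mm/page_alloc.c):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures. */
struct pcp { unsigned long high; unsigned long batch; };
#define NR_CPUS 4

struct zone_like {
	unsigned long pageset_high;	/* shared cached copy */
	unsigned long pageset_batch;	/* shared cached copy */
	struct pcp pagesets[NR_CPUS];	/* per-cpu duplicates, kept for locality */
};

/* Mirrors the early return this patch adds: if the recalculated
 * values match the cached ones, skip touching every per-cpu pageset. */
static void set_high_and_batch(struct zone_like *z,
			       unsigned long new_high, unsigned long new_batch)
{
	if (z->pageset_high == new_high && z->pageset_batch == new_batch)
		return;	/* nothing changed; avoid the per-CPU walk */

	z->pageset_high = new_high;
	z->pageset_batch = new_batch;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		z->pagesets[cpu].high = new_high;
		z->pagesets[cpu].batch = new_batch;
		printf("cpu %d updated\n", cpu);
	}
}

int main(void)
{
	/* Start from the boot-pageset values (high = 0, batch = 1). */
	struct zone_like z = { .pageset_high = 0, .pageset_batch = 1 };

	set_high_and_batch(&z, 186, 31);	/* first call walks all CPUs */
	set_high_and_batch(&z, 186, 31);	/* unchanged: returns early   */
	return 0;
}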
Diffstat (limited to 'mm')
-rw-r--r-- mm/page_alloc.c | 16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0c47af9e97c6..c3d1752b57dc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5920,6 +5920,9 @@ static void build_zonelists(pg_data_t *pgdat)
* Other parts of the kernel may not check if the zone is available.
*/
static void pageset_init(struct per_cpu_pageset *p);
+/* These effectively disable the pcplists in the boot pageset completely */
+#define BOOT_PAGESET_HIGH 0
+#define BOOT_PAGESET_BATCH 1
static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
@@ -6309,8 +6312,8 @@ static void pageset_init(struct per_cpu_pageset *p)
* need to be as careful as pageset_update() as nobody can access the
* pageset yet.
*/
- pcp->high = 0;
- pcp->batch = 1;
+ pcp->high = BOOT_PAGESET_HIGH;
+ pcp->batch = BOOT_PAGESET_BATCH;
}
/*
@@ -6334,6 +6337,13 @@ static void zone_set_pageset_high_and_batch(struct zone *zone)
new_batch = max(1UL, 1 * new_batch);
}
+ if (zone->pageset_high == new_high &&
+ zone->pageset_batch == new_batch)
+ return;
+
+ zone->pageset_high = new_high;
+ zone->pageset_batch = new_batch;
+
for_each_possible_cpu(cpu) {
p = per_cpu_ptr(zone->pageset, cpu);
pageset_update(&p->pcp, new_high, new_batch);
@@ -6394,6 +6404,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
* offset of a (static) per cpu variable into the per cpu area.
*/
zone->pageset = &boot_pageset;
+ zone->pageset_high = BOOT_PAGESET_HIGH;
+ zone->pageset_batch = BOOT_PAGESET_BATCH;
if (populated_zone(zone))
printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
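To illustrate the "disable pcplists temporarily" use case the commit message anticipates, a hedged sketch reusing the stand-in types from the sketch above (pcplists_disable/pcplists_enable are invented names, not the kernel API introduced by this commit):

/* Force high = 0 so pages never accumulate on the pcplists; batch = 1
 * matches the boot-pageset convention.  The cached copy in the zone is
 * left untouched, so it remembers what to restore. */
static void pcplists_disable(struct zone_like *z)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		z->pagesets[cpu].high = 0;
		z->pagesets[cpu].batch = 1;
	}
}

/* Restore every per-cpu pageset from the shared copy cached in the zone. */
static void pcplists_enable(struct zone_like *z)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		z->pagesets[cpu].high = z->pageset_high;
		z->pagesets[cpu].batch = z->pageset_batch;
	}
}

Without the cached zone copy this patch adds, the restore step would have to recalculate the values from scratch; with it, disabling becomes a cheap, reversible toggle.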