author     Alexey Dobriyan <adobriyan@gmail.com>           2019-03-05 15:48:26 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-05 21:07:19 -0800
commit     b9726c26dc21b15a2faea96fae3a42f2f7fffdcb (patch)
tree       28c39800b37b56f9cb026a8dfc79e35c6208e2b4 /mm
parent     d342a0b38674867ea67fde47b0e1e60ffe9f17a2 (diff)
numa: make "nr_node_ids" unsigned int
Number of NUMA nodes can't be negative.

This saves a few bytes on x86_64:

add/remove: 0/0 grow/shrink: 4/21 up/down: 27/-265 (-238)
Function                                     old     new   delta
hv_synic_alloc.cold                           88     110     +22
prealloc_shrinker                            260     262      +2
bootstrap                                    249     251      +2
sched_init_numa                             1566    1567      +1
show_slab_objects                            778     777      -1
s_show                                      1201    1200      -1
kmem_cache_init                              346     345      -1
__alloc_workqueue_key                       1146    1145      -1
mem_cgroup_css_alloc                        1614    1612      -2
__do_sys_swapon                             4702    4699      -3
__list_lru_init                              655     651      -4
nic_probe                                   2379    2374      -5
store_user_store                             118     111      -7
red_zone_store                               106      99      -7
poison_store                                 106      99      -7
wq_numa_init                                 348     338     -10
__kmem_cache_empty                            75      65     -10
task_numa_free                               186     173     -13
merge_across_nodes_store                     351     336     -15
irq_create_affinity_masks                   1261    1246     -15
do_numa_crng_init                            343     321     -22
task_numa_fault                             4760    4737     -23
swapfile_init                                179     156     -23
hv_synic_alloc                               536     492     -44
apply_wqattrs_prepare                        746     695     -51

Link: http://lkml.kernel.org/r/20190201223029.GA15820@avx2
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
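Two of the hunks below also convert an open-coded sizeof(...) * nr_node_ids multiplication passed to kzalloc()/kzalloc_node() into kcalloc()/kcalloc_node(). Both families return zeroed memory, but kcalloc() checks the count-times-size multiplication and returns NULL on overflow instead of silently under-allocating. A minimal sketch of the pattern, using a hypothetical struct foo table rather than anything from this patch:

	#include <linux/slab.h>

	/* Illustration only: allocate a zeroed table of nr pointers. */
	static struct foo **alloc_foo_table(unsigned int nr, gfp_t gfp)
	{
		/* Before: the multiply can wrap for a huge nr. */
		/* return kzalloc(sizeof(struct foo *) * nr, gfp); */

		/* After: kcalloc() returns NULL if nr * size overflows. */
		return kcalloc(nr, sizeof(struct foo *), gfp);
	}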
Diffstat (limited to 'mm')
-rw-r--r--  mm/list_lru.c    3
-rw-r--r--  mm/memcontrol.c  2
-rw-r--r--  mm/page_alloc.c  2
-rw-r--r--  mm/slab.c        3
-rw-r--r--  mm/slub.c        2
-rw-r--r--  mm/swapfile.c    2
-rw-r--r--  mm/vmscan.c      2
7 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 5b30625fd365..0730bf8ff39f 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -601,7 +601,6 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
			struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
-	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
@@ -612,7 +611,7 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
#endif
	memcg_get_cache_ids();

-	lru->node = kzalloc(size, GFP_KERNEL);
+	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 30bda8d7fb5c..45cd1f84268a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4429,7 +4429,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *memcg;
-	size_t size;
+	unsigned int size;
	int node;

	size = sizeof(struct mem_cgroup);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 11a5f50efd97..8df43caf2eb7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
-int nr_node_ids __read_mostly = MAX_NUMNODES;
+unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
diff --git a/mm/slab.c b/mm/slab.c
index 757e646baa5d..7510a1b489df 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -677,12 +677,11 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
-	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
-	alc_ptr = kzalloc_node(memsize, gfp, node);
+	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
	if (!alc_ptr)
		return NULL;
diff --git a/mm/slub.c b/mm/slub.c
index 017a2ce5ba23..1b08fbcb7e61 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4262,7 +4262,7 @@ void __init kmem_cache_init(void)
	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
				  slub_cpu_dead);

-	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
+	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
		cache_line_size(),
		slub_min_order, slub_max_order, slub_min_objects,
		nr_cpu_ids, nr_node_ids);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 57e9b1b31d55..a14257ac0476 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2713,7 +2713,7 @@ static struct swap_info_struct *alloc_swap_info(void)
	struct swap_info_struct *p;
	unsigned int type;
	int i;
-	int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
+	unsigned int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);

	p = kvzalloc(size, GFP_KERNEL);
	if (!p)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 209c2c78a087..e1f7ccdc0a90 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -374,7 +374,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
 */
int prealloc_shrinker(struct shrinker *shrinker)
{
-	size_t size = sizeof(*shrinker->nr_deferred);
+	unsigned int size = sizeof(*shrinker->nr_deferred);

	if (shrinker->flags & SHRINKER_NUMA_AWARE)
		size *= nr_node_ids;
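Where the x86_64 savings come from is not spelled out above; a plausible explanation (an assumption, not something the patch states) is sign extension. Using a signed int as a 64-bit array index forces a movslq instruction, while a write to a 32-bit register already zeroes the upper half, so an unsigned int index is extended more cheaply and the extension is often elided entirely. A hypothetical user-space demonstration, compiled with gcc -O2:

	/* Illustration only, not kernel code. */
	extern unsigned long table[];

	unsigned long idx_signed(int i)
	{
		return table[i];	/* movslq %edi,%rdi (3 bytes) before the load */
	}

	unsigned long idx_unsigned(unsigned int i)
	{
		return table[i];	/* mov %edi,%edi (2 bytes), often optimized away in loops */
	}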