author | Alexey Dobriyan <adobriyan@gmail.com> | 2017-11-15 17:32:21 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-15 18:21:01 -0800
commit | 4fd0b46e898791009b03b2fdd6510044fa8730a6 |
tree | ef78168cf70327d341eaf9fd07f25683dc869262 |
parent | d50112edde1d0c621520e53747044009f11c656b |
download | linux-4fd0b46e898791009b03b2fdd6510044fa8730a6.tar.bz2 |
slab, slub, slob: convert slab_flags_t to 32-bit
struct kmem_cache::flags is "unsigned long", which is wider than necessary on
64-bit as no flags are defined in the upper bits.
Switch the field to 32-bit and save some space on x86_64 until such
flags appear:
add/remove: 0/0 grow/shrink: 0/107 up/down: 0/-657 (-657)
function old new delta
sysfs_slab_add 720 719 -1
...
check_object 699 676 -23
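The pattern behind the change is easiest to see in isolation. Below is a minimal standalone sketch, not kernel code: `demo_flags_t`, the `DEMO_*` constants, and `struct demo_cache` are invented stand-ins for `slab_flags_t`, the `SLAB_*` flags, and `struct kmem_cache`. It shows a sparse `__bitwise` flag type being narrowed from `unsigned long` to `unsigned int`, which is also why the constants in the patch switch from the `UL` suffix to `U` while keeping their `__force` casts.

```c
/*
 * Standalone sketch (assumptions: names are invented for illustration).
 * The __CHECKER__ block mirrors what the kernel does for sparse; under a
 * normal compiler the annotations expand to nothing.
 */
#include <stdio.h>

#ifdef __CHECKER__
# define __bitwise __attribute__((bitwise))
# define __force   __attribute__((force))
#else
# define __bitwise
# define __force
#endif

/* Before the patch this would be "unsigned long __bitwise": 8 bytes on 64-bit. */
typedef unsigned int __bitwise demo_flags_t;

/* Constants use the plain U suffix so they match the 32-bit type. */
#define DEMO_RED_ZONE ((demo_flags_t __force)0x00000400U)
#define DEMO_POISON   ((demo_flags_t __force)0x00000800U)

struct demo_cache {
	demo_flags_t flags;	/* was unsigned long: half the width on a 64-bit kernel */
	unsigned int size;
};

int main(void)
{
	struct demo_cache c = { .flags = DEMO_RED_ZONE | DEMO_POISON, .size = 64 };

	/* Bitwise tests keep working unchanged; sparse still rejects mixing
	 * demo_flags_t with other integer types without a __force cast. */
	if (c.flags & DEMO_POISON)
		printf("poisoning enabled, struct is %zu bytes\n", sizeof(c));
	return 0;
}
```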
[akpm@linux-foundation.org: fix printk warning]
Link: http://lkml.kernel.org/r/20171021100635.GA8287@avx2
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/slab.h | 44
-rw-r--r-- | include/linux/types.h | 2
-rw-r--r-- | mm/slab.c | 4
-rw-r--r-- | mm/slub.c | 6
4 files changed, 28 insertions(+), 28 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0c4c579f52ed..f37cb93768ab 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -22,19 +22,19 @@
  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
  */
 /* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100UL)
+#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
 /* DEBUG: Red zone objs in a cache */
-#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400UL)
+#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
 /* DEBUG: Poison objects */
-#define SLAB_POISON ((slab_flags_t __force)0x00000800UL)
+#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
 /* Align objs on cache lines */
-#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000UL)
+#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
 /* Use GFP_DMA memory */
-#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000UL)
+#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
 /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000UL)
+#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
 /* Panic if kmem_cache_create() fails */
-#define SLAB_PANIC ((slab_flags_t __force)0x00040000UL)
+#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)
 /*
  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
@@ -73,50 +73,50 @@
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
 /* Defer freeing slabs to RCU */
-#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000UL)
+#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
 /* Spread some memory over cpuset */
-#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000UL)
+#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)
 /* Trace allocations and frees */
-#define SLAB_TRACE ((slab_flags_t __force)0x00200000UL)
+#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)
 /* Flag to prevent checks on free */
 #ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000UL)
+# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
 #else
-# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00000000UL)
+# define SLAB_DEBUG_OBJECTS 0
 #endif
 /* Avoid kmemleak tracing */
-#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000UL)
+#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)
 /* Don't track use of uninitialized memory */
 #ifdef CONFIG_KMEMCHECK
-# define SLAB_NOTRACK ((slab_flags_t __force)0x01000000UL)
+# define SLAB_NOTRACK ((slab_flags_t __force)0x01000000U)
 #else
-# define SLAB_NOTRACK ((slab_flags_t __force)0x00000000UL)
+# define SLAB_NOTRACK 0
 #endif
 /* Fault injection mark */
 #ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000UL)
+# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
 #else
-# define SLAB_FAILSLAB ((slab_flags_t __force)0x00000000UL)
+# define SLAB_FAILSLAB 0
 #endif
 /* Account to memcg */
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000UL)
+# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
 #else
-# define SLAB_ACCOUNT ((slab_flags_t __force)0x00000000UL)
+# define SLAB_ACCOUNT 0
 #endif
 #ifdef CONFIG_KASAN
-#define SLAB_KASAN ((slab_flags_t __force)0x08000000UL)
+#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
 #else
-#define SLAB_KASAN ((slab_flags_t __force)0x00000000UL)
+#define SLAB_KASAN 0
 #endif
 /* The following flags affect the page allocator grouping pages by mobility */
 /* Objects are reclaimable */
-#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000UL)
+#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
diff --git a/include/linux/types.h b/include/linux/types.h
index 732b52c2eae4..c94d59ef96cc 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -156,7 +156,7 @@ typedef u32 dma_addr_t;
 #endif
 
 typedef unsigned __bitwise gfp_t;
-typedef unsigned long __bitwise slab_flags_t;
+typedef unsigned __bitwise slab_flags_t;
 typedef unsigned __bitwise fmode_t;
 
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
diff --git a/mm/slab.c b/mm/slab.c
index 19b1b9f99819..7a5e0888a401 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -252,8 +252,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 	} while (0)
 
-#define CFLGS_OBJFREELIST_SLAB ((slab_flags_t __force)0x40000000UL)
-#define CFLGS_OFF_SLAB ((slab_flags_t __force)0x80000000UL)
+#define CFLGS_OBJFREELIST_SLAB ((slab_flags_t __force)0x40000000U)
+#define CFLGS_OFF_SLAB ((slab_flags_t __force)0x80000000U)
 #define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB)
 #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
diff --git a/mm/slub.c b/mm/slub.c
index 482d1daa9088..33957fd376ae 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -194,9 +194,9 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 
 /* Internal SLUB flags */
 /* Poison object */
-#define __OBJECT_POISON ((slab_flags_t __force)0x80000000UL)
+#define __OBJECT_POISON ((slab_flags_t __force)0x80000000U)
 /* Use cmpxchg_double */
-#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000UL)
+#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
 
 /*
  * Tracking user of a slab.
@@ -3657,7 +3657,7 @@ error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u order=%u offset=%u flags=%lx\n",
 		      s->name, (unsigned long)s->size, s->size,
-		      oo_order(s->oo), s->offset, flags);
+		      oo_order(s->oo), s->offset, (unsigned long)flags);
 	return -EINVAL;
 }
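The "[akpm: fix printk warning]" fixup corresponds to the final mm/slub.c hunk above: panic() still prints the flags with %lx, which expects an unsigned long, so once slab_flags_t becomes 32-bit the argument must be cast back up at the call site to keep the format checker quiet. A minimal sketch of the same issue in plain C follows; demo_flags_t and report() are invented for illustration, and the kernel's printk/panic apply the same printf-style format checking.

```c
/* Sketch of the -Wformat issue behind the akpm fixup (hypothetical names). */
#include <stdio.h>

typedef unsigned int demo_flags_t;	/* stand-in for the narrowed slab_flags_t */

static void report(const char *name, demo_flags_t flags)
{
	/*
	 * printf("flags=%lx\n", flags) would now warn: %lx expects
	 * unsigned long, but the argument is unsigned int.  Casting at the
	 * call site, as the patch does, keeps the format and argument in sync.
	 */
	printf("cache %s flags=%lx\n", name, (unsigned long)flags);
}

int main(void)
{
	report("demo", 0x00040000U);	/* e.g. a PANIC-style flag bit */
	return 0;
}
```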