author    Vlastimil Babka <vbabka@suse.cz>  2022-11-10 09:44:07 +0100
committer Vlastimil Babka <vbabka@suse.cz>  2022-11-21 10:35:37 +0100
commit    4b28ba9eeab4345af43e45e6eb4056eb2f1cb764 (patch)
tree      d40099d9c163394f8dd7ded45d78a28f3610b0ec
parent    c18c20f16219516b12a4f2fd29c25e06be97e064 (diff)
parent    838de63b101147fc7d8af828465cf6d1d30232a8 (diff)
download  linux-4b28ba9eeab4345af43e45e6eb4056eb2f1cb764.tar.bz2
Merge branch 'slab/for-6.2/cleanups' into slab/for-next

- Removal of dead code from deactivate_slab() by Hyeonggon Yoo.
- Fix of BUILD_BUG_ON() for sufficient early percpu size by Baoquan He.
- Make kmem_cache_alloc() kernel-doc less misleading, by myself.
-rw-r--r--  include/linux/percpu.h |  2 +-
-rw-r--r--  include/linux/slab.h   | 23 +++++++++++++++++------
-rw-r--r--  mm/slab.c              | 10 ----------
-rw-r--r--  mm/slub.c              | 19 ++++---------------
4 files changed, 22 insertions(+), 32 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index f1ec5ad1351c..3dbb6fb70658 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -42,7 +42,7 @@
* larger than PERCPU_DYNAMIC_EARLY_SIZE.
*/
#define PERCPU_DYNAMIC_EARLY_SLOTS 128
-#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
+#define PERCPU_DYNAMIC_EARLY_SIZE (20 << 10)
/*
* PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
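
For context on the new 20 KiB value: it pairs with the strengthened BUILD_BUG_ON() in mm/slub.c at the bottom of this diff, which now reserves early per-cpu room for one struct kmem_cache_cpu per kmalloc size class per kmalloc type. Below is a standalone sketch of that check; the macro values and the structure layout are illustrative assumptions for a common 4 KiB-page configuration, not definitions taken from this patch.

#include <assert.h>

#define PERCPU_DYNAMIC_EARLY_SIZE (20 << 10)	/* 20 KiB, as raised above */
#define NR_KMALLOC_TYPES 4	/* assumed: NORMAL, DMA, CGROUP, RECLAIM */
#define KMALLOC_SHIFT_HIGH 13	/* assumed: PAGE_SHIFT + 1 on 4 KiB pages */

/* Simplified stand-in for struct kmem_cache_cpu; the real layout is
 * config dependent and grows with debugging options. */
struct kmem_cache_cpu_sketch {
	void *freelist;
	unsigned long tid;
	void *slab;
	void *partial;
};

/* Mirrors the BUILD_BUG_ON() added to alloc_kmem_cache_cpus() below:
 * the early dynamic per-cpu area must hold one kmem_cache_cpu per
 * kmalloc size class per kmalloc type. */
static_assert(PERCPU_DYNAMIC_EARLY_SIZE >=
	      NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
	      sizeof(struct kmem_cache_cpu_sketch),
	      "early percpu area too small for boot-time caches");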
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 45efc6c553b8..067f0e80be9e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -441,7 +441,18 @@ static_assert(PAGE_SHIFT <= 20);
#endif /* !CONFIG_SLOB */
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
+
+/**
+ * kmem_cache_alloc - Allocate an object
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache.
+ * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
+ *
+ * Return: pointer to the new object or %NULL in case of error
+ */
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);
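
Since the kernel-doc for kmem_cache_alloc() now lives next to these declarations, a minimal caller may help anchor it. The sketch below is hypothetical usage, not code from this patch: the cache name "foo", struct foo and the wrapper functions are all invented.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

struct foo {
	int id;
	struct list_head node;
};

static struct kmem_cache *foo_cache;

static int __init foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(gfp_t flags)
{
	/* kmem_cache_zalloc() is the __GFP_ZERO shortcut the comment
	 * above points to; plain kmem_cache_alloc() skips the zeroing. */
	return kmem_cache_zalloc(foo_cache, flags);
}

static void foo_release(struct foo *f)
{
	/* Objects must go back to the cache they came from. */
	kmem_cache_free(foo_cache, f);
}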
@@ -483,9 +494,9 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
__alloc_size(1);
/**
- * kmalloc - allocate memory
+ * kmalloc - allocate kernel memory
* @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
+ * @flags: describe the allocation context
*
* kmalloc is the normal method of allocating memory
* for objects smaller than page size in the kernel.
@@ -512,12 +523,12 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
* %GFP_ATOMIC
* Allocation will not sleep. May use emergency pools.
*
- * %GFP_HIGHUSER
- * Allocate memory from high memory on behalf of user.
- *
* Also it is possible to set different flags by OR'ing
* in one or more of the following additional @flags:
*
+ * %__GFP_ZERO
+ * Zero the allocated memory before returning. Also see kzalloc().
+ *
* %__GFP_HIGH
* This allocation has high priority and may use emergency pools.
*
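
A short sketch of the flag combinations the revised kmalloc() text describes; both helpers are invented for illustration.

#include <linux/slab.h>

static void *buf_for_process_context(size_t len)
{
	/* GFP_KERNEL: may sleep and reclaim. OR-ing in __GFP_ZERO
	 * returns zeroed memory, equivalent to kzalloc(len, GFP_KERNEL). */
	return kmalloc(len, GFP_KERNEL | __GFP_ZERO);
}

static void *buf_for_atomic_context(size_t len)
{
	/* GFP_ATOMIC: will not sleep and may dip into emergency pools;
	 * for interrupt handlers and other atomic sections. */
	return kmalloc(len, GFP_ATOMIC);
}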
diff --git a/mm/slab.c b/mm/slab.c
index 59c8e28f7b6a..f6f3e51317d5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3446,16 +3446,6 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
return ret;
}
-/**
- * kmem_cache_alloc - Allocate an object
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- *
- * Allocate an object from this cache. The flags are only relevant
- * if the cache has no available objects.
- *
- * Return: pointer to the new object or %NULL in case of error
- */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
return __kmem_cache_alloc_lru(cachep, NULL, flags);
diff --git a/mm/slub.c b/mm/slub.c
index 157527d7101b..52b8995a03d1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2411,7 +2411,7 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
void *freelist)
{
- enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };
+ enum slab_modes { M_NONE, M_PARTIAL, M_FREE, M_FULL_NOLIST };
struct kmem_cache_node *n = get_node(s, slab_nid(slab));
int free_delta = 0;
enum slab_modes mode = M_NONE;
@@ -2487,14 +2487,6 @@ redo:
* acquire_slab() will see a slab that is frozen
*/
spin_lock_irqsave(&n->list_lock, flags);
- } else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
- mode = M_FULL;
- /*
- * This also ensures that the scanning of full
- * slabs from diagnostic functions will not see
- * any frozen slabs.
- */
- spin_lock_irqsave(&n->list_lock, flags);
} else {
mode = M_FULL_NOLIST;
}
@@ -2504,7 +2496,7 @@ redo:
old.freelist, old.counters,
new.freelist, new.counters,
"unfreezing slab")) {
- if (mode == M_PARTIAL || mode == M_FULL)
+ if (mode == M_PARTIAL)
spin_unlock_irqrestore(&n->list_lock, flags);
goto redo;
}
@@ -2518,10 +2510,6 @@ redo:
stat(s, DEACTIVATE_EMPTY);
discard_slab(s, slab);
stat(s, FREE_SLAB);
- } else if (mode == M_FULL) {
- add_full(s, n, slab);
- spin_unlock_irqrestore(&n->list_lock, flags);
- stat(s, DEACTIVATE_FULL);
} else if (mode == M_FULL_NOLIST) {
stat(s, DEACTIVATE_FULL);
}
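
With M_FULL gone, only three outcomes remain when a cpu slab is deactivated. As I read the cleanup, the branch was dead because SLUB no longer installs cpu slabs for caches with debug flags such as SLAB_STORE_USER, so deactivate_slab() cannot encounter one. An abridged sketch of the surviving mode selection follows; the helper is invented and condenses the cmpxchg retry loop away.

enum slab_modes { M_NONE, M_PARTIAL, M_FREE, M_FULL_NOLIST };

static enum slab_modes pick_mode(unsigned int inuse, void *freelist,
				 unsigned long nr_partial,
				 unsigned long min_partial)
{
	if (!inuse && nr_partial >= min_partial)
		return M_FREE;		/* empty, node has enough partial slabs */
	if (freelist)
		return M_PARTIAL;	/* free objects left: node partial list */
	return M_FULL_NOLIST;		/* fully allocated: tracked on no list */
}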
@@ -4017,7 +4005,8 @@ init_kmem_cache_node(struct kmem_cache_node *n)
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
- KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
+ NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
+ sizeof(struct kmem_cache_cpu));
/*
* Must align to double word boundary for the double cmpxchg