path: root/mm/slab.c
author    Vegard Nossum <vegard.nossum@gmail.com>    2009-06-15 15:50:49 +0200
committer Vegard Nossum <vegard.nossum@gmail.com>    2009-06-15 15:50:49 +0200
commit    722f2a6c87f34ee0fd0130a8cf45f81e0705594a (patch)
tree      50b054df34d2731eb0ba0cf1a6c27e43e7eed428 /mm/slab.c
parent    7a0aeb14e18ad59394bd9bbc6e57fb345819e748 (diff)
parent    45e3e1935e2857c54783291107d33323b3ef33c8 (diff)
download  linux-722f2a6c87f34ee0fd0130a8cf45f81e0705594a.tar.bz2
Merge commit 'linus/master' into HEAD
Conflicts:
	MAINTAINERS

Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	41
1 file changed, 30 insertions(+), 11 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 6a1ad0b9a94f..af3376d0a833 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -305,6 +305,12 @@ struct kmem_list3 {
};
/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
+/*
* Need this for bootstrapping a per node allocator.
*/
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
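
The comment above introduces slab_gfp_mask: while the allocator is being brought up with interrupts disabled, every allocation is clamped to SLAB_GFP_BOOT_MASK so that no __GFP_WAIT-style bit can sneak through and end up re-enabling interrupts. The standalone sketch below illustrates that clamping only; the flag names and bit values are placeholders, not the real GFP definitions.

/*
 * Illustrative userspace sketch, not kernel code: the flag names and bit
 * values are placeholders standing in for the real GFP flags.  It shows
 * how a restrictive boot-time mask strips the "may sleep / may do I/O"
 * bits from whatever a caller passes in.
 */
#include <stdio.h>

typedef unsigned int gfp_t;

#define MY_GFP_WAIT   0x01u   /* caller may sleep (placeholder bit) */
#define MY_GFP_IO     0x02u   /* caller may start I/O (placeholder bit) */
#define MY_GFP_FS     0x04u   /* caller may recurse into the FS (placeholder) */
#define MY_GFP_ZERO   0x08u   /* zero the allocation (placeholder bit) */
#define MY_GFP_ALL    0xffu   /* every bit allowed (placeholder) */

/* Boot-time mask: everything except the bits that could sleep. */
#define MY_GFP_BOOT_MASK (MY_GFP_ALL & ~(MY_GFP_WAIT | MY_GFP_IO | MY_GFP_FS))

static gfp_t slab_gfp_mask_model = MY_GFP_BOOT_MASK;

int main(void)
{
	gfp_t kernel_like = MY_GFP_WAIT | MY_GFP_IO | MY_GFP_FS | MY_GFP_ZERO;

	/* Same idea as the "flags &= slab_gfp_mask" added later in this diff. */
	printf("requested %#x, clamped to %#x during early boot\n",
	       kernel_like, kernel_like & slab_gfp_mask_model);
	return 0;
}
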
@@ -673,6 +679,7 @@ static enum {
NONE,
PARTIAL_AC,
PARTIAL_L3,
+ EARLY,
FULL
} g_cpucache_up;
@@ -681,7 +688,7 @@ static enum {
*/
int slab_is_available(void)
{
- return g_cpucache_up == FULL;
+ return g_cpucache_up >= EARLY;
}
static DEFINE_PER_CPU(struct delayed_work, reap_work);
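
With the new EARLY state, slab_is_available() starts reporting true as soon as the bootstrap caches can serve GFP_NOWAIT allocations, rather than only after the head arrays have reached their final sizes. The single ">= EARLY" test works because the states are declared in bring-up order, so it covers both EARLY and FULL. A minimal standalone illustration follows; the enum mirrors the hunk above, everything else is scaffolding.

/*
 * Illustrative sketch, not kernel code: the ordered comparison in
 * slab_is_available() relies on the declaration order of the states.
 */
#include <stdio.h>

enum cache_up { NONE, PARTIAL_AC, PARTIAL_L3, EARLY, FULL };

static enum cache_up g_state = NONE;

static int is_available(void)
{
	return g_state >= EARLY;   /* true for EARLY and FULL, false otherwise */
}

int main(void)
{
	static const char *names[] = {
		"NONE", "PARTIAL_AC", "PARTIAL_L3", "EARLY", "FULL"
	};
	int s;

	for (s = NONE; s <= FULL; s++) {
		g_state = (enum cache_up)s;
		printf("%-10s available=%d\n", names[s], is_available());
	}
	return 0;
}
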
@@ -1545,19 +1552,27 @@ void __init kmem_cache_init(void)
}
}
- /* 6) resize the head arrays to their final sizes */
- {
- struct kmem_cache *cachep;
- mutex_lock(&cache_chain_mutex);
- list_for_each_entry(cachep, &cache_chain, next)
- if (enable_cpucache(cachep, GFP_NOWAIT))
- BUG();
- mutex_unlock(&cache_chain_mutex);
- }
+ g_cpucache_up = EARLY;
/* Annotate slab for lockdep -- annotate the malloc caches */
init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+ struct kmem_cache *cachep;
+
+ /*
+ * Interrupts are enabled now so all GFP allocations are safe.
+ */
+ slab_gfp_mask = __GFP_BITS_MASK;
+ /* 6) resize the head arrays to their final sizes */
+ mutex_lock(&cache_chain_mutex);
+ list_for_each_entry(cachep, &cache_chain, next)
+ if (enable_cpucache(cachep, GFP_NOWAIT))
+ BUG();
+ mutex_unlock(&cache_chain_mutex);
/* Done! */
g_cpucache_up = FULL;
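
In this hunk kmem_cache_init() now stops at the EARLY state while interrupts are still disabled, and the head-array resizing moves into the new kmem_cache_init_late(), which also lifts slab_gfp_mask to __GFP_BITS_MASK once interrupts are safe to use. A compact standalone model of that two-phase bring-up is sketched below; the function and state names echo the hunk, while the mask values are placeholders.

/*
 * Standalone model, not kernel code: phase one runs with interrupts off
 * and leaves a restrictive GFP mask in place; phase two opens the mask
 * and marks the allocator FULL.  Mask bit values are placeholders.
 */
#include <stdio.h>

enum cache_up { NONE, PARTIAL_AC, PARTIAL_L3, EARLY, FULL };

static enum cache_up g_cpucache_up_model = NONE;
static unsigned int gfp_mask_model = 0x0fu;          /* restrictive boot mask */

static void model_kmem_cache_init(void)              /* interrupts still off */
{
	/* ... bootstrap caches using GFP_NOWAIT-style allocations ... */
	g_cpucache_up_model = EARLY;                  /* slab usable, but clamped */
}

static void model_kmem_cache_init_late(void)         /* interrupts now on */
{
	gfp_mask_model = ~0u;                         /* allow every GFP bit */
	/* ... resize the per-CPU head arrays to their final sizes ... */
	g_cpucache_up_model = FULL;
}

int main(void)
{
	model_kmem_cache_init();
	printf("after init:      state=%d mask=%#x\n",
	       g_cpucache_up_model, gfp_mask_model);

	model_kmem_cache_init_late();
	printf("after late init: state=%d mask=%#x\n",
	       g_cpucache_up_model, gfp_mask_model);
	return 0;
}
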
@@ -2034,7 +2049,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
for_each_online_node(node) {
cachep->nodelists[node] =
kmalloc_node(sizeof(struct kmem_list3),
- GFP_KERNEL, node);
+ gfp, node);
BUG_ON(!cachep->nodelists[node]);
kmem_list3_init(cachep->nodelists[node]);
}
@@ -3286,6 +3301,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long save_flags;
void *ptr;
+ flags &= slab_gfp_mask;
+
lockdep_trace_alloc(flags);
if (slab_should_failslab(cachep, flags))
@@ -3369,6 +3386,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
unsigned long save_flags;
void *objp;
+ flags &= slab_gfp_mask;
+
lockdep_trace_alloc(flags);
if (slab_should_failslab(cachep, flags))