Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 32
1 file changed, 20 insertions, 12 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 3d3a8a7a0f8c..25f14ad8f817 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1518,11 +1518,9 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 /*
  * Management of partially allocated slabs.
  */
-static inline void add_partial(struct kmem_cache_node *n,
-				struct page *page, int tail)
+static inline void
+__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
 {
-	lockdep_assert_held(&n->list_lock);
-
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
@@ -1530,15 +1528,27 @@ static inline void add_partial(struct kmem_cache_node *n,
 		list_add(&page->lru, &n->partial);
 }
 
-static inline void remove_partial(struct kmem_cache_node *n,
-					struct page *page)
+static inline void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
 {
 	lockdep_assert_held(&n->list_lock);
+	__add_partial(n, page, tail);
+}
 
+static inline void
+__remove_partial(struct kmem_cache_node *n, struct page *page)
+{
 	list_del(&page->lru);
 	n->nr_partial--;
 }
 
+static inline void remove_partial(struct kmem_cache_node *n,
+					struct page *page)
+{
+	lockdep_assert_held(&n->list_lock);
+	__remove_partial(n, page);
+}
+
 /*
  * Remove slab from the partial list, freeze it and
  * return the pointer to the freelist.
@@ -2904,12 +2914,10 @@ static void early_kmem_cache_node_alloc(int node)
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
 	/*
-	 * the lock is for lockdep's sake, not for any actual
-	 * race protection
+	 * No locks need to be taken here as it has just been
+	 * initialized and there is no concurrent access.
 	 */
-	spin_lock(&n->list_lock);
-	add_partial(n, page, DEACTIVATE_TO_HEAD);
-	spin_unlock(&n->list_lock);
+	__add_partial(n, page, DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3195,7 +3203,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			remove_partial(n, page);
+			__remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
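
The patch splits add_partial()/remove_partial() into lock-asserting wrappers around new __add_partial()/__remove_partial() helpers, so callers that cannot race (a node still being initialized in early_kmem_cache_node_alloc(), a cache being torn down in free_partial()) can skip both the lock and the lockdep assertion. As a rough illustration of the pattern only (userspace C, with assert() on a flag standing in for lockdep_assert_held() and a simplified struct in place of struct kmem_cache_node):

/* sketch.c - illustrative userspace analogue, not kernel code */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct node {
	pthread_mutex_t list_lock;
	int locked;		/* crude stand-in for lockdep's held-lock tracking */
	int nr_partial;
};

/* Unlocked variant: caller guarantees exclusion, e.g. the node is
 * freshly allocated and not yet visible to any other thread. */
static inline void __add_partial(struct node *n)
{
	n->nr_partial++;
}

/* Locked variant: assert the lock is held, then delegate. */
static inline void add_partial(struct node *n)
{
	assert(n->locked);	/* analogue of lockdep_assert_held() */
	__add_partial(n);
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	/* Early-init path: nothing else can see n yet, so skip the lock. */
	__add_partial(&n);

	/* Normal path: take the lock and use the asserting wrapper. */
	pthread_mutex_lock(&n.list_lock);
	n.locked = 1;
	add_partial(&n);
	n.locked = 0;
	pthread_mutex_unlock(&n.list_lock);

	printf("nr_partial = %d\n", n.nr_partial);
	return 0;
}

The double-underscore naming follows the common kernel convention that __foo() is the "caller already holds, or provably does not need, the lock" variant of foo(); the hunks above then use it to drop a spin_lock/spin_unlock pair that existed only to keep lockdep quiet.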