Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	66
-rw-r--r--	mm/slab_common.c	2
-rw-r--r--	mm/slub.c	6
-rw-r--r--	mm/vmalloc.c	6
4 files changed, 20 insertions, 60 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index de2491c42d4f..7023a31edc5c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -63,6 +63,7 @@
 #include <linux/sched/rt.h>
 #include <linux/page_owner.h>
 #include <linux/kthread.h>
+#include <linux/memcontrol.h>

 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -1018,6 +1019,10 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	}
 	if (PageMappingFlags(page))
 		page->mapping = NULL;
+	if (memcg_kmem_enabled() && PageKmemcg(page)) {
+		memcg_kmem_uncharge(page, order);
+		__ClearPageKmemcg(page);
+	}
 	if (check_free)
 		bad += free_pages_check(page);
 	if (bad)
@@ -3841,6 +3846,14 @@ no_zone:
 	}

 out:
+	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) {
+		if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) {
+			__free_pages(page, order);
+			page = NULL;
+		} else
+			__SetPageKmemcg(page);
+	}
+
 	if (kmemcheck_enabled && page)
 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);

@@ -3996,59 +4009,6 @@ void __free_page_frag(void *addr)
 }
 EXPORT_SYMBOL(__free_page_frag);

-/*
- * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
- * of the current memory cgroup if __GFP_ACCOUNT is set, other than that it is
- * equivalent to alloc_pages.
- *
- * It should be used when the caller would like to use kmalloc, but since the
- * allocation is large, it has to fall back to the page allocator.
- */
-struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
-{
-	struct page *page;
-
-	page = alloc_pages(gfp_mask, order);
-	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
-	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
-		__free_pages(page, order);
-		page = NULL;
-	}
-	return page;
-}
-
-struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
-{
-	struct page *page;
-
-	page = alloc_pages_node(nid, gfp_mask, order);
-	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
-	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
-		__free_pages(page, order);
-		page = NULL;
-	}
-	return page;
-}
-
-/*
- * __free_kmem_pages and free_kmem_pages will free pages allocated with
- * alloc_kmem_pages.
- */
-void __free_kmem_pages(struct page *page, unsigned int order)
-{
-	if (memcg_kmem_enabled())
-		memcg_kmem_uncharge(page, order);
-	__free_pages(page, order);
-}
-
-void free_kmem_pages(unsigned long addr, unsigned int order)
-{
-	if (addr != 0) {
-		VM_BUG_ON(!virt_addr_valid((void *)addr));
-		__free_kmem_pages(virt_to_page((void *)addr), order);
-	}
-}
-
 static void *make_alloc_exact(unsigned long addr, unsigned int order,
 			      size_t size)
 {
diff --git a/mm/slab_common.c b/mm/slab_common.c
index da88c1588752..71f0b28a1bec 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1012,7 +1012,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	struct page *page;

 	flags |= __GFP_COMP;
-	page = alloc_kmem_pages(flags, order);
+	page = alloc_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
 	kmemleak_alloc(ret, size, 1, flags);
 	kasan_kmalloc_large(ret, size, flags);
diff --git a/mm/slub.c b/mm/slub.c
index c0cfa2722539..f9da8716b8b3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2977,7 +2977,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kfree_hook(object);
-		__free_kmem_pages(page, compound_order(page));
+		__free_pages(page, compound_order(page));
 		p[size] = NULL; /* mark object processed */
 		return size;
 	}
@@ -3693,7 +3693,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	void *ptr = NULL;

 	flags |= __GFP_COMP | __GFP_NOTRACK;
-	page = alloc_kmem_pages_node(node, flags, get_order(size));
+	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		ptr = page_address(page);

@@ -3774,7 +3774,7 @@ void kfree(const void *x)
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kfree_hook(x);
-		__free_kmem_pages(page, compound_order(page));
+		__free_pages(page, compound_order(page));
 		return;
 	}
 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e11475cdeb7a..91f44e78c516 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1501,7 +1501,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 			struct page *page = area->pages[i];

 			BUG_ON(!page);
-			__free_kmem_pages(page, 0);
+			__free_pages(page, 0);
 		}

 		kvfree(area->pages);
@@ -1629,9 +1629,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 			struct page *page;

 			if (node == NUMA_NO_NODE)
-				page = alloc_kmem_pages(alloc_mask, order);
+				page = alloc_pages(alloc_mask, order);
 			else
-				page = alloc_pages_node(node, alloc_mask, order);
+				page = alloc_pages_node(node, alloc_mask, order);

 			if (unlikely(!page)) {
 				/* Successfully allocated i pages, free them in __vunmap() */
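
With alloc_kmem_pages() and its variants removed, a caller that wants its pages
charged to the current memory cgroup simply passes __GFP_ACCOUNT to the plain
page allocator: the charge now happens in the generic allocation path (which
tags the page with PageKmemcg), and free_pages_prepare() uncharges it on free.
A minimal sketch of the resulting calling convention, assuming kernel context;
the my_buf_alloc/my_buf_free helpers are hypothetical and not part of this
patch:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: a large buffer accounted to the current memcg. */
static void *my_buf_alloc(size_t size)
{
	/*
	 * __GFP_ACCOUNT makes the page allocator charge the pages to the
	 * current memcg and set PageKmemcg on success; allocation fails
	 * (returns NULL) if the charge cannot be made.
	 */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT,
					get_order(size));

	return page ? page_address(page) : NULL;
}

static void my_buf_free(void *ptr, size_t size)
{
	/*
	 * No dedicated free path is needed any more: free_pages_prepare()
	 * sees PageKmemcg and uncharges the pages automatically.
	 */
	if (ptr)
		__free_pages(virt_to_page(ptr), get_order(size));
}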