-rw-r--r--  arch/x86/mm/init_64.c |  1 -
-rw-r--r--  mm/sparse-vmemmap.c   | 12 ++++++++++--
2 files changed, 10 insertions, 3 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index c3fc544b50d2..4a837289f2ad 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1405,7 +1405,6 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
 			vmemmap_verify((pte_t *)pmd, node, addr, next);
 			continue;
 		}
-		pr_warn_once("vmemmap: falling back to regular page backing\n");
 		if (vmemmap_populate_basepages(addr, next, node))
 			return -ENOMEM;
 	}
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 4e49762599c8..17acf01791fa 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -53,12 +53,20 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 {
 	/* If the main allocator is up use that, fallback to bootmem. */
 	if (slab_is_available()) {
+		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
+		int order = get_order(size);
+		static bool warned;
 		struct page *page;
 
-		page = alloc_pages_node(node, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
-					get_order(size));
+		page = alloc_pages_node(node, gfp_mask, order);
 		if (page)
 			return page_address(page);
+
+		if (!warned) {
+			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
+				   "vmemmap alloc failure: order:%u", order);
+			warned = true;
+		}
 		return NULL;
 	} else
 		return __earlyonly_bootmem_alloc(node, size, size,
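The change above silences the page allocator's own failure report with __GFP_NOWARN and instead emits a single warn_alloc() the first time a vmemmap block allocation fails, guarded by a function-local static flag. Below is a minimal userspace sketch of that warn-once pattern; alloc_block() is a made-up stand-in for vmemmap_alloc_block(), and malloc()/fprintf() stand in for alloc_pages_node()/warn_alloc().

/*
 * Sketch of the warn-once pattern used in the patch: attempt the
 * allocation quietly, and let only the first failure produce a
 * diagnostic.  Names here are illustrative, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_block(size_t size)
{
	static bool warned;		/* persists across calls, as in the patch */
	void *p = malloc(size);		/* quiet attempt, like __GFP_NOWARN */

	if (p)
		return p;

	if (!warned) {			/* report only the first failure */
		fprintf(stderr, "alloc failure: size:%zu\n", size);
		warned = true;
	}
	return NULL;			/* later failures stay silent */
}

int main(void)
{
	/* Three failing requests produce one warning, not three. */
	for (int i = 0; i < 3; i++)
		alloc_block((size_t)-1);	/* guaranteed to fail */
	return 0;
}

The same reasoning explains dropping the pr_warn_once() from vmemmap_populate_hugepages(): a large memory hotplug can fail many huge-page allocations in a row while falling back to base pages, and one order-annotated warn_alloc() report is enough to diagnose the condition without flooding the log.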