-rw-r--r--  mm/internal.h   |  4 +
-rw-r--r--  mm/page_alloc.c | 24 +++-
-rw-r--r--  mm/sparse.c     |  6 +-
3 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 31ff935b2547..57e28261a3b1 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -211,6 +211,10 @@ extern void zone_pcp_reset(struct zone *zone);
 extern void zone_pcp_disable(struct zone *zone);
 extern void zone_pcp_enable(struct zone *zone);
 
+extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
+			  phys_addr_t min_addr,
+			  int nid, bool exact_nid);
+
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
 /*
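The prototype above is all other files need to see; the call sites below show the before/after shape. A minimal sketch (both forms are taken from the hunks in this patch):

	/* before: raw memblock call; the returned range is not poisoned */
	map = memblock_alloc_try_nid_raw(size, align, min_addr,
					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	/* after: same allocation, plus page_init_poison() on success */
	map = memmap_alloc(size, align, min_addr, nid, false);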
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bd2efd6287ce..71ad97c96075 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6748,6 +6748,26 @@ static void __init memmap_init(void)
 			init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
 }
 
+void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
+			  phys_addr_t min_addr, int nid, bool exact_nid)
+{
+	void *ptr;
+
+	if (exact_nid)
+		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
+						   MEMBLOCK_ALLOC_ACCESSIBLE,
+						   nid);
+	else
+		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
+						 MEMBLOCK_ALLOC_ACCESSIBLE,
+						 nid);
+
+	if (ptr && size > 0)
+		page_init_poison(ptr, size);
+
+	return ptr;
+}
+
 static int zone_batchsize(struct zone *zone)
 {
 #ifdef CONFIG_MMU
@@ -7519,8 +7539,8 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
 		end = pgdat_end_pfn(pgdat);
 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
 		size = (end - start) * sizeof(struct page);
-		map = memblock_alloc_node(size, SMP_CACHE_BYTES,
-					  pgdat->node_id);
+		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
+				   pgdat->node_id, false);
 		if (!map)
 			panic("Failed to allocate %ld bytes for node %d memory map\n",
 			      size, pgdat->node_id);
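The exact_nid flag picks the memblock entry point: true maps to memblock_alloc_exact_nid_raw() (node-local or fail), false to memblock_alloc_try_nid_raw() (prefer the node, fall back elsewhere). A sketch of the two call shapes, using only identifiers from this patch:

	/* node-preferred with fallback, as in alloc_node_mem_map() above */
	map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
			   pgdat->node_id, false);

	/* strictly node-local, as in sparse_buffer_init() below */
	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);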
diff --git a/mm/sparse.c b/mm/sparse.c
index fd29b3abffa0..120bc8ea5293 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -436,8 +436,7 @@ struct page __init *__populate_section_memmap(unsigned long pfn,
 	if (map)
 		return map;
 
-	map = memblock_alloc_try_nid_raw(size, size, addr,
-					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+	map = memmap_alloc(size, size, addr, nid, false);
 	if (!map)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
 		      __func__, size, PAGE_SIZE, nid, &addr);
@@ -464,8 +463,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
* and we want it to be properly aligned to the section size - this is
* especially the case for VMEMMAP which maps memmap to PMDs
*/
- sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
- addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+ sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
sparsemap_buf_end = sparsemap_buf + size;
}