Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--   arch/ia64/mm/contig.c   48
-rw-r--r--   arch/ia64/mm/init.c     14
2 files changed, 21 insertions, 41 deletions
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index ba81d8cb0059..bfc4ecd0a2ab 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -19,15 +19,12 @@
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
+#include <linux/sizes.h>

#include <asm/meminit.h>
#include <asm/sections.h>
#include <asm/mca.h>

-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static unsigned long max_gap;
-#endif
-
/* physical address where the bootmem map is located */
unsigned long bootmap_start;
@@ -166,33 +163,30 @@ find_memory (void)
	alloc_per_cpu_data();
}

-static void __init virtual_map_init(void)
+static int __init find_largest_hole(u64 start, u64 end, void *arg)
{
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
-	if (max_gap < LARGE_GAP) {
-		vmem_map = (struct page *) 0;
-	} else {
-		unsigned long map_size;
+	u64 *max_gap = arg;
-		/* allocate virtual_mem_map */
+	static u64 last_end = PAGE_OFFSET;
-		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
-			sizeof(struct page));
-		VMALLOC_END -= map_size;
-		vmem_map = (struct page *) VMALLOC_END;
-		efi_memmap_walk(create_mem_map_page_table, NULL);
+	/* NOTE: this algorithm assumes efi memmap table is ordered */
-		/*
-		 * alloc_node_mem_map makes an adjustment for mem_map
-		 * which isn't compatible with vmem_map.
-		 */
-		NODE_DATA(0)->node_mem_map = vmem_map +
-			find_min_pfn_with_active_regions();
+	if (*max_gap < (start - last_end))
+		*max_gap = start - last_end;
+	last_end = end;
+	return 0;
+}
-		printk("Virtual mem_map starts at 0x%p\n", mem_map);
-	}
-#endif /* !CONFIG_VIRTUAL_MEM_MAP */
+static void __init verify_gap_absence(void)
+{
+	unsigned long max_gap;
+
+	/* Forbid FLATMEM if hole is > than 1G */
+	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
+	if (max_gap >= SZ_1G)
+		panic("Cannot use FLATMEM with %ldMB hole\n"
+		      "Please switch over to SPARSEMEM\n",
+		      (max_gap >> 20));
}

/*
@@ -210,7 +204,7 @@ paging_init (void)
	max_zone_pfns[ZONE_DMA32] = max_dma;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-	virtual_map_init();
+	verify_gap_absence();
	free_area_init(max_zone_pfns);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ef12e097f318..9b5acf8fb092 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -574,20 +574,6 @@ ia64_pfn_valid (unsigned long pfn)
}
EXPORT_SYMBOL(ia64_pfn_valid);

-int __init find_largest_hole(u64 start, u64 end, void *arg)
-{
-	u64 *max_gap = arg;
-
-	static u64 last_end = PAGE_OFFSET;
-
-	/* NOTE: this algorithm assumes efi memmap table is ordered */
-
-	if (*max_gap < (start - last_end))
-		*max_gap = start - last_end;
-	last_end = end;
-	return 0;
-}
-
#endif /* CONFIG_VIRTUAL_MEM_MAP */

int __init register_active_ranges(u64 start, u64 len, int nid)
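
Note (not part of the patch): the check that replaces the old virtual mem_map setup is a single ordered pass over the memory map, remembering the largest distance between the end of one range and the start of the next; if that gap reaches 1 GB, paging_init() now panics and asks for SPARSEMEM instead of FLATMEM. Below is a minimal user-space sketch of the same scan, with invented ranges and standalone names in place of the kernel's efi_memmap_walk() callback interface.

/*
 * Illustrative only: user-space rendition of the largest-hole scan.
 * The ranges are made up; a real walk visits EFI memmap entries in
 * address order, exactly as find_largest_hole() assumes.
 */
#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };

int main(void)
{
	/* hypothetical, already-sorted physical memory ranges (bytes) */
	struct range map[] = {
		{ 0x00000000ULL, 0x3fe00000ULL },
		{ 0x80000000ULL, 0xbff00000ULL },	/* ~1 GB hole before this */
		{ 0xc0000000ULL, 0xffe00000ULL },
	};
	uint64_t last_end = 0, max_gap = 0;
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		/* widen max_gap if the hole before this range is the biggest so far */
		if (map[i].start - last_end > max_gap)
			max_gap = map[i].start - last_end;
		last_end = map[i].end;
	}

	/* with these ranges the largest hole is 1026 MB, so FLATMEM would be refused */
	printf("largest hole: %llu MB\n", (unsigned long long)(max_gap >> 20));
	return 0;
}

The kernel version seeds last_end with PAGE_OFFSET and runs the comparison once per memory map entry via efi_memmap_walk(); the arithmetic is the same as in the loop above.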