author		Yinghai Lu <yinghai@kernel.org>	2012-11-16 19:38:58 -0800
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-11-17 11:59:19 -0800
commit		8d57470d8f859635deffe3919d7d4867b488b85a (patch)
tree		8576bb911f3f671952a6d2310b787e1ed55e4456 /arch/x86/mm/init_64.c
parent		f763ad1d3870abb811ec7520b4c1adc56471a3a4 (diff)
x86, mm: setup page table in top-down
Get pgt_buf early from the BRK area, and use it to map PMD_SIZE from the top first. Then use the newly mapped pages to map more ranges below, and keep looping until all pages are mapped.

alloc_low_page() will use pages from BRK first; after that buffer is used up, it will use memblock to find and reserve pages for page table usage.

Introduce min_pfn_mapped to make sure new pages are found only in already-mapped ranges; it is updated as lower pages get mapped.

Also add step_size so that we do not try to map too big a range with the limited mapped pages available initially, and increase step_size as more mapped pages become available.

We do not need to call pagetable_reserve() anymore; the reserve work is done in alloc_low_page() directly.

At last we can get rid of the calculate-and-find-early-pgt related code.

-v2: update to after the fix_xen change; also use a macro for the initial pgt_buf size and add comments for it.
-v3: skip big reserved ranges in memblock.reserved near the end.
-v4: the fix_xen change is not needed now.
-v5: add a changelog note about moving the page-table reservation into alloc_low_page().

Suggested-by: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-22-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
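The top-down order described in the changelog can be hard to picture from prose alone, so here is a minimal user-space sketch of the idea: start just below the top of memory with a small step, map that window, then grow the step because each round leaves more mapped memory behind to hold page tables. The constants, the map_range() helper, and the memory layout are assumptions for illustration only, not the kernel's implementation.

/*
 * Hedged sketch of the top-down mapping loop (assumes 64-bit longs).
 * STEP_SIZE_SHIFT, map_range() and the 4GB "top of RAM" are invented
 * for the demo; only the shrinking/growing pattern mirrors the patch.
 */
#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define STEP_SIZE_SHIFT	5			/* grow the step 32x per round (assumed) */

/* Stand-in for the real page-table mapping; here it only logs the range. */
static void map_range(unsigned long start, unsigned long end)
{
	printf("map [%#018lx - %#018lx)\n", start, end);
}

int main(void)
{
	unsigned long isa_end    = 1UL << 20;	/* keep the low 1MB for last */
	unsigned long top        = 4UL << 30;	/* pretend top of RAM: 4GB */
	unsigned long step_size  = PMD_SIZE;	/* start with a single 2MB unit */
	unsigned long last_start = top;

	while (last_start > isa_end) {
		unsigned long start;

		if (last_start > step_size) {
			start = last_start - step_size;
			if (start < isa_end)
				start = isa_end;
		} else {
			start = isa_end;
		}
		map_range(start, last_start);	/* pages mapped here can hold more page tables */
		last_start = start;
		step_size <<= STEP_SIZE_SHIFT;	/* more mapped memory -> take bigger steps */
	}
	map_range(0, isa_end);			/* finally map [0, 1MB) */
	return 0;
}

Running the sketch prints a 2MB window at the very top, then a 64MB window below it, and so on, which is the "map a little, then use it to map a lot" progression the changelog describes.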
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--	arch/x86/mm/init_64.c	17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index fa28e3e29741..eefaea637b3d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -316,7 +316,7 @@ void __init cleanup_highmap(void)
 
 static __ref void *alloc_low_page(unsigned long *phys)
 {
-	unsigned long pfn = pgt_buf_end++;
+	unsigned long pfn;
 	void *adr;
 
 	if (after_bootmem) {
@@ -326,8 +326,19 @@ static __ref void *alloc_low_page(unsigned long *phys)
 		return adr;
 	}
 
-	if (pfn >= pgt_buf_top)
-		panic("alloc_low_page: ran out of memory");
+	if ((pgt_buf_end + 1) >= pgt_buf_top) {
+		unsigned long ret;
+		if (min_pfn_mapped >= max_pfn_mapped)
+			panic("alloc_low_page: ran out of memory");
+		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
+					max_pfn_mapped << PAGE_SHIFT,
+					PAGE_SIZE, PAGE_SIZE);
+		if (!ret)
+			panic("alloc_low_page: can not alloc memory");
+		memblock_reserve(ret, PAGE_SIZE);
+		pfn = ret >> PAGE_SHIFT;
+	} else
+		pfn = pgt_buf_end++;
 
 	adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
 	clear_page(adr);
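For readers tracing the new control flow, the following is a hedged user-space sketch of the allocation order alloc_low_page() follows after this patch: pages come from the small BRK buffer first, and once that is nearly exhausted, from a page found inside the already-mapped PFN window and reserved on the spot. The fake PFN values, buffer size, and the find_free_pfn() helper are invented for the demo; only the decision logic mirrors the hunk above.

/* Sketch only: not kernel code.  Simulates the BRK-first, memblock-second order. */
#include <stdio.h>
#include <stdlib.h>

#define PGT_BUF_PAGES 4				/* assumed size of the BRK buffer */

static unsigned long pgt_buf_end     = 0x1000;	/* fake PFNs for the demo */
static unsigned long pgt_buf_top     = 0x1000 + PGT_BUF_PAGES;
static unsigned long min_pfn_mapped  = 0x2000;
static unsigned long max_pfn_mapped  = 0x3000;

/* Stand-in for memblock_find_in_range() + memblock_reserve(): hand out the
 * lowest already-mapped PFN and consume it. */
static unsigned long find_free_pfn(void)
{
	return min_pfn_mapped++;
}

static unsigned long alloc_low_pfn(void)
{
	if (pgt_buf_end + 1 >= pgt_buf_top) {
		if (min_pfn_mapped >= max_pfn_mapped) {
			fprintf(stderr, "out of memory for page tables\n");
			exit(1);
		}
		return find_free_pfn();		/* fallback: allocate from mapped ranges */
	}
	return pgt_buf_end++;			/* fast path: still room in the BRK buffer */
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("page-table page %d -> pfn %#lx\n", i, alloc_low_pfn());
	return 0;
}

The first few allocations come from the BRK-backed buffer, after which every request falls through to the mapped-range fallback, which is why the patch can drop the old up-front pgt_buf size calculation and the pagetable_reserve() call.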