author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-12 11:20:52 +0900 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-12 11:20:52 +0900 |
commit | 2612c49db3808f976ba12eed4170917f49693ac1 (patch) | |
tree | 7bb9fc021fe6acbc78ae87f54a001933b3cc5086 /arch | |
parent | 340286cd4e9aff841affb897f6d2535ed27605cf (diff) | |
parent | 6979287a7df66a92d6f308338e972a406f9ef842 (diff) | |
download | linux-2612c49db3808f976ba12eed4170917f49693ac1.tar.bz2 | |
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm fixlet from Ingo Molnar:
"One cleanup that documents a particular detail in init_mem_mapping()"
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm: Add 'step_size' comments to init_mem_mapping()
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/mm/init.c | 23 |
1 file changed, 20 insertions(+), 3 deletions(-)
```diff
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 04664cdb7fda..ce32017c5e38 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -399,8 +399,25 @@ static unsigned long __init init_range_memory_mapping(
 	return mapped_ram_size;
 }
 
-/* (PUD_SHIFT-PMD_SHIFT)/2 */
-#define STEP_SIZE_SHIFT 5
+static unsigned long __init get_new_step_size(unsigned long step_size)
+{
+	/*
+	 * Explain why we shift by 5 and why we don't have to worry about
+	 * 'step_size << 5' overflowing:
+	 *
+	 * initial mapped size is PMD_SIZE (2M).
+	 * We can not set step_size to be PUD_SIZE (1G) yet.
+	 * In worse case, when we cross the 1G boundary, and
+	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
+	 * to map 1G range with PTE. Use 5 as shift for now.
+	 *
+	 * Don't need to worry about overflow, on 32bit, when step_size
+	 * is 0, round_down() returns 0 for start, and that turns it
+	 * into 0x100000000ULL.
+	 */
+	return step_size << 5;
+}
+
 void __init init_mem_mapping(void)
 {
 	unsigned long end, real_end, start, last_start;
@@ -449,7 +466,7 @@ void __init init_mem_mapping(void)
 		min_pfn_mapped = last_start >> PAGE_SHIFT;
 		/* only increase step_size after big range get mapped */
 		if (new_mapped_ram_size > mapped_ram_size)
-			step_size <<= STEP_SIZE_SHIFT;
+			step_size = get_new_step_size(step_size);
 		mapped_ram_size += new_mapped_ram_size;
 	}
 
```
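The new comment explains the choice of a shift of 5: the mapping window starts at PMD_SIZE (2M) and is multiplied by 32 once a round has mapped a big enough range. Below is a minimal user-space C sketch of that growth rule only; it is not kernel code, and the 2M start value, the 16G target, and the bottom-up loop are assumptions made purely for illustration (init_mem_mapping() actually walks memory top-down):

```c
#include <stdio.h>

/*
 * Hypothetical stand-ins, chosen only for this illustration:
 * INITIAL_STEP mimics PMD_SIZE (2M), TARGET_RAM is a made-up machine size.
 */
#define INITIAL_STEP	(2ULL << 20)	/* 2M */
#define TARGET_RAM	(16ULL << 30)	/* pretend 16G must be mapped */

/* Same growth rule as the patch's get_new_step_size(): multiply by 32. */
static unsigned long long grow_step(unsigned long long step)
{
	return step << 5;
}

int main(void)
{
	unsigned long long mapped = 0, step = INITIAL_STEP;
	int round = 0;

	/* Simplified bottom-up loop; only the per-round window growth
	 * mirrors the kernel logic. */
	while (mapped < TARGET_RAM) {
		unsigned long long chunk = step;

		if (TARGET_RAM - mapped < chunk)
			chunk = TARGET_RAM - mapped;
		mapped += chunk;
		printf("round %d: window %llu MiB, mapped %llu MiB\n",
		       ++round, step >> 20, mapped >> 20);
		step = grow_step(step);
	}
	return 0;
}
```

With a factor of 32 per round the window grows 2M -> 64M -> 2G -> 64G, so even a large machine is covered in a handful of rounds. As the patch comment notes, the step size cannot simply start at PUD_SIZE (1G): in the worst case, mapping across a 1G boundary with 4K PTEs needs 1+1+512 page-table pages (about 2M + 8K), which is why the window has to grow from 2M in stages.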