From e3239ff92a17976ac5d26fa0fe40ef3a9daf2523 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 4 Aug 2010 14:06:41 +1000 Subject: memblock: Rename memblock_region to memblock_type and memblock_property to memblock_region Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hash_utils_64.c | 4 ++-- arch/powerpc/mm/mem.c | 26 +++++++++++++------------- 2 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 09dffe6efa46..b1a3784744db 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -659,8 +659,8 @@ static void __init htab_initialize(void) /* create bolted the linear mapping in the hash table */ for (i=0; i < memblock.memory.cnt; i++) { - base = (unsigned long)__va(memblock.memory.region[i].base); - size = memblock.memory.region[i].size; + base = (unsigned long)__va(memblock.memory.regions[i].base); + size = memblock.memory.regions[i].size; DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 1a84a8d00005..a33f5c186fb7 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -86,10 +86,10 @@ int page_is_ram(unsigned long pfn) for (i=0; i < memblock.memory.cnt; i++) { unsigned long base; - base = memblock.memory.region[i].base; + base = memblock.memory.regions[i].base; if ((paddr >= base) && - (paddr < (base + memblock.memory.region[i].size))) { + (paddr < (base + memblock.memory.regions[i].size))) { return 1; } } @@ -149,7 +149,7 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)) { - struct memblock_property res; + struct memblock_region res; unsigned long pfn, len; u64 end; int ret = -1; @@ -206,7 +206,7 @@ void __init do_init_bootmem(void) /* Add active regions with valid PFNs */ for (i = 0; i < memblock.memory.cnt; i++) { unsigned long start_pfn, end_pfn; - start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT; + start_pfn = memblock.memory.regions[i].base >> PAGE_SHIFT; end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); add_active_range(0, start_pfn, end_pfn); } @@ -219,16 +219,16 @@ void __init do_init_bootmem(void) /* reserve the sections we're already using */ for (i = 0; i < memblock.reserved.cnt; i++) { - unsigned long addr = memblock.reserved.region[i].base + + unsigned long addr = memblock.reserved.regions[i].base + memblock_size_bytes(&memblock.reserved, i) - 1; if (addr < lowmem_end_addr) - reserve_bootmem(memblock.reserved.region[i].base, + reserve_bootmem(memblock.reserved.regions[i].base, memblock_size_bytes(&memblock.reserved, i), BOOTMEM_DEFAULT); - else if (memblock.reserved.region[i].base < lowmem_end_addr) { + else if (memblock.reserved.regions[i].base < lowmem_end_addr) { unsigned long adjusted_size = lowmem_end_addr - - memblock.reserved.region[i].base; - reserve_bootmem(memblock.reserved.region[i].base, + memblock.reserved.regions[i].base; + reserve_bootmem(memblock.reserved.regions[i].base, adjusted_size, BOOTMEM_DEFAULT); } } @@ -237,7 +237,7 @@ void __init do_init_bootmem(void) /* reserve the sections we're already using */ for (i = 0; i < memblock.reserved.cnt; i++) - reserve_bootmem(memblock.reserved.region[i].base, + reserve_bootmem(memblock.reserved.regions[i].base, memblock_size_bytes(&memblock.reserved, i), BOOTMEM_DEFAULT); @@ -257,10 +257,10 @@ static int __init mark_nonram_nosave(void) for (i = 0; i < memblock.memory.cnt - 1; i++) { 
memblock_region_max_pfn = - (memblock.memory.region[i].base >> PAGE_SHIFT) + - (memblock.memory.region[i].size >> PAGE_SHIFT); + (memblock.memory.regions[i].base >> PAGE_SHIFT) + + (memblock.memory.regions[i].size >> PAGE_SHIFT); memblock_next_region_start_pfn = - memblock.memory.region[i+1].base >> PAGE_SHIFT; + memblock.memory.regions[i+1].base >> PAGE_SHIFT; if (memblock_region_max_pfn < memblock_next_region_start_pfn) register_nosave_region(memblock_region_max_pfn, -- cgit v1.2.3 From 28be7072ce54b82642ebff6a80d474d4c6a6a7fd Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 4 Aug 2010 13:43:53 +1000 Subject: memblock/powerpc: Use new accessors Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hash_utils_64.c | 8 ++-- arch/powerpc/mm/mem.c | 92 +++++++++++++++-------------------------- arch/powerpc/mm/numa.c | 17 ++++---- 3 files changed, 46 insertions(+), 71 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index b1a3784744db..4072b871497d 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -588,7 +588,7 @@ static void __init htab_initialize(void) unsigned long pteg_count; unsigned long prot; unsigned long base = 0, size = 0, limit; - int i; + struct memblock_region *reg; DBG(" -> htab_initialize()\n"); @@ -659,9 +659,9 @@ static void __init htab_initialize(void) */ /* create bolted the linear mapping in the hash table */ - for (i=0; i < memblock.memory.cnt; i++) { - base = (unsigned long)__va(memblock.memory.regions[i].base); - size = memblock.memory.regions[i].size; + for_each_memblock(memory, reg) { + base = (unsigned long)__va(reg->base); + size = reg->size; DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", base, size, prot); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index a33f5c186fb7..52df5428ece4 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -82,18 +82,11 @@ int page_is_ram(unsigned long pfn) return pfn < max_pfn; #else unsigned long paddr = (pfn << PAGE_SHIFT); - int i; - for (i=0; i < memblock.memory.cnt; i++) { unsigned long base; + struct memblock_region *reg; - base = memblock.memory.regions[i].base; - - if ((paddr >= base) && - (paddr < (base + memblock.memory.regions[i].size))) { + for_each_memblock(memory, reg) + if (paddr >= reg->base && paddr < (reg->base + reg->size)) return 1; - } - } - return 0; #endif } @@ -149,23 +142,19 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)) { - struct memblock_region res; - unsigned long pfn, len; - u64 end; + struct memblock_region *reg; + unsigned long end_pfn = start_pfn + nr_pages; + unsigned long tstart, tend; int ret = -1; - res.base = (u64) start_pfn << PAGE_SHIFT; - res.size = (u64) nr_pages << PAGE_SHIFT; - - end = res.base + res.size - 1; - while ((res.base < end) && (memblock_find(&res) >= 0)) { - pfn = (unsigned long)(res.base >> PAGE_SHIFT); - len = (unsigned long)(res.size >> PAGE_SHIFT); - ret = (*func)(pfn, len, arg); + for_each_memblock(memory, reg) { + tstart = max(start_pfn, memblock_region_base_pfn(reg)); + tend = min(end_pfn, memblock_region_end_pfn(reg)); + if (tstart >= tend) + continue; + ret = (*func)(tstart, tend - tstart, arg); if (ret) break; - res.base += (res.size + 1); - res.size = (end - res.base + 1); } return ret; } @@ -179,9 +168,9 @@ EXPORT_SYMBOL_GPL(walk_system_ram_range); #ifndef CONFIG_NEED_MULTIPLE_NODES void __init 
do_init_bootmem(void) { - unsigned long i; unsigned long start, bootmap_pages; unsigned long total_pages; + struct memblock_region *reg; int boot_mapsize; max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; @@ -204,10 +193,10 @@ void __init do_init_bootmem(void) boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn); /* Add active regions with valid PFNs */ - for (i = 0; i < memblock.memory.cnt; i++) { + for_each_memblock(memory, reg) { unsigned long start_pfn, end_pfn; - start_pfn = memblock.memory.regions[i].base >> PAGE_SHIFT; - end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); + start_pfn = memblock_region_base_pfn(reg); + end_pfn = memblock_region_end_pfn(reg); add_active_range(0, start_pfn, end_pfn); } @@ -218,29 +207,21 @@ void __init do_init_bootmem(void) free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT); /* reserve the sections we're already using */ - for (i = 0; i < memblock.reserved.cnt; i++) { - unsigned long addr = memblock.reserved.regions[i].base + - memblock_size_bytes(&memblock.reserved, i) - 1; - if (addr < lowmem_end_addr) - reserve_bootmem(memblock.reserved.regions[i].base, - memblock_size_bytes(&memblock.reserved, i), - BOOTMEM_DEFAULT); - else if (memblock.reserved.regions[i].base < lowmem_end_addr) { - unsigned long adjusted_size = lowmem_end_addr - - memblock.reserved.regions[i].base; - reserve_bootmem(memblock.reserved.regions[i].base, - adjusted_size, BOOTMEM_DEFAULT); + for_each_memblock(reserved, reg) { + unsigned long top = reg->base + reg->size - 1; + if (top < lowmem_end_addr) + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); + else if (reg->base < lowmem_end_addr) { + unsigned long trunc_size = lowmem_end_addr - reg->base; + reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT); } } #else free_bootmem_with_active_regions(0, max_pfn); /* reserve the sections we're already using */ - for (i = 0; i < memblock.reserved.cnt; i++) - reserve_bootmem(memblock.reserved.regions[i].base, - memblock_size_bytes(&memblock.reserved, i), - BOOTMEM_DEFAULT); - + for_each_memblock(reserved, reg) + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); #endif /* XXX need to clip this if using highmem? 
*/ sparse_memory_present_with_active_regions(0); @@ -251,22 +232,15 @@ void __init do_init_bootmem(void) /* mark pages that don't exist as nosave */ static int __init mark_nonram_nosave(void) { - unsigned long memblock_next_region_start_pfn, - memblock_region_max_pfn; - int i; - - for (i = 0; i < memblock.memory.cnt - 1; i++) { - memblock_region_max_pfn = - (memblock.memory.regions[i].base >> PAGE_SHIFT) + - (memblock.memory.regions[i].size >> PAGE_SHIFT); - memblock_next_region_start_pfn = - memblock.memory.regions[i+1].base >> PAGE_SHIFT; - - if (memblock_region_max_pfn < memblock_next_region_start_pfn) - register_nosave_region(memblock_region_max_pfn, - memblock_next_region_start_pfn); + struct memblock_region *reg, *prev = NULL; + + for_each_memblock(memory, reg) { + if (prev && + memblock_region_end_pfn(prev) < memblock_region_base_pfn(reg)) + register_nosave_region(memblock_region_end_pfn(prev), + memblock_region_base_pfn(reg)); + prev = reg; } - return 0; } diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index aa731af720c0..9ba9ba1a430d 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -746,16 +746,17 @@ static void __init setup_nonnuma(void) unsigned long top_of_ram = memblock_end_of_DRAM(); unsigned long total_ram = memblock_phys_mem_size(); unsigned long start_pfn, end_pfn; - unsigned int i, nid = 0; + unsigned int nid = 0; + struct memblock_region *reg; printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); printk(KERN_DEBUG "Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); - for (i = 0; i < memblock.memory.cnt; ++i) { - start_pfn = memblock.memory.regions[i].base >> PAGE_SHIFT; - end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); + for_each_memblock(memory, reg) { + start_pfn = memblock_region_base_pfn(reg); + end_pfn = memblock_region_end_pfn(reg); fake_numa_create_new_node(end_pfn, &nid); add_active_range(nid, start_pfn, end_pfn); @@ -891,11 +892,11 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = { static void mark_reserved_regions_for_nid(int nid) { struct pglist_data *node = NODE_DATA(nid); - int i; + struct memblock_region *reg; - for (i = 0; i < memblock.reserved.cnt; i++) { - unsigned long physbase = memblock.reserved.regions[i].base; - unsigned long size = memblock.reserved.regions[i].size; + for_each_memblock(reserved, reg) { + unsigned long physbase = reg->base; + unsigned long size = reg->size; unsigned long start_pfn = physbase >> PAGE_SHIFT; unsigned long end_pfn = PFN_UP(physbase + size); struct node_active_region node_ar; -- cgit v1.2.3
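Note: to illustrate the iteration pattern this series converts callers to, here is a minimal sketch modeled on the walk_system_ram_range() rewrite above. It is not part of any patch in this log, and example_walk_ram()/my_func are invented names:

	/* Clamp a caller-supplied pfn window against each memory region and
	 * hand every intersection that is actual RAM to a callback.
	 */
	static int example_walk_ram(unsigned long start_pfn, unsigned long end_pfn,
				    void *arg,
				    int (*my_func)(unsigned long, unsigned long, void *))
	{
		struct memblock_region *reg;
		unsigned long tstart, tend;
		int ret = -1;

		for_each_memblock(memory, reg) {
			tstart = max(start_pfn, memblock_region_base_pfn(reg));
			tend = min(end_pfn, memblock_region_end_pfn(reg));
			if (tstart >= tend)
				continue;	/* window misses this region */
			ret = my_func(tstart, tend - tstart, arg);
			if (ret)
				break;
		}
		return ret;
	}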
From 27f574c223d2c09610058b3ec7a29582d63a3e06 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:00 -0700 Subject: memblock: Expose MEMBLOCK_ALLOC_ANYWHERE Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hash_utils_64.c | 2 +- include/linux/memblock.h | 1 + mm/memblock.c | 2 -- 3 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 4072b871497d..a542ff5ec8a9 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -625,7 +625,7 @@ static void __init htab_initialize(void) if (machine_is(cell)) limit = 0x80000000; else - limit = 0; + limit = MEMBLOCK_ALLOC_ANYWHERE; table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit); diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 367dea6e95a0..3cf3304e901d 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -50,6 +50,7 @@ extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid); extern u64 __init memblock_alloc(u64 size, u64 align); extern u64 __init memblock_alloc_base(u64 size, u64, u64 max_addr); +#define MEMBLOCK_ALLOC_ANYWHERE 0 extern u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr); extern u64 __init memblock_phys_mem_size(void); diff --git a/mm/memblock.c b/mm/memblock.c index e264e8c70892..0131684c42f8 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -15,8 +15,6 @@ #include #include -#define MEMBLOCK_ALLOC_ANYWHERE 0 - struct memblock memblock; static int memblock_debug; -- cgit v1.2.3 From e63075a3c9377536d085bc013cd3fe6323162449 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:01 -0700 Subject: memblock: Introduce default allocation limit and use it to replace explicit ones This introduces memblock.current_limit which is used to limit allocations from memblock_alloc() or memblock_alloc_base(..., MEMBLOCK_ALLOC_ACCESSIBLE). The old MEMBLOCK_ALLOC_ANYWHERE changes value from 0 to ~(u64)0 and can still be used with memblock_alloc_base() to allocate really anywhere. It is -no-longer- cropped to MEMBLOCK_REAL_LIMIT which disappears. Note to archs: I'm leaving the default limit to MEMBLOCK_ALLOC_ANYWHERE. I strongly recommend that you ensure that you set an appropriate limit during boot in order to guarantee that a memblock_alloc() at any time results in something that is accessible with a simple __va(). The reason is that a subsequent patch will introduce the ability for the array to resize itself by reallocating itself. The MEMBLOCK core will honor the current limit when performing those allocations. Signed-off-by: Benjamin Herrenschmidt --- arch/microblaze/include/asm/memblock.h | 3 --- arch/powerpc/include/asm/memblock.h | 7 ------- arch/powerpc/kernel/prom.c | 20 +++++++++++++++++++- arch/powerpc/kernel/setup_32.c | 2 +- arch/powerpc/mm/40x_mmu.c | 5 +++-- arch/powerpc/mm/fsl_booke_mmu.c | 3 ++- arch/powerpc/mm/hash_utils_64.c | 3 ++- arch/powerpc/mm/init_32.c | 29 +++++++----------------------- arch/powerpc/mm/ppc_mmu_32.c | 3 +-- arch/powerpc/mm/tlb_nohash.c | 2 ++ arch/sh/include/asm/memblock.h | 2 -- arch/sparc/include/asm/memblock.h | 2 -- include/linux/memblock.h | 16 +++++++++++++++- mm/memblock.c | 19 +++++++++++-------- 14 files changed, 63 insertions(+), 53 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/microblaze/include/asm/memblock.h b/arch/microblaze/include/asm/memblock.h index f9c2fa331d2a..20a8e257c77f 100644 --- a/arch/microblaze/include/asm/memblock.h +++ b/arch/microblaze/include/asm/memblock.h @@ -9,9 +9,6 @@ #ifndef _ASM_MICROBLAZE_MEMBLOCK_H #define _ASM_MICROBLAZE_MEMBLOCK_H -/* MEMBLOCK limit is OFF */ -#define MEMBLOCK_REAL_LIMIT 0xFFFFFFFF - #endif /* _ASM_MICROBLAZE_MEMBLOCK_H */ diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h index 3c29728b56b1..43efc345065e 100644 --- a/arch/powerpc/include/asm/memblock.h +++ b/arch/powerpc/include/asm/memblock.h @@ -5,11 +5,4 @@ #define MEMBLOCK_DBG(fmt...) 
udbg_printf(fmt) -#ifdef CONFIG_PPC32 -extern phys_addr_t lowmem_end_addr; -#define MEMBLOCK_REAL_LIMIT lowmem_end_addr -#else -#define MEMBLOCK_REAL_LIMIT 0 -#endif - #endif /* _ASM_POWERPC_MEMBLOCK_H */ diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index fed9bf6187d1..3aec0b980f6a 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -98,7 +98,7 @@ static void __init move_device_tree(void) if ((memory_limit && (start + size) > memory_limit) || overlaps_crashkernel(start, size)) { - p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size)); + p = __va(memblock_alloc(size, PAGE_SIZE)); memcpy(p, initial_boot_params, size); initial_boot_params = (struct boot_param_header *)p; DBG("Moved device tree to 0x%p\n", p); @@ -655,6 +655,21 @@ static void __init phyp_dump_reserve_mem(void) static inline void __init phyp_dump_reserve_mem(void) {} #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ +static void set_boot_memory_limit(void) +{ +#ifdef CONFIG_PPC32 + /* 601 can only access 16MB at the moment */ + if (PVR_VER(mfspr(SPRN_PVR)) == 1) + memblock_set_current_limit(0x01000000); + /* 8xx can only access 8MB at the moment */ + else if (PVR_VER(mfspr(SPRN_PVR)) == 0x50) + memblock_set_current_limit(0x00800000); + else + memblock_set_current_limit(0x10000000); +#else + memblock_set_current_limit(memblock.rmo_size); +#endif +} void __init early_init_devtree(void *params) { @@ -683,6 +698,7 @@ void __init early_init_devtree(void *params) /* Scan memory nodes and rebuild MEMBLOCKs */ memblock_init(); + of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); @@ -718,6 +734,8 @@ void __init early_init_devtree(void *params) DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); + set_boot_memory_limit(); + /* We may need to relocate the flat tree, do it now. * FIXME .. and the initrd too? */ move_device_tree(); diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index a10ffc85ada7..b7eb1ded3b5f 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -246,7 +246,7 @@ static void __init irqstack_early_init(void) unsigned int i; /* interrupt stacks must be in lowmem, we get that for free on ppc32 - * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ + * as the memblock is limited to lowmem by default */ for_each_possible_cpu(i) { softirq_ctx[i] = (struct thread_info *) __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c index 1dc2fa5ce1bd..58969b51f454 100644 --- a/arch/powerpc/mm/40x_mmu.c +++ b/arch/powerpc/mm/40x_mmu.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -47,6 +48,7 @@ #include #include #include + #include "mmu_decl.h" extern int __map_without_ltlbs; @@ -139,8 +141,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top) * coverage with normal-sized pages (or other reasons) do not * attempt to allocate outside the allowed range. 
*/ - - __initial_memory_limit_addr = memstart_addr + mapped; + memblock_set_current_limit(memstart_addr + mapped); return mapped; } diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index cdc7526e9c93..e525f862d759 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -212,5 +213,5 @@ void __init adjust_total_lowmem(void) pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20, (unsigned int)((total_lowmem - __max_low_memory) >> 20)); - __initial_memory_limit_addr = memstart_addr + __max_low_memory; + memblock_set_current_limit(memstart_addr + __max_low_memory); } diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index a542ff5ec8a9..b05890e23813 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -696,7 +696,8 @@ static void __init htab_initialize(void) #endif /* CONFIG_U3_DART */ BUG_ON(htab_bolt_mapping(base, base + size, __pa(base), prot, mmu_linear_psize, mmu_kernel_ssize)); - } + } + memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); /* * If we have a memory_limit and we've allocated TCEs then we need to diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 6a6975dc2654..59b208b7ec6f 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -91,12 +91,6 @@ int __allow_ioremap_reserved; /* max amount of low RAM to map in */ unsigned long __max_low_memory = MAX_LOW_MEM; -/* - * address of the limit of what is accessible with initial MMU setup - - * 256MB usually, but only 16MB on 601. - */ -phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000; - /* * Check for command-line options that affect what MMU_init will do. */ @@ -126,13 +120,6 @@ void __init MMU_init(void) if (ppc_md.progress) ppc_md.progress("MMU:enter", 0x111); - /* 601 can only access 16MB at the moment */ - if (PVR_VER(mfspr(SPRN_PVR)) == 1) - __initial_memory_limit_addr = 0x01000000; - /* 8xx can only access 8MB at the moment */ - if (PVR_VER(mfspr(SPRN_PVR)) == 0x50) - __initial_memory_limit_addr = 0x00800000; - /* parse args from command line */ MMU_setup(); @@ -190,20 +177,18 @@ void __init MMU_init(void) #ifdef CONFIG_BOOTX_TEXT btext_unmap(); #endif + + /* Shortly after that, the entire linear mapping will be available */ + memblock_set_current_limit(lowmem_end_addr); } /* This is only called until mem_init is done. */ void __init *early_get_page(void) { - void *p; - - if (init_bootmem_done) { - p = alloc_bootmem_pages(PAGE_SIZE); - } else { - p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, - __initial_memory_limit_addr)); - } - return p; + if (init_bootmem_done) + return alloc_bootmem_pages(PAGE_SIZE); + else + return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); } /* Free up now-unused memory */ diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index f8a01829d64f..7d34e170e80f 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -223,8 +223,7 @@ void __init MMU_init_hw(void) * Find some memory for the hash table. 
*/ if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); - Hash = __va(memblock_alloc_base(Hash_size, Hash_size, - __initial_memory_limit_addr)); + Hash = __va(memblock_alloc(Hash_size, Hash_size)); cacheable_memzero(Hash, Hash_size); _SDR1 = __pa(Hash) | SDR1_LOW_BITS; diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index d8695b02a968..7ba32e762990 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -432,6 +432,8 @@ static void __early_init_mmu(int boot_cpu) * the MMU configuration */ mb(); + + memblock_set_current_limit(linear_map_top); } void __init early_init_mmu(void) diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h index dfe683b88075..e87063fad2ea 100644 --- a/arch/sh/include/asm/memblock.h +++ b/arch/sh/include/asm/memblock.h @@ -1,6 +1,4 @@ #ifndef __ASM_SH_MEMBLOCK_H #define __ASM_SH_MEMBLOCK_H -#define MEMBLOCK_REAL_LIMIT 0 - #endif /* __ASM_SH_MEMBLOCK_H */ diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h index f12af880649b..c67b047ef85e 100644 --- a/arch/sparc/include/asm/memblock.h +++ b/arch/sparc/include/asm/memblock.h @@ -5,6 +5,4 @@ #define MEMBLOCK_DBG(fmt...) prom_printf(fmt) -#define MEMBLOCK_REAL_LIMIT 0 - #endif /* !(_SPARC64_MEMBLOCK_H) */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 3cf3304e901d..c4f6e53264ed 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -34,6 +34,7 @@ struct memblock_type { struct memblock { unsigned long debug; u64 rmo_size; + u64 current_limit; struct memblock_type memory; struct memblock_type reserved; }; @@ -46,11 +47,16 @@ extern long memblock_add(u64 base, u64 size); extern long memblock_remove(u64 base, u64 size); extern long __init memblock_free(u64 base, u64 size); extern long __init memblock_reserve(u64 base, u64 size); + extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid); extern u64 __init memblock_alloc(u64 size, u64 align); + +/* Flags for memblock_alloc_base() and __memblock_alloc_base() */ +#define MEMBLOCK_ALLOC_ANYWHERE (~(u64)0) +#define MEMBLOCK_ALLOC_ACCESSIBLE 0 + extern u64 __init memblock_alloc_base(u64 size, u64, u64 max_addr); -#define MEMBLOCK_ALLOC_ANYWHERE 0 extern u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr); extern u64 __init memblock_phys_mem_size(void); @@ -66,6 +72,14 @@ extern void memblock_dump_all(void); /* Provided by the architecture */ extern u64 memblock_nid_range(u64 start, u64 end, int *nid); +/** + * memblock_set_current_limit - Set the current allocation limit to allow + * limiting allocations to what is currently + * accessible during boot + * @limit: New limit value (physical address) + */ +extern void memblock_set_current_limit(u64 limit); + /* * pfn conversion functions diff --git a/mm/memblock.c b/mm/memblock.c index 0131684c42f8..770c5bfac2cd 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -115,6 +115,8 @@ void __init memblock_init(void) memblock.reserved.regions[0].base = 0; memblock.reserved.regions[0].size = 0; memblock.reserved.cnt = 1; + + memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE; } void __init memblock_analyze(void) @@ -373,7 +375,7 @@ u64 __init memblock_alloc_nid(u64 size, u64 align, int nid) u64 __init memblock_alloc(u64 size, u64 align) { - return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); + return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); } u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr) @@ -399,14 +401,9 @@ u64 
__init __memblock_alloc_base(u64 size, u64 align, u64 max_addr) size = memblock_align_up(size, align); - /* On some platforms, make sure we allocate lowmem */ - /* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */ - if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) - max_addr = MEMBLOCK_REAL_LIMIT; - /* Pump up max_addr */ - if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) - max_addr = ~(u64)0; + /* Pump up max_addr */ + if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE) + max_addr = memblock.current_limit; /* We do a top-down search, this tends to limit memory * fragmentation by keeping early boot allocs near the @@ -527,3 +524,9 @@ int memblock_is_region_reserved(u64 base, u64 size) return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; } + +void __init memblock_set_current_limit(u64 limit) +{ + memblock.current_limit = limit; +} + -- cgit v1.2.3
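Note: a brief, illustrative sketch of the allocation-limit semantics the patch above introduces; the limit value is an assumption for the example, not something taken from the series:

	/* Early boot code publishes how much memory is currently accessible */
	memblock_set_current_limit(0x10000000);		/* assume 256MB mapped so far */

	/* memblock_alloc() now means MEMBLOCK_ALLOC_ACCESSIBLE, so the result
	 * is guaranteed to lie below the current limit ...
	 */
	u64 pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	/* ... while an explicit MEMBLOCK_ALLOC_ANYWHERE still really means
	 * anywhere in registered memory.
	 */
	u64 pa_any = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
					 MEMBLOCK_ALLOC_ANYWHERE);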
From cd3db0c4ca3d237e7ad20f7107216e575705d2b0 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:02 -0700 Subject: memblock: Remove rmo_size, bury it in arch/powerpc where it belongs The RMA (RMO is a misnomer) is a concept specific to ppc64 (in fact server ppc64 though I hijack it on embedded ppc64 for similar purposes) and represents the area of memory that can be accessed in real mode (aka with MMU off), or on embedded, from the exception vectors (which is bolted in the TLB) which pretty much boils down to the same thing. We take that out of the generic MEMBLOCK data structure and move it into arch/powerpc where it belongs, renaming it to "RMA" while at it. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/mmu.h | 12 ++++++++++++ arch/powerpc/kernel/head_40x.S | 6 +----- arch/powerpc/kernel/paca.c | 2 +- arch/powerpc/kernel/prom.c | 29 ++++++++--------------------- arch/powerpc/kernel/rtas.c | 2 +- arch/powerpc/kernel/setup_64.c | 2 +- arch/powerpc/mm/40x_mmu.c | 14 +++++++++++++- arch/powerpc/mm/44x_mmu.c | 14 ++++++++++++++ arch/powerpc/mm/fsl_booke_mmu.c | 9 +++++++++ arch/powerpc/mm/hash_utils_64.c | 22 +++++++++++++++++++++- arch/powerpc/mm/init_32.c | 14 ++++++++++++++ arch/powerpc/mm/init_64.c | 1 + arch/powerpc/mm/ppc_mmu_32.c | 15 +++++++++++++++ arch/powerpc/mm/tlb_nohash.c | 14 ++++++++++++++ include/linux/memblock.h | 1 - mm/memblock.c | 8 -------- 16 files changed, 125 insertions(+), 40 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 7ebf42ed84a2..bb40a06d3b77 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -2,6 +2,8 @@ #define _ASM_POWERPC_MMU_H_ #ifdef __KERNEL__ +#include + #include #include @@ -82,6 +84,16 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup; extern void early_init_mmu(void); extern void early_init_mmu_secondary(void); +extern void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size); + +#ifdef CONFIG_PPC64 +/* This is our real memory area size on ppc64 server, on embedded, we + * make it match the size of our bolted TLB area + */ +extern u64 ppc64_rma_size; +#endif /* CONFIG_PPC64 */ #endif /* !__ASSEMBLY__ */ /* The kernel use the constants below to index in the page sizes array. diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index a90625f9b485..8278e8bad5a0 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -923,11 +923,7 @@ initial_mmu: mtspr SPRN_PID,r0 sync - /* Configure and load two entries into TLB slots 62 and 63. - * In case we are pinning TLBs, these are reserved in by the - * other TLB functions. If not reserving, then it doesn't - * matter where they are loaded. - */ + /* Configure and load one entry into TLB slot 63 */ clrrwi r4,r4,10 /* Mask off the real page number */ ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 139a773853f4..b9ffd7deeed7 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -117,7 +117,7 @@ void __init allocate_pacas(void) * the first segment. On iSeries they must be within the area mapped * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. */ - limit = min(0x10000000ULL, memblock.rmo_size); + limit = min(0x10000000ULL, ppc64_rma_size); if (firmware_has_feature(FW_FEATURE_ISERIES)) limit = min(limit, HvPagesToMap * HVPAGESIZE); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 3aec0b980f6a..c3c6a8857544 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -66,6 +66,7 @@ int __initdata iommu_is_off; int __initdata iommu_force_on; unsigned long tce_alloc_start, tce_alloc_end; +u64 ppc64_rma_size; #endif static int __init early_parse_mem(char *p) @@ -492,7 +493,7 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node, void __init early_init_dt_add_memory_arch(u64 base, u64 size) { -#if defined(CONFIG_PPC64) +#ifdef CONFIG_PPC64 if (iommu_is_off) { if (base >= 0x80000000ul) return; @@ -501,9 +502,13 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) } #endif - memblock_add(base, size); - + /* First MEMBLOCK added, do some special initializations */ + if (memstart_addr == ~(phys_addr_t)0) + setup_initial_memory_limit(base, size); memstart_addr = min((u64)memstart_addr, base); + + /* Add the chunk to the MEMBLOCK list */ + memblock_add(base, size); } u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) @@ -655,22 +660,6 @@ static void __init phyp_dump_reserve_mem(void) static inline void __init phyp_dump_reserve_mem(void) {} #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ -static void set_boot_memory_limit(void) -{ -#ifdef CONFIG_PPC32 - /* 601 can only access 16MB at the moment */ - if (PVR_VER(mfspr(SPRN_PVR)) == 1) - memblock_set_current_limit(0x01000000); - /* 8xx can only access 8MB at the moment */ - else if (PVR_VER(mfspr(SPRN_PVR)) == 0x50) - memblock_set_current_limit(0x00800000); - else - memblock_set_current_limit(0x10000000); -#else - memblock_set_current_limit(memblock.rmo_size); -#endif -} void __init early_init_devtree(void *params) { phys_addr_t limit; @@ -734,8 +723,6 @@ void __init early_init_devtree(void *params) DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); - set_boot_memory_limit(); - /* We may need to relocate the flat tree, do it now. * FIXME .. and the initrd too? 
*/ move_device_tree(); diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index d0516dbee762..1662777be5dd 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -934,7 +934,7 @@ void __init rtas_initialize(void) */ #ifdef CONFIG_PPC64 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) { - rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX); + rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX); ibm_suspend_me_token = rtas_token("ibm,suspend-me"); } #endif diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index d135f93cb0f6..4360944b60f0 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -487,7 +487,7 @@ static void __init emergency_stack_init(void) * bringup, we need to get at them in real mode. This means they * must also be within the RMO region. */ - limit = min(slb0_limit(), memblock.rmo_size); + limit = min(slb0_limit(), ppc64_rma_size); for_each_possible_cpu(i) { unsigned long sp; diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c index 58969b51f454..5810967511d4 100644 --- a/arch/powerpc/mm/40x_mmu.c +++ b/arch/powerpc/mm/40x_mmu.c @@ -141,7 +141,19 @@ unsigned long __init mmu_mapin_ram(unsigned long top) * coverage with normal-sized pages (or other reasons) do not * attempt to allocate outside the allowed range. */ - memblock_set_current_limit(memstart_addr + mapped); + memblock_set_current_limit(mapped); return mapped; } + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* We don't currently support the first MEMBLOCK not mapping 0 + * physical on those processors + */ + BUG_ON(first_memblock_base != 0); + + /* 40x can only access 16MB at the moment (see head_40x.S) */ + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); +} diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c index d8c6efb32bc6..024acab588fd 100644 --- a/arch/powerpc/mm/44x_mmu.c +++ b/arch/powerpc/mm/44x_mmu.c @@ -24,6 +24,8 @@ */ #include +#include + #include #include #include @@ -213,6 +215,18 @@ unsigned long __init mmu_mapin_ram(unsigned long top) return total_lowmem; } +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* We don't currently support the first MEMBLOCK not mapping 0 + * physical on those processors + */ + BUG_ON(first_memblock_base != 0); + + /* 44x has a 256M TLB entry pinned at boot */ + memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE)); +} + #ifdef CONFIG_SMP void __cpuinit mmu_init_secondary(int cpu) { diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index e525f862d759..0be8fe24c54e 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -215,3 +215,12 @@ void __init adjust_total_lowmem(void) memblock_set_current_limit(memstart_addr + __max_low_memory); } + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + phys_addr_t limit = first_memblock_base + first_memblock_size; + + /* 64M mapped initially according to head_fsl_booke.S */ + memblock_set_current_limit(min_t(u64, limit, 0x04000000)); +} diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index b05890e23813..83f534d862db 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -649,7 +649,7 @@ static void __init htab_initialize(void) #ifdef 
CONFIG_DEBUG_PAGEALLOC linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count, - 1, memblock.rmo_size)); + 1, ppc64_rma_size)); memset(linear_map_hash_slots, 0, linear_map_hash_count); #endif /* CONFIG_DEBUG_PAGEALLOC */ @@ -1248,3 +1248,23 @@ void kernel_map_pages(struct page *page, int numpages, int enable) local_irq_restore(flags); } #endif /* CONFIG_DEBUG_PAGEALLOC */ + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* We don't currently support the first MEMBLOCK not mapping 0 + * physical on those processors + */ + BUG_ON(first_memblock_base != 0); + + /* On LPAR systems, the first entry is our RMA region, + * non-LPAR 64-bit hash MMU systems don't have a limitation + * on real mode access, but using the first entry works well + * enough. We also clamp it to 1G to avoid some funky things + * such as RTAS bugs etc... + */ + ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); + + /* Finally limit subsequent allocations */ + memblock_set_current_limit(ppc64_rma_size); +} diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 59b208b7ec6f..742da43b4ab6 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -237,3 +237,17 @@ void free_initrd_mem(unsigned long start, unsigned long end) } #endif + +#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */ +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* We don't currently support the first MEMBLOCK not mapping 0 + * physical on those processors + */ + BUG_ON(first_memblock_base != 0); + + /* 8xx can only access 8MB at the moment */ + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); +} +#endif /* CONFIG_8xx */ diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 71f1415e2472..9e081ffbf0f2 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -328,3 +328,4 @@ int __meminit vmemmap_populate(struct page *start_page, return 0; } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ + diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index 7d34e170e80f..11571e118831 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -271,3 +271,18 @@ void __init MMU_init_hw(void) if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205); } + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* We don't currently support the first MEMBLOCK not mapping 0 + * physical on those processors + */ + BUG_ON(first_memblock_base != 0); + + /* 601 can only access 16MB at the moment */ + if (PVR_VER(mfspr(SPRN_PVR)) == 1) + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000)); + else /* Anything else has 256M mapped */ + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000)); +} diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 7ba32e762990..a086ed562606 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -446,4 +446,18 @@ void __cpuinit early_init_mmu_secondary(void) __early_init_mmu(0); } +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* On Embedded 64-bit, we adjust the RMA size to match + * the bolted TLB entry. We know for now that only 1G + * entries are supported though that may eventually + * change. 
We crop it to the size of the first MEMBLOCK to + * avoid going over total available memory just in case... + */ + ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); + + /* Finally limit subsequent allocations */ + memblock_set_current_limit(first_memblock_base + ppc64_rma_size); +} #endif /* CONFIG_PPC64 */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index c4f6e53264ed..71b8edc6ede8 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -33,7 +33,6 @@ struct memblock_type { struct memblock { unsigned long debug; - u64 rmo_size; u64 current_limit; struct memblock_type memory; struct memblock_type reserved; }; diff --git a/mm/memblock.c b/mm/memblock.c index 770c5bfac2cd..73d903ebf3d4 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -49,7 +49,6 @@ void memblock_dump_all(void) return; pr_info("MEMBLOCK configuration:\n"); - pr_info(" rmo_size = 0x%llx\n", (unsigned long long)memblock.rmo_size); pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size); memblock_dump(&memblock.memory, "memory"); @@ -195,10 +194,6 @@ static long memblock_add_region(struct memblock_type *type, u64 base, u64 size) long memblock_add(u64 base, u64 size) { - /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */ - if (base == 0) - memblock.rmo_size = size; - return memblock_add_region(&memblock.memory, base, size); } @@ -459,9 +454,6 @@ void __init memblock_enforce_memory_limit(u64 memory_limit) break; } - if (memblock.memory.regions[0].size < memblock.rmo_size) - memblock.rmo_size = memblock.memory.regions[0].size; - memory_limit = memblock_end_of_DRAM(); /* And truncate any reserves above the limit also. */ -- cgit v1.2.3
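Note: the setup_initial_memory_limit() hooks added across the sub-architectures above all follow the same shape; a hedged sketch for a hypothetical platform is shown below (EXAMPLE_BOLTED_SIZE is an invented constant, not a kernel symbol):

	void setup_initial_memory_limit(phys_addr_t first_memblock_base,
					phys_addr_t first_memblock_size)
	{
		/* Like the implementations above, assume the first memory
		 * block maps physical address 0.
		 */
		BUG_ON(first_memblock_base != 0);

		/* Never let boot-time allocations land beyond what the
		 * initial MMU setup (bolted TLB entry, pinned mapping, ...)
		 * can actually reach.
		 */
		memblock_set_current_limit(min_t(u64, first_memblock_size,
						 EXAMPLE_BOLTED_SIZE));
	}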
From 4734b594c6ca1be796d30c82d93fdf5160f45124 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 28 Jul 2010 14:31:29 +1000 Subject: memblock: Remove memblock_type.size and add memblock.memory_size instead Right now, both the "memory" and "reserved" memblock_type structures have a "size" member. It represents the calculated memory size in the former case and is unused in the latter. This moves it out to the main memblock structure instead. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/mem.c | 2 +- include/linux/memblock.h | 2 +- mm/memblock.c | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 52df5428ece4..f661f6c527da 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -301,7 +301,7 @@ void __init mem_init(void) swiotlb_init(1); #endif - num_physpages = memblock.memory.size >> PAGE_SHIFT; + num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); #ifdef CONFIG_NEED_MULTIPLE_NODES diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 0fe6dd56a4b4..c9c7b0f344a5 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -27,12 +27,12 @@ struct memblock_region { struct memblock_type { unsigned long cnt; - phys_addr_t size; struct memblock_region regions[MAX_MEMBLOCK_REGIONS+1]; }; struct memblock { phys_addr_t current_limit; + phys_addr_t memory_size; /* Updated by memblock_analyze() */ struct memblock_type memory; struct memblock_type reserved; }; diff --git a/mm/memblock.c b/mm/memblock.c index 81da63592a68..5ae413e9afd8 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -49,7 +49,7 @@ void memblock_dump_all(void) return; pr_info("MEMBLOCK configuration:\n"); - pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size); + pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size); memblock_dump(&memblock.memory, "memory"); memblock_dump(&memblock.reserved, "reserved"); @@ -123,10 +123,10 @@ void __init memblock_analyze(void) { int i; - memblock.memory.size = 0; + memblock.memory_size = 0; for (i = 0; i < memblock.memory.cnt; i++) - memblock.memory.size += memblock.memory.regions[i].size; + memblock.memory_size += memblock.memory.regions[i].size; } static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) @@ -423,7 +423,7 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, ph /* You must call memblock_analyze() before this. */ phys_addr_t __init memblock_phys_mem_size(void) { - return memblock.memory.size; + return memblock.memory_size; } phys_addr_t memblock_end_of_DRAM(void) -- cgit v1.2.3
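Note: with memblock_type.size gone, code outside mm/memblock.c reaches the total only through the accessor; a one-line usage sketch mirroring the mem.c hunk above (valid once memblock_analyze() has run during boot):

	num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;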
From 28b549905b239357db7c249e261857c1716db05a Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 24 Aug 2010 13:15:28 +0000 Subject: powerpc: Check end of stack canary at oops time Add a check for the stack canary when we oops, similar to x86. This should make it clear that we overran our stack: Unable to handle kernel paging request for data at address 0x24652f63700ac689 Faulting instruction address: 0xc000000000063d24 Thread overran stack, or stack corrupted Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/fault.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 1bd712c33ce2..54f4fb994e99 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -30,6 +30,7 @@ #include #include #include +#include <linux/magic.h> #include #include @@ -385,6 +386,7 @@ do_sigbus: void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) { const struct exception_table_entry *entry; + unsigned long *stackend; /* Are we prepared to handle this fault? */ if ((entry = search_exception_tables(regs->nip)) != NULL) { @@ -413,5 +415,9 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n", regs->nip); + stackend = end_of_stack(current); + if (current != &init_task && *stackend != STACK_END_MAGIC) + printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); + die("Kernel access of bad area", regs, sig); } -- cgit v1.2.3 From 0d35e1620d2882d74faed90d9ac457bf6c7a0886 Mon Sep 17 00:00:00 2001 From: Matthew McClintock Date: Tue, 31 Aug 2010 13:24:44 +0000 Subject: powerpc/mm: Assume first cpu is boot_cpuid not 0 arch/powerpc/mm/mmu_context_nohash.c assumes the boot cpu will always have smp_processor_id() == 0. This patch fixes that assumption. Signed-off-by: Matthew McClintock Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/mmu_context_nohash.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index ddfd7ad4e1d6..5ce99848d91e 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -334,7 +334,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, /* We don't touch CPU 0 map, it's allocated at boot and kept * around forever */ - if (cpu == 0) + if (cpu == boot_cpuid) return NOTIFY_OK; switch (action) { @@ -420,9 +420,11 @@ void __init mmu_context_init(void) */ context_map = alloc_bootmem(CTX_MAP_SIZE); context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1)); +#ifndef CONFIG_SMP stale_map[0] = alloc_bootmem(CTX_MAP_SIZE); +#else + stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE); -#ifdef CONFIG_SMP register_cpu_notifier(&mmu_context_cpu_nb); #endif -- cgit v1.2.3 From c7fc2de0c83dbd2eaf759c5cd0e2b9cf1eb4df3a Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 12 Oct 2010 14:07:09 -0700 Subject: memblock, bootmem: Round pfn properly for memory and reserved regions We need to round memory regions correctly -- specifically, we need to round reserved region in the more expansive direction (lower limit down, upper limit up) whereas usable memory regions need to be rounded in the more restrictive direction (lower limit up, upper limit down). This introduces two sets of inlines: memblock_region_memory_base_pfn() memblock_region_memory_end_pfn() memblock_region_reserved_base_pfn() memblock_region_reserved_end_pfn() Although they are antisymmetric (and therefore are technically duplicates) the use of the different inlines explicitly documents the programmer's intention. The lack of proper rounding caused a bug on ARM, which was then found to also affect other architectures. Reported-by: Russell King Signed-off-by: Yinghai Lu LKML-Reference: <4CB4CDFD.4020105@kernel.org> Cc: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/arm/mm/init.c | 8 ++++---- arch/powerpc/mm/mem.c | 14 +++++++------- arch/powerpc/mm/numa.c | 4 ++-- arch/sh/mm/init.c | 4 ++-- arch/sparc/mm/init_64.c | 4 ++-- include/linux/memblock.h | 25 ++++++++++++------------- 6 files changed, 29 insertions(+), 30 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index d6022d1f51d1..63f441797c96 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -182,8 +182,8 @@ static void __init arm_bootmem_init(struct meminfo *mi, * Reserve the memblock reserved regions in bootmem. 
*/ for_each_memblock(reserved, reg) { - phys_addr_t start = memblock_region_base_pfn(reg); - phys_addr_t end = memblock_region_end_pfn(reg); + phys_addr_t start = memblock_region_reserved_base_pfn(reg); + phys_addr_t end = memblock_region_reserved_end_pfn(reg); if (start >= start_pfn && end <= end_pfn) reserve_bootmem_node(pgdat, __pfn_to_phys(start), (end - start) << PAGE_SHIFT, @@ -251,8 +251,8 @@ static void arm_memory_present(void) struct memblock_region *reg; for_each_memblock(memory, reg) - memory_present(0, memblock_region_base_pfn(reg), - memblock_region_end_pfn(reg)); + memory_present(0, memblock_region_memory_base_pfn(reg), + memblock_region_memory_end_pfn(reg)); } #endif diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index f661f6c527da..a66499650909 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -148,8 +148,8 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, int ret = -1; for_each_memblock(memory, reg) { - tstart = max(start_pfn, memblock_region_base_pfn(reg)); - tend = min(end_pfn, memblock_region_end_pfn(reg)); + tstart = max(start_pfn, memblock_region_memory_base_pfn(reg)); + tend = min(end_pfn, memblock_region_memory_end_pfn(reg)); if (tstart >= tend) continue; ret = (*func)(tstart, tend - tstart, arg); @@ -195,8 +195,8 @@ void __init do_init_bootmem(void) /* Add active regions with valid PFNs */ for_each_memblock(memory, reg) { unsigned long start_pfn, end_pfn; - start_pfn = memblock_region_base_pfn(reg); - end_pfn = memblock_region_end_pfn(reg); + start_pfn = memblock_region_memory_base_pfn(reg); + end_pfn = memblock_region_memory_end_pfn(reg); add_active_range(0, start_pfn, end_pfn); } @@ -236,9 +236,9 @@ static int __init mark_nonram_nosave(void) for_each_memblock(memory, reg) { if (prev && - memblock_region_end_pfn(prev) < memblock_region_base_pfn(reg)) - register_nosave_region(memblock_region_end_pfn(prev), - memblock_region_base_pfn(reg)); + memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) + register_nosave_region(memblock_region_memory_end_pfn(prev), + memblock_region_memory_base_pfn(reg)); prev = reg; } return 0; diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 066fb443ba5a..74505b245374 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -811,8 +811,8 @@ static void __init setup_nonnuma(void) (top_of_ram - total_ram) >> 20); for_each_memblock(memory, reg) { - start_pfn = memblock_region_base_pfn(reg); - end_pfn = memblock_region_end_pfn(reg); + start_pfn = memblock_region_memory_base_pfn(reg); + end_pfn = memblock_region_memory_end_pfn(reg); fake_numa_create_new_node(end_pfn, &nid); add_active_range(nid, start_pfn, end_pfn); diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index b977475f7446..552bea5113f5 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -244,8 +244,8 @@ static void __init do_init_bootmem(void) /* Add active regions with valid PFNs. 
*/ for_each_memblock(memory, reg) { unsigned long start_pfn, end_pfn; - start_pfn = memblock_region_base_pfn(reg); - end_pfn = memblock_region_end_pfn(reg); + start_pfn = memblock_region_memory_base_pfn(reg); + end_pfn = memblock_region_memory_end_pfn(reg); __add_active_range(0, start_pfn, end_pfn); } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index dc584d26d597..4c2572773b55 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1294,8 +1294,8 @@ static void __init bootmem_init_nonnuma(void) if (!reg->size) continue; - start_pfn = memblock_region_base_pfn(reg); - end_pfn = memblock_region_end_pfn(reg); + start_pfn = memblock_region_memory_base_pfn(reg); + end_pfn = memblock_region_memory_end_pfn(reg); add_active_range(0, start_pfn, end_pfn); } diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 5096458c7535..62a10c2a11f2 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -111,40 +111,39 @@ extern void memblock_set_current_limit(phys_addr_t limit); */ /** - * memblock_region_base_pfn - Return the lowest pfn intersecting with the region + * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region * @reg: memblock_region structure */ -static inline unsigned long memblock_region_base_pfn(const struct memblock_region *reg) +static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg) { - return reg->base >> PAGE_SHIFT; + return PFN_UP(reg->base); } /** - * memblock_region_last_pfn - Return the highest pfn intersecting with the region + * memblock_region_memory_end_pfn - Return the end_pfn of this region * @reg: memblock_region structure */ -static inline unsigned long memblock_region_last_pfn(const struct memblock_region *reg) +static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg) { - return (reg->base + reg->size - 1) >> PAGE_SHIFT; + return PFN_DOWN(reg->base + reg->size); } /** - * memblock_region_end_pfn - Return the pfn of the first page following the region - * but not intersecting it + * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region * @reg: memblock_region structure */ -static inline unsigned long memblock_region_end_pfn(const struct memblock_region *reg) +static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg) { - return memblock_region_last_pfn(reg) + 1; + return PFN_DOWN(reg->base); } /** - * memblock_region_pages - Return the number of pages covering a region + * memblock_region_reserved_end_pfn - Return the end_pfn of this region * @reg: memblock_region structure */ -static inline unsigned long memblock_region_pages(const struct memblock_region *reg) +static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg) { - return memblock_region_end_pfn(reg) - memblock_region_end_pfn(reg); + return PFN_UP(reg->base + reg->size); } #define for_each_memblock(memblock_type, region) \ -- cgit v1.2.3 From 4108d9ba9091c55cfb968d42dd7dcae9a098b876 Mon Sep 17 00:00:00 2001 From: matt mooney Date: Wed, 22 Sep 2010 20:51:09 +0000 Subject: powerpc/Makefiles: Change to new flag variables Replace EXTRA_CFLAGS with ccflags-y and EXTRA_AFLAGS with asflags-y. 
Signed-off-by: matt mooney Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/vdso32/Makefile | 6 +++--- arch/powerpc/kernel/vdso64/Makefile | 6 +++--- arch/powerpc/kvm/Makefile | 2 +- arch/powerpc/lib/Makefile | 4 +--- arch/powerpc/math-emu/Makefile | 2 +- arch/powerpc/mm/Makefile | 4 +--- arch/powerpc/oprofile/Makefile | 4 +--- arch/powerpc/platforms/iseries/Makefile | 2 +- arch/powerpc/platforms/pseries/Makefile | 11 +++-------- arch/powerpc/sysdev/Makefile | 4 +--- arch/powerpc/xmon/Makefile | 4 +--- 11 files changed, 17 insertions(+), 32 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index 51ead52141bd..9a7946c41738 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile @@ -14,10 +14,10 @@ obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) GCOV_PROFILE := n -EXTRA_CFLAGS := -shared -fno-common -fno-builtin -EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ +ccflags-y := -shared -fno-common -fno-builtin +ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) -EXTRA_AFLAGS := -D__VDSO32__ -s +asflags-y := -D__VDSO32__ -s obj-y += vdso32_wrapper.o extra-y += vdso32.lds diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index 79da65d44a2a..8c500d8622e4 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile @@ -9,10 +9,10 @@ obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) GCOV_PROFILE := n -EXTRA_CFLAGS := -shared -fno-common -fno-builtin -EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ +ccflags-y := -shared -fno-common -fno-builtin +ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) -EXTRA_AFLAGS := -D__VDSO64__ -s +asflags-y := -D__VDSO64__ -s obj-y += vdso64_wrapper.o extra-y += vdso64.lds diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index d45c818a384c..4d6863823f69 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -4,7 +4,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror -EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm +ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index ad4a36848f25..889f2bc106dd 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -4,9 +4,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror -ifeq ($(CONFIG_PPC64),y) -EXTRA_CFLAGS += -mno-minimal-toc -endif +ccflags-$(CONFIG_PPC64) := -mno-minimal-toc CFLAGS_REMOVE_code-patching.o = -pg CFLAGS_REMOVE_feature-fixups.o = -pg diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile index 0c16ab947f1f..7d1dba0d57f9 100644 --- a/arch/powerpc/math-emu/Makefile +++ b/arch/powerpc/math-emu/Makefile @@ -15,4 +15,4 @@ obj-$(CONFIG_SPE) += math_efp.o CFLAGS_fabs.o = -fno-builtin-fabs CFLAGS_math.o = -fno-builtin-fabs -EXTRA_CFLAGS = -I. -Iinclude/math-emu -w +ccflags-y = -I. 
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index ce68708bbad5..53102f306880 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -4,9 +4,7 @@

 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror

-ifeq ($(CONFIG_PPC64),y)
-EXTRA_CFLAGS += -mno-minimal-toc
-endif
+ccflags-$(CONFIG_PPC64) := -mno-minimal-toc

 obj-y := fault.o mem.o pgtable.o gup.o \
 		init_$(CONFIG_WORD_SIZE).o \
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index e219ca43962d..73456c4cec28 100644
--- a/arch/powerpc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -1,8 +1,6 @@
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror

-ifeq ($(CONFIG_PPC64),y)
-EXTRA_CFLAGS += -mno-minimal-toc
-endif
+ccflags-$(CONFIG_PPC64) := -mno-minimal-toc

 obj-$(CONFIG_OPROFILE) += oprofile.o
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
index ce014928d460..a7602b11ed9d 100644
--- a/arch/powerpc/platforms/iseries/Makefile
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -mno-minimal-toc
+ccflags-y := -mno-minimal-toc

 obj-y += exception.o
 obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt.o mf.o lpevents.o \
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 93541b39dd12..59eb8bdaa79d 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -1,10 +1,5 @@
-ifeq ($(CONFIG_PPC64),y)
-EXTRA_CFLAGS += -mno-minimal-toc
-endif
-
-ifeq ($(CONFIG_PPC_PSERIES_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
+ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG

 obj-y := lpar.o hvCall.o nvram.o reconfig.o \
 		setup.o iommu.o event_sources.o ras.o \
@@ -23,7 +18,7 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o
 obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
 obj-$(CONFIG_HVCS) += hvcserver.o
 obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
-obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o
+obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o
 obj-$(CONFIG_CMM) += cmm.o
 obj-$(CONFIG_DTL) += dtl.o
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 5642924fb9fb..c20ad6de33ee 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -1,8 +1,6 @@
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror

-ifeq ($(CONFIG_PPC64),y)
-EXTRA_CFLAGS += -mno-minimal-toc
-endif
+ccflags-$(CONFIG_PPC64) := -mno-minimal-toc

 mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o
 obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y)
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index faa81b6a6612..c168c54e3c40 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -4,9 +4,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror

 GCOV_PROFILE := n

-ifdef CONFIG_PPC64
-EXTRA_CFLAGS += -mno-minimal-toc
-endif
+ccflags-$(CONFIG_PPC64) := -mno-minimal-toc

 obj-y += xmon.o start.o nonstdio.o
-- cgit v1.2.3
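[Editorial note] The kbuild idiom this patch leans on deserves a word: a flag variable named with a config suffix, such as ccflags-$(CONFIG_PPC64), expands to ccflags-y when the option is y and to the never-read variable ccflags- when the option is unset, which is what lets one line replace each three-line ifeq block. A minimal sketch with a hypothetical option name, not taken from this patch:

	# If CONFIG_FOO=y this collapses to:  ccflags-y += -DFOO
	# If CONFIG_FOO is unset it becomes ccflags-, which kbuild never reads.
	ccflags-$(CONFIG_FOO) += -DFOO

	# Equivalent old-style spelling that this series removes:
	# ifeq ($(CONFIG_FOO),y)
	# EXTRA_CFLAGS += -DFOO
	# endif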
From 92437d41374bf59b1914b53bd10ca69d31b1b581 Mon Sep 17 00:00:00 2001
From: Paul Gortmaker
Date: Fri, 24 Sep 2010 12:44:52 -0400
Subject: powerpc: Fix invalid page flags in create TLB CAM path for PTE_64BIT

There exists a four-line chunk of code which, when configured for a
64-bit address space, can incorrectly set certain page flags during
TLB creation. It turns out that this code isn't used, but it might
still serve a purpose. Since it isn't obvious why it exists or why it
causes problems, the description below covers both in detail.

For powerpc bootstrap, the physical memory (at most 768M) is mapped
into the kernel space via the following path:

  MMU_init()
      |
      + adjust_total_lowmem()
            |
            + map_mem_in_cams()
                  |
                  + settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0);

In settlbcam(), the kernel creates TLB entries according to the flag,
PAGE_KERNEL_X:

  settlbcam()
  {
	...
	TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
			     ^
		These entries cannot be invalidated by the kernel,
		since MAS1_IPROT is set in the TLB property.
	...
	if (flags & _PAGE_USER) {
		TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
		TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
	}

For classic BookE, (flags & _PAGE_USER) is zero, so this is fine. But
on boards like the Freescale P4080, we want to support a 36-bit
physical address space, so the following options may be set:

  CONFIG_FSL_BOOKE=y
  CONFIG_PTE_64BIT=y
  CONFIG_PHYS_64BIT=y

As a result, boards like the P4080 use the Book3E PTE format, as per
arch/powerpc/include/asm/pgtable-ppc32.h:

 * #elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
 * #include <asm/pte-book3e.h>

So PAGE_KERNEL_X is __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX), and the
Book3E version of _PAGE_KERNEL_RWX is defined as:

  (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)

Note the _PAGE_BAP_SR, which is also part of the Book3E _PAGE_USER:

  #define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */

So the possibility exists to wrongly assign the user MAS3_U bits to a
kernel (PAGE_KERNEL_X) address space via the following code fragment:

	if (flags & _PAGE_USER) {
		TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
		TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
	}

Here is a dump of the TLB info from Simics with the above code present:

  ------ L2 TLB1
                                               GT                SSS UUU   V I
   Row   Logical           Physical            SS TLPID TID WIMGE XWR XWR F P V
   ----- ----------------- ------------------- -- ----- --- ----- --- --- - - -
   0     c0000000-cfffffff 000000000-00fffffff 00     0   0     M XWR XWR 0 1 1
   1     d0000000-dfffffff 010000000-01fffffff 00     0   0     M XWR XWR 0 1 1
   2     e0000000-efffffff 020000000-02fffffff 00     0   0     M XWR XWR 0 1 1

Actually this conditional code was used for two legacy functions:

  1: supporting KGDB breakpoints. KGDB already dropped this; it now
     uses its core write path to set breakpoints.
  2: io_block_mapping(), to create TLB entries of segment size (not
     PAGE_SIZE) for device IO space. This use case has also been
     removed from the latest PowerPC kernel.

However, there may still be a use case for it in the future, like
large user pages, so we can't remove it entirely. As an alternative,
we match on all bits of _PAGE_USER instead of just any bits, so the
case where just _PAGE_BAP_SR is set can't sneak through.
With this done, the TLB entries appear without the user (U) XWR bits,
as below:

  ------- L2 TLB1
                                               GT                SSS UUU   V I
   Row   Logical           Physical            SS TLPID TID WIMGE XWR XWR F P V
   ----- ----------------- ------------------- -- ----- --- ----- --- --- - - -
   0     c0000000-cfffffff 000000000-00fffffff 00     0   0     M XWR     0 1 1
   1     d0000000-dfffffff 010000000-01fffffff 00     0   0     M XWR     0 1 1
   2     e0000000-efffffff 020000000-02fffffff 00     0   0     M XWR     0 1 1

Signed-off-by: Tiejun Chen
Signed-off-by: Paul Gortmaker
Signed-off-by: Kumar Gala
---
 arch/powerpc/include/asm/pte-common.h | 7 +++++++
 arch/powerpc/mm/fsl_booke_mmu.c       | 3 ++-
 2 files changed, 9 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index f2b370180a09..76bb195e4f24 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -171,6 +171,13 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 /* Make modules code happy. We don't set RO yet */
 #define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

+/*
+ * Don't just check for any non-zero bits in _PAGE_USER, since for Book3E
+ * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
+ * _PAGE_USER. Need to explicitly match the _PAGE_BAP_UR bit in that case too.
+ */
+#define pte_user(val)		((val & _PAGE_USER) == _PAGE_USER)
+
 /* Advertise special mapping type for AGP */
 #define PAGE_AGP		(PAGE_KERNEL_NC)
 #define HAVE_PAGE_AGP
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 4b66a1ece6d8..1b4354db51bb 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -137,7 +137,8 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
 	if (mmu_has_feature(MMU_FTR_BIG_PHYS))
 		TLBCAM[index].MAS7 = (u64)phys >> 32;

-	if (flags & _PAGE_USER) {
+	/* Below is unlikely -- only for large user pages or similar */
+	if (pte_user(flags)) {
 		TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
 		TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
 	}
-- cgit v1.2.3
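[Editorial note] The distinction pte_user() draws is easy to see in miniature: the old test accepted any overlap with _PAGE_USER, the new one requires every bit. The bit values below are illustrative stand-ins for the Book3E definitions (the real ones live in the pte-book3e.h header), so treat this as a sketch rather than the authoritative headers:

	/* Illustrative Book3E-style permission bits (assumed values) */
	#define _PAGE_BAP_SR	0x004		/* supervisor read */
	#define _PAGE_BAP_UR	0x008		/* user read */
	#define _PAGE_USER	(_PAGE_BAP_UR | _PAGE_BAP_SR)

	unsigned long kernel_flags = _PAGE_BAP_SR;	/* as in PAGE_KERNEL_X */

	/* Old test: true, because *some* _PAGE_USER bit is set */
	int old_test = (kernel_flags & _PAGE_USER) != 0;

	/* New test: false, because _PAGE_BAP_UR is missing */
	int new_test = (kernel_flags & _PAGE_USER) == _PAGE_USER;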
From 988cf86d4f0da4150e808300c145ba87c0aad02f Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Fri, 8 Oct 2010 02:13:25 -0500
Subject: powerpc/fsl-booke: Add support for FSL Arch v1.0 MMU in setup_page_sizes

Update setup_page_sizes() to support an FSL-style MMU v1.0
implementation. On such a processor, we don't have the TLB0PS or
EPTCFG registers (and accessing these registers may cause
exceptions), so we need to parse the older format of TLBnCFG for page
size support. Additionally, since we are an FSL implementation,
assume we have two TLB arrays and that the second array contains the
variable size pages.

Signed-off-by: Kumar Gala
---
 arch/powerpc/include/asm/mmu-book3e.h | 15 +++++++++++++
 arch/powerpc/mm/tlb_nohash.c          | 42 ++++++++++++++++++++++++++++++++---
 2 files changed, 54 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 87a1d787c5b6..8eaed81ea642 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -114,6 +114,17 @@

 #define MAS7_RPN		0xFFFFFFFF

+/* Bit definitions for MMUCFG */
+#define MMUCFG_MAVN	0x00000003	/* MMU Architecture Version Number */
+#define MMUCFG_MAVN_V1	0x00000000	/* v1.0 */
+#define MMUCFG_MAVN_V2	0x00000001	/* v2.0 */
+#define MMUCFG_NTLBS	0x0000000c	/* Number of TLBs */
+#define MMUCFG_PIDSIZE	0x000007c0	/* PID Reg Size */
+#define MMUCFG_TWC	0x00008000	/* TLB Write Conditional (v2.0) */
+#define MMUCFG_LRAT	0x00010000	/* LRAT Supported (v2.0) */
+#define MMUCFG_RASIZE	0x00fe0000	/* Real Addr Size */
+#define MMUCFG_LPIDSIZE	0x0f000000	/* LPID Reg Size */
+
 /* Bit definitions for MMUCSR0 */
 #define MMUCSR0_TLB1FI	0x00000002	/* TLB1 Flash invalidate */
 #define MMUCSR0_TLB0FI	0x00000004	/* TLB0 Flash invalidate */
@@ -133,6 +144,10 @@
 #define TLBnCFG_GTWE		0x00010000	/* Guest can write */
 #define TLBnCFG_IND		0x00020000	/* IND entries supported */
 #define TLBnCFG_PT		0x00040000	/* Can load from page table */
+#define TLBnCFG_MINSIZE		0x00f00000	/* Minimum Page Size (v1.0) */
+#define TLBnCFG_MINSIZE_SHIFT	20
+#define TLBnCFG_MAXSIZE		0x000f0000	/* Maximum Page Size (v1.0) */
+#define TLBnCFG_MAXSIZE_SHIFT	16
 #define TLBnCFG_ASSOC		0xff000000	/* Associativity */

 /* TLBnPS encoding */
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index fe391e942521..665189920762 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -349,11 +349,47 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)

 static void setup_page_sizes(void)
 {
-	unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
-	unsigned int tlb0ps = mfspr(SPRN_TLB0PS);
-	unsigned int eptcfg = mfspr(SPRN_EPTCFG);
+	unsigned int tlb0cfg;
+	unsigned int tlb0ps;
+	unsigned int eptcfg;
 	int i, psize;

+#ifdef CONFIG_PPC_FSL_BOOK3E
+	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
+
+	if (((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) &&
+	    (mmu_has_feature(MMU_FTR_TYPE_FSL_E))) {
+		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
+		unsigned int min_pg, max_pg;
+
+		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
+		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
+
+		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+			struct mmu_psize_def *def;
+			unsigned int shift;
+
+			def = &mmu_psize_defs[psize];
+			shift = def->shift;
+
+			if (shift == 0)
+				continue;
+
+			/* adjust to be in terms of 4^shift Kb */
+			shift = (shift - 10) >> 1;
+
+			if ((shift >= min_pg) && (shift <= max_pg))
+				def->flags |= MMU_PAGE_SIZE_DIRECT;
+		}
+
+		goto no_indirect;
+	}
+#endif
+
+	tlb0cfg = mfspr(SPRN_TLB0CFG);
+	tlb0ps = mfspr(SPRN_TLB0PS);
+	eptcfg = mfspr(SPRN_EPTCFG);
+
 	/* Look for supported direct sizes */
 	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
 		struct mmu_psize_def *def = &mmu_psize_defs[psize];
-- cgit v1.2.3
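[Editorial note] The "(shift - 10) >> 1" conversion above is worth unpacking: on a v1.0 FSL MMU the TLBnCFG MINSIZE/MAXSIZE fields encode page sizes as powers of 4 in KB, so a page of 2^shift bytes is 2^(shift - 10) KB = 4^((shift - 10) / 2) KB. A self-contained sketch of the range check (helper names are hypothetical; the TLBnCFG masks are the ones the patch adds):

	/* 2^shift bytes expressed as 4^K KB: 4K -> 1, 64K -> 3, 1M -> 5, 256M -> 9 */
	static unsigned int shift_to_pow4k(unsigned int shift)
	{
		return (shift - 10) >> 1;
	}

	/* Is a page of 2^shift bytes within this TLB's supported size range? */
	static int size_is_direct(unsigned int shift, unsigned int tlb1cfg)
	{
		unsigned int min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		unsigned int max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
		unsigned int k = shift_to_pow4k(shift);

		return k >= min_pg && k <= max_pg;
	}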
From 55fd766b5fad8240b7a6e994b5779a46d28f73d4 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Fri, 16 Oct 2009 18:48:40 -0500
Subject: powerpc/fsl-booke64: Use TLB CAMs to cover linear mapping on FSL 64-bit chips

Freescale parts typically have a TLB array for large mappings into
which we can bolt the linear mapping. We reuse the code that already
exists on PPC32 to set up the linear mapping on the 64-bit side so
that it is covered by bolted TLB entries. We utilize a quarter of the
variable size TLB array for this purpose.

Additionally, we limit the amount of memory to what we can cover via
bolted entries so we don't get secondary faults in the TLB miss
handlers. We should fix this limitation in the future.

Signed-off-by: Kumar Gala
---
 arch/powerpc/kernel/asm-offsets.c |  4 ++--
 arch/powerpc/mm/Makefile          |  2 +-
 arch/powerpc/mm/fsl_booke_mmu.c   | 12 +++++++-----
 arch/powerpc/mm/mmu_decl.h        |  5 ++++-
 arch/powerpc/mm/tlb_nohash.c      | 14 ++++++++++++++
 arch/powerpc/mm/tlb_nohash_low.S  |  2 +-
 6 files changed, 29 insertions(+), 10 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c63494090854..c3e01945ad4f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -61,7 +61,7 @@
 #endif
 #endif

-#if defined(CONFIG_FSL_BOOKE)
+#if defined(CONFIG_PPC_FSL_BOOK3E)
 #include "../mm/mmu_decl.h"
 #endif

@@ -470,7 +470,7 @@ int main(void)
 	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
 	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
 #endif
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
 	DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
 	DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 53102f306880..bdca46e08382 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
 				   mmu_context_hash$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_40x)		+= 40x_mmu.o
 obj-$(CONFIG_44x)		+= 44x_mmu.o
-obj-$(CONFIG_FSL_BOOKE)		+= fsl_booke_mmu.o
+obj-$(CONFIG_PPC_FSL_BOOK3E)	+= fsl_booke_mmu.o
 obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
 obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
 ifeq ($(CONFIG_HUGETLB_PAGE),y)
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 1b4354db51bb..67bc8a7c7e0b 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -56,11 +56,6 @@

 unsigned int tlbcam_index;

-
-#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
-#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
-#endif
-
 #define NUM_TLBCAMS	(64)
 struct tlbcam TLBCAM[NUM_TLBCAMS];

@@ -185,6 +180,12 @@ unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
 	return amount_mapped;
 }

+#ifdef CONFIG_PPC32
+
+#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
+#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
+#endif
+
 unsigned long __init mmu_mapin_ram(unsigned long top)
 {
 	return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
@@ -216,3 +217,4 @@ void __init adjust_total_lowmem(void)

 	__initial_memory_limit_addr = memstart_addr + __max_low_memory;
 }
+#endif
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 63b84a0d3b10..dd0a2589591d 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -140,10 +140,13 @@ extern void wii_memory_fixups(void);
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(unsigned long top);

-#elif defined(CONFIG_FSL_BOOKE)
+#elif defined(CONFIG_PPC_FSL_BOOK3E)
+extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx);
+#ifdef CONFIG_PPC32
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(unsigned long top);
 extern void adjust_total_lowmem(void);
+#endif
 extern void loadcam_entry(unsigned int index);

 struct tlbcam {
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 665189920762..61fe32a256da 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -541,6 +541,20 @@ static void __early_init_mmu(int boot_cpu)
 	 */
 	linear_map_top = memblock_end_of_DRAM();

+#ifdef CONFIG_PPC_FSL_BOOK3E
+	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+		unsigned int num_cams;
+
+		/* use a quarter of the TLBCAM for bolted linear map */
+		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
+		linear_map_top = map_mem_in_cams(linear_map_top, num_cams);
+
+		/* limit memory so we don't have linear faults */
+		memblock_enforce_memory_limit(linear_map_top);
+		memblock_analyze();
+	}
+#endif
+
 	/* A sync won't hurt us after mucking around with
 	 * the MMU configuration */
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index b9d9fed8f36e..af405eefe48d 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -367,7 +367,7 @@ _GLOBAL(set_context)
 #error Unsupported processor type !
 #endif

-#if defined(CONFIG_FSL_BOOKE)
+#if defined(CONFIG_PPC_FSL_BOOK3E)
 /*
  * extern void loadcam_entry(unsigned int index)
  *
-- cgit v1.2.3
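[Editorial note] To make the sizing in __early_init_mmu() above concrete: a quarter of TLB1 is reserved for bolted IPROT entries, so the reachable linear map is bounded by that entry count times the largest size one CAM can map, and memblock is then clamped to what map_mem_in_cams() actually covered so the bolted mapping can never take a miss. A sketch with hypothetical numbers (the real entry count comes from TLB1CFG at boot, and the per-CAM maximum is implementation dependent):

	/* Hypothetical helper: upper bound on RAM coverable by bolted CAMs */
	static unsigned long max_bolted_linear(unsigned int tlb1_entries,
					       unsigned long max_cam_bytes)
	{
		unsigned int num_cams = tlb1_entries / 4;	/* quarter for bolting */

		return (unsigned long)num_cams * max_cam_bytes;
	}

	/* e.g. 64 TLB1 entries at 256MB per CAM -> 16 * 256MB = 4GB coverable */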