author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-08-04 10:26:03 +1000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-08-04 10:26:03 +1000
commit     412a4ac5e9cf7fdeb6af562c25547a9b9da7674f (patch)
tree       a8ce13cbc9c47c99799e5e3e3ad26ba78274ee73 /arch/powerpc/kernel/setup_64.c
parent     e8e5c2155b0035b6e04f29be67f6444bc914005b (diff)
parent     0c2daaafcdec726e89cbccca61d576de8429c537 (diff)
download   linux-412a4ac5e9cf7fdeb6af562c25547a9b9da7674f.tar.bz2
Merge commit 'gcl/next' into next
Diffstat (limited to 'arch/powerpc/kernel/setup_64.c')
-rw-r--r--  arch/powerpc/kernel/setup_64.c  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 96e662c1d46b..1bee4b68fa45 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -34,7 +34,7 @@
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
@@ -148,7 +148,7 @@ early_param("smt-enabled", early_smt_enabled);
* the CPU that ignores the top 2 bits of the address in real
* mode so we can access kernel globals normally provided we
* only toy with things in the RMO region. From here, we do
- * some early parsing of the device-tree to setup out LMB
+ * some early parsing of the device-tree to setup out MEMBLOCK
* data structures, and allocate & initialize the hash table
* and segment tables so we can start running with translation
* enabled.
@@ -394,7 +394,7 @@ void __init setup_system(void)
printk("-----------------------------------------------------\n");
printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
- printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size());
+ printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
if (ppc64_caches.dline_size != 0x80)
printk("ppc64_caches.dcache_line_size = 0x%x\n",
ppc64_caches.dline_size);
@@ -433,10 +433,10 @@ static void __init irqstack_early_init(void)
*/
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)
- __va(lmb_alloc_base(THREAD_SIZE,
+ __va(memblock_alloc_base(THREAD_SIZE,
THREAD_SIZE, limit));
hardirq_ctx[i] = (struct thread_info *)
- __va(lmb_alloc_base(THREAD_SIZE,
+ __va(memblock_alloc_base(THREAD_SIZE,
THREAD_SIZE, limit));
}
}
@@ -448,11 +448,11 @@ static void __init exc_lvl_early_init(void)
for_each_possible_cpu(i) {
critirq_ctx[i] = (struct thread_info *)
- __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
dbgirq_ctx[i] = (struct thread_info *)
- __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
mcheckirq_ctx[i] = (struct thread_info *)
- __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
}
}
#else
@@ -477,11 +477,11 @@ static void __init emergency_stack_init(void)
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
*/
- limit = min(slb0_limit(), lmb.rmo_size);
+ limit = min(slb0_limit(), memblock.rmo_size);
for_each_possible_cpu(i) {
unsigned long sp;
- sp = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+ sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
sp += THREAD_SIZE;
paca[i].emergency_sp = __va(sp);
}
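
This merge brings arch/powerpc/kernel/setup_64.c in line with the tree-wide LMB -> MEMBLOCK rename: lmb_alloc(), lmb_alloc_base(), lmb_phys_mem_size() and lmb.rmo_size become memblock_alloc(), memblock_alloc_base(), memblock_phys_mem_size() and memblock.rmo_size, with unchanged semantics. Below is a minimal sketch of the renamed calls as used in the hunks above, assuming the memblock prototypes of this era (allocations return a physical address that is mapped through __va()); the function name example_stack_alloc() and the hard-coded limit are illustrative stand-ins, not part of the patch.

/*
 * Sketch only: mirrors the renamed allocator calls shown in the hunks
 * above.  example_stack_alloc() and the hard-coded limit are
 * illustrative, not taken from the patch.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/thread_info.h>

static void __init example_stack_alloc(void)
{
	/* Illustrative cap so early real-mode code can reach the stacks. */
	u64 limit = 0x10000000;
	unsigned int i;

	printk("physicalMemorySize = 0x%llx\n",
	       (unsigned long long)memblock_phys_mem_size());

	for_each_possible_cpu(i) {
		/* THREAD_SIZE-aligned block placed anywhere in memory. */
		void *stack = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));

		/* Same, but constrained below 'limit' (cf. emergency stacks). */
		void *low = __va(memblock_alloc_base(THREAD_SIZE,
						     THREAD_SIZE, limit));
		(void)stack;
		(void)low;
	}
}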