author     Michael Ellerman <michael@ellerman.id.au>    2005-12-04 18:39:37 +1100
committer  Paul Mackerras <paulus@samba.org>            2006-01-09 14:52:21 +1100
commit     0cc4746cadda16826a1b3214c042a2f75445b71c
tree       ec8decc81a3f9fd09454ff208fd3b82cf5bdb730
parent     8c4f1f2958ff9d4a6760f3bdd0cfb7d2b9e12093
download   linux-0cc4746cadda16826a1b3214c042a2f75445b71c.tar.bz2
[PATCH] powerpc: Reroute interrupts from 0 + offset to PHYSICAL_START + offset
Regardless of where the kernel is linked, we always get interrupts at low
addresses. This patch creates trampolines in the first three pages of memory,
where interrupts land, and patches those addresses to jump into the real
kernel code at PHYSICAL_START.
We also need to reserve the memory the trampolines occupy, plus a bit more, in prom.c.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
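
For readers unfamiliar with the branch-range limitation the trampoline works around, here is a minimal standalone sketch (not the kernel's create_instruction()/create_branch() helpers) of how the two instruction words could be produced. It assumes PHYSICAL_START is 32 MB and the standard PowerPC I-form branch encoding (opcode 18, signed 26-bit word-aligned displacement, AA = LK = 0):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PPC_NOP     0x60000000u	/* ori r0,r0,0 */
#define PHYS_START  0x2000000ul	/* assumed: kdump kernel runs at 32 MB */

/* Encode an I-form relative branch ("b target") to be placed at `addr`.
 * The signed 26-bit displacement must be word aligned and strictly less
 * than 32 MB forward, which is exactly why the trampoline needs the nop. */
static uint32_t branch_insn(unsigned long addr, unsigned long target)
{
	long disp = (long)(target - addr);

	assert((disp & 3) == 0 && disp < 0x2000000L && disp >= -0x2000000L);
	return 0x48000000u | ((unsigned long)disp & 0x03fffffcu);
}

int main(void)
{
	unsigned long addr = 0x100;	/* e.g. the system reset vector */
	uint32_t word0 = PPC_NOP;	/* written at addr     */
	uint32_t word1 = branch_insn(addr + 4, addr + PHYS_START);	/* at addr + 4 */

	/* Branching to addr executes the nop, then jumps to addr + 32 MB. */
	printf("0x%03lx: %08x %08x -> jumps to 0x%lx\n",
	       addr, word0, word1, addr + PHYS_START);
	return 0;
}
```

The branch at addr + 4 uses the maximum forward displacement of 32 MB - 4, which lands exactly at addr + PHYSICAL_START; the patch emits this nop/branch pair via create_instruction() and create_branch() for every 8-byte slot between KDUMP_TRAMPOLINE_START and KDUMP_TRAMPOLINE_END, plus the firmware-assisted NMI entry points.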
-rw-r--r--  arch/powerpc/kernel/Makefile     |  1
-rw-r--r--  arch/powerpc/kernel/crash_dump.c | 53
-rw-r--r--  arch/powerpc/kernel/prom.c       |  6
-rw-r--r--  arch/powerpc/kernel/setup_64.c   |  5
-rw-r--r--  include/asm-powerpc/kdump.h      | 13
5 files changed, 77 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 89714929f444..5719248d344d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_IBMVIO)		+= vio.o
 obj-$(CONFIG_IBMEBUS)		+= ibmebus.o
 obj-$(CONFIG_GENERIC_TBSYNC)	+= smp-tbsync.o
 obj64-$(CONFIG_PPC_MULTIPLATFORM)	+= nvram_64.o
+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 
 ifeq ($(CONFIG_PPC_MERGE),y)
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
new file mode 100644
index 000000000000..63919bcfc9fe
--- /dev/null
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -0,0 +1,53 @@
+/*
+ * Routines for doing kexec-based kdump.
+ *
+ * Copyright (C) 2005, IBM Corp.
+ *
+ * Created by: Michael Ellerman
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#undef DEBUG
+
+#include <asm/kdump.h>
+#include <asm/lmb.h>
+#include <asm/firmware.h>
+
+#ifdef DEBUG
+#include <asm/udbg.h>
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+static void __init create_trampoline(unsigned long addr)
+{
+	/* The maximum range of a single instruction branch, is the current
+	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
+	 * need to branch to current address + 32 MB. So we insert a nop at
+	 * the trampoline address, then the next instruction (+ 4 bytes)
+	 * does a branch to (32 MB - 4). The net effect is that when we
+	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
+	 * two instructions it doesn't require any registers.
+	 */
+	create_instruction(addr, 0x60000000); /* nop */
+	create_branch(addr + 4, addr + PHYSICAL_START, 0);
+}
+
+void __init kdump_setup(void)
+{
+	unsigned long i;
+
+	DBG(" -> kdump_setup()\n");
+
+	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
+		create_trampoline(i);
+	}
+
+	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
+	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
+
+	DBG(" <- kdump_setup()\n");
+}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 3bf968e74095..9aac77ca3167 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -37,6 +37,7 @@
 #include <asm/processor.h>
 #include <asm/irq.h>
 #include <asm/io.h>
+#include <asm/kdump.h>
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/mmu.h>
@@ -1335,11 +1336,14 @@ void __init early_init_devtree(void *params)
 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 	lmb_enforce_memory_limit(memory_limit);
 	lmb_analyze();
-	lmb_reserve(0, __pa(klimit));
 
 	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
 
 	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
+	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+#ifdef CONFIG_CRASH_DUMP
+	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
+#endif
 	early_reserve_mem();
 
 	DBG("Scanning CPUs ...\n");
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 6509dd7c2f8f..e67120e34652 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -34,6 +34,7 @@
 #include <linux/serial.h>
 #include <linux/serial_8250.h>
 #include <asm/io.h>
+#include <asm/kdump.h>
 #include <asm/prom.h>
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -268,6 +269,10 @@
 	}
 	ppc_md = **mach;
 
+#ifdef CONFIG_CRASH_DUMP
+	kdump_setup();
+#endif
+
 	DBG("Found, Initializing memory management...\n");
 
 	/*
diff --git a/include/asm-powerpc/kdump.h b/include/asm-powerpc/kdump.h
new file mode 100644
index 000000000000..a87aed00d61f
--- /dev/null
+++ b/include/asm-powerpc/kdump.h
@@ -0,0 +1,13 @@
+#ifndef _PPC64_KDUMP_H
+#define _PPC64_KDUMP_H
+
+/* How many bytes to reserve at zero for kdump. The reserve limit should
+ * be greater or equal to the trampoline's end address. */
+#define KDUMP_RESERVE_LIMIT	0x8000
+
+#define KDUMP_TRAMPOLINE_START	0x0100
+#define KDUMP_TRAMPOLINE_END	0x3000
+
+extern void kdump_setup(void);
+
+#endif /* __PPC64_KDUMP_H */
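
As a quick sanity check of the constants introduced in include/asm-powerpc/kdump.h (a standalone, hypothetical user-space illustration, not part of the patch): every trampoline that kdump_setup() writes must fall inside the region that prom.c now reserves at physical zero.

```c
#include <assert.h>
#include <stdio.h>

/* Values from include/asm-powerpc/kdump.h in this patch. */
#define KDUMP_RESERVE_LIMIT	0x8000
#define KDUMP_TRAMPOLINE_START	0x0100
#define KDUMP_TRAMPOLINE_END	0x3000

int main(void)
{
	unsigned long i, end = 0;

	/* kdump_setup() writes an 8-byte (nop + branch) trampoline at every
	 * 8-byte slot in [KDUMP_TRAMPOLINE_START, KDUMP_TRAMPOLINE_END). */
	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8)
		end = i + 8;

	assert(end <= KDUMP_RESERVE_LIMIT);
	printf("last trampoline ends at 0x%lx, reserve limit 0x%x\n",
	       end, KDUMP_RESERVE_LIMIT);
	return 0;
}
```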