From 362ff7b2ac0234152b4a334dd006b77f4fa2ab23 Mon Sep 17 00:00:00 2001 From: Jake Moilanen Date: Wed, 18 Oct 2006 10:47:22 -0500 Subject: [POWERPC] Add 970GX cputable entry 970GX cputable entry from Steve Winiecki. Signed-off-by: Jake Moilanen arch/powerpc/kernel/cputable.c | 15 +++++++++++++++ arch/powerpc/oprofile/op_model_power4.c | 2 +- include/asm-powerpc/reg.h | 1 + 3 files changed, 17 insertions(+), 1 deletion(-) Signed-off-by: Paul Mackerras --- include/asm-powerpc/reg.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h index 8fb96811b55d..fde5c804eccb 100644 --- a/include/asm-powerpc/reg.h +++ b/include/asm-powerpc/reg.h @@ -591,6 +591,7 @@ #define PV_630 0x0040 #define PV_630p 0x0041 #define PV_970MP 0x0044 +#define PV_970GX 0x0045 #define PV_BE 0x0070 #define PV_PA6T 0x0090 -- cgit v1.2.3 From 42c4aaadb737e0e672b3fb86b2c41ff59f0fb8bc Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 24 Oct 2006 16:42:40 +1000 Subject: [POWERPC] Consolidate feature fixup code There are currently two versions of the functions for applying the feature fixups, one for CPU features and one for firmware features. In addition, they are both in assembly and with separate implementations for 32 and 64 bits. identify_cpu() is also implemented in assembly and separately for 32 and 64 bits. This patch replaces them with a pair of C functions. The call sites are slightly moved on ppc64 as well to be called from C instead of from assembly, though it's a very small change, and thus shouldn't cause any problem. Signed-off-by: Benjamin Herrenschmidt Acked-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/cputable.c | 72 ++++++++++++++++++- arch/powerpc/kernel/head_64.S | 19 ----- arch/powerpc/kernel/misc_32.S | 74 -------------------- arch/powerpc/kernel/misc_64.S | 124 --------------------------------- arch/powerpc/kernel/setup_32.c | 8 ++- arch/powerpc/kernel/setup_64.c | 11 +++ arch/powerpc/platforms/iseries/setup.c | 5 ++ arch/ppc/kernel/misc.S | 74 -------------------- arch/ppc/kernel/setup.c | 10 +-- include/asm-powerpc/cputable.h | 7 +- include/asm-powerpc/firmware.h | 2 + 11 files changed, 106 insertions(+), 300 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index f23aad66a79e..6fdfaa4a82b8 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -18,6 +18,7 @@ #include #include +#include /* for PTRRELOC on ARCH=ppc */ struct cpu_spec* cur_cpu_spec = NULL; EXPORT_SYMBOL(cur_cpu_spec); @@ -73,7 +74,7 @@ extern void __restore_cpu_ppc970(void); #define PPC_FEATURE_SPE_COMP 0 #endif -struct cpu_spec cpu_specs[] = { +static struct cpu_spec cpu_specs[] = { #ifdef CONFIG_PPC64 { /* Power3 */ .pvr_mask = 0xffff0000, @@ -1167,3 +1168,72 @@ struct cpu_spec cpu_specs[] = { #endif /* !CLASSIC_PPC */ #endif /* CONFIG_PPC32 */ }; + +struct cpu_spec *identify_cpu(unsigned long offset) +{ + struct cpu_spec *s = cpu_specs; + struct cpu_spec **cur = &cur_cpu_spec; + unsigned int pvr = mfspr(SPRN_PVR); + int i; + + s = PTRRELOC(s); + cur = PTRRELOC(cur); + + if (*cur != NULL) + return PTRRELOC(*cur); + + for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) + if ((pvr & s->pvr_mask) == s->pvr_value) { + *cur = cpu_specs + i; +#ifdef CONFIG_PPC64 + /* ppc64 expects identify_cpu to also call setup_cpu + * for that processor. I will consolidate that at a + * later time, for now, just use our friend #ifdef. 
+ * we also don't need to PTRRELOC the function pointer + * on ppc64 as we are running at 0 in real mode. + */ + if (s->cpu_setup) { + s->cpu_setup(offset, s); + } +#endif /* CONFIG_PPC64 */ + return s; + } + BUG(); + return NULL; +} + +void do_feature_fixups(unsigned long offset, unsigned long value, + void *fixup_start, void *fixup_end) +{ + struct fixup_entry { + unsigned long mask; + unsigned long value; + unsigned int *start; + unsigned int *end; + } *fcur, *fend; + + fcur = fixup_start; + fend = fixup_end; + + for (; fcur < fend; fcur++) { + unsigned int *pstart, *pend, *p; + + if ((value & fcur->mask) == fcur->value) + continue; + + /* These PTRRELOCs will disappear once the new scheme for + * modules and vdso is implemented + */ + pstart = PTRRELOC(fcur->start); + pend = PTRRELOC(fcur->end); + + for (p = pstart; p < pend; p++) { + *p = 0x60000000u; + asm volatile ("dcbst 0, %0" : : "r" (p)); + } + asm volatile ("sync" : : : "memory"); + for (p = pstart; p < pend; p++) + asm volatile ("icbi 0,%0" : : "r" (p)); + asm volatile ("sync; isync" : : : "memory"); + } +} diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 645c7f10fb28..f12e3c55520d 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -1580,11 +1580,6 @@ _STATIC(__start_initialization_iSeries) li r0,0 stdu r0,-STACK_FRAME_OVERHEAD(r1) - LOAD_REG_IMMEDIATE(r3,cpu_specs) - LOAD_REG_IMMEDIATE(r4,cur_cpu_spec) - li r5,0 - bl .identify_cpu - LOAD_REG_IMMEDIATE(r2,__toc_start) addi r2,r2,0x4000 addi r2,r2,0x4000 @@ -1964,13 +1959,6 @@ _STATIC(start_here_multiplatform) addi r2,r2,0x4000 add r2,r2,r26 - LOAD_REG_IMMEDIATE(r3, cpu_specs) - add r3,r3,r26 - LOAD_REG_IMMEDIATE(r4,cur_cpu_spec) - add r4,r4,r26 - mr r5,r26 - bl .identify_cpu - /* Do very early kernel initializations, including initial hash table, * stab and slb setup before we turn on relocation. */ @@ -2000,13 +1988,6 @@ _STATIC(start_here_common) li r0,0 stdu r0,-STACK_FRAME_OVERHEAD(r1) - /* Apply the CPUs-specific fixups (nop out sections not relevant - * to this CPU - */ - li r3,0 - bl .do_cpu_ftr_fixups - bl .do_fw_ftr_fixups - /* ptr to current */ LOAD_REG_IMMEDIATE(r4, init_task) std r4,PACACURRENT(r13) diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 88fd73fdf048..412bea3cf813 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -101,80 +101,6 @@ _GLOBAL(reloc_got2) mtlr r11 blr -/* - * identify_cpu, - * called with r3 = data offset and r4 = CPU number - * doesn't change r3 - */ -_GLOBAL(identify_cpu) - addis r8,r3,cpu_specs@ha - addi r8,r8,cpu_specs@l - mfpvr r7 -1: - lwz r5,CPU_SPEC_PVR_MASK(r8) - and r5,r5,r7 - lwz r6,CPU_SPEC_PVR_VALUE(r8) - cmplw 0,r6,r5 - beq 1f - addi r8,r8,CPU_SPEC_ENTRY_SIZE - b 1b -1: - addis r6,r3,cur_cpu_spec@ha - addi r6,r6,cur_cpu_spec@l - sub r8,r8,r3 - stw r8,0(r6) - blr - -/* - * do_cpu_ftr_fixups - goes through the list of CPU feature fixups - * and writes nop's over sections of code that don't apply for this cpu. 
- * r3 = data offset (not changed) - */ -_GLOBAL(do_cpu_ftr_fixups) - /* Get CPU 0 features */ - addis r6,r3,cur_cpu_spec@ha - addi r6,r6,cur_cpu_spec@l - lwz r4,0(r6) - add r4,r4,r3 - lwz r4,CPU_SPEC_FEATURES(r4) - - /* Get the fixup table */ - addis r6,r3,__start___ftr_fixup@ha - addi r6,r6,__start___ftr_fixup@l - addis r7,r3,__stop___ftr_fixup@ha - addi r7,r7,__stop___ftr_fixup@l - - /* Do the fixup */ -1: cmplw 0,r6,r7 - bgelr - addi r6,r6,16 - lwz r8,-16(r6) /* mask */ - and r8,r8,r4 - lwz r9,-12(r6) /* value */ - cmplw 0,r8,r9 - beq 1b - lwz r8,-8(r6) /* section begin */ - lwz r9,-4(r6) /* section end */ - subf. r9,r8,r9 - beq 1b - /* write nops over the section of code */ - /* todo: if large section, add a branch at the start of it */ - srwi r9,r9,2 - mtctr r9 - add r8,r8,r3 - lis r0,0x60000000@h /* nop */ -3: stw r0,0(r8) - andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l - beq 2f - dcbst 0,r8 /* suboptimal, but simpler */ - sync - icbi 0,r8 -2: addi r8,r8,4 - bdnz 3b - sync /* additional sync needed on g4 */ - isync - b 1b - /* * call_setup_cpu - call the setup_cpu function for this cpu * r3 = data offset, r24 = cpu number diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index c70e20708a1f..21fd2c662a99 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -246,130 +246,6 @@ _GLOBAL(__flush_dcache_icache) isync blr -/* - * identify_cpu and calls setup_cpu - * In: r3 = base of the cpu_specs array - * r4 = address of cur_cpu_spec - * r5 = relocation offset - */ -_GLOBAL(identify_cpu) - mfpvr r7 -1: - lwz r8,CPU_SPEC_PVR_MASK(r3) - and r8,r8,r7 - lwz r9,CPU_SPEC_PVR_VALUE(r3) - cmplw 0,r9,r8 - beq 1f - addi r3,r3,CPU_SPEC_ENTRY_SIZE - b 1b -1: - sub r0,r3,r5 - std r0,0(r4) - ld r4,CPU_SPEC_SETUP(r3) - cmpdi 0,r4,0 - add r4,r4,r5 - beqlr - ld r4,0(r4) - add r4,r4,r5 - mtctr r4 - /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */ - mr r4,r3 - mr r3,r5 - bctr - -/* - * do_cpu_ftr_fixups - goes through the list of CPU feature fixups - * and writes nop's over sections of code that don't apply for this cpu. - * r3 = data offset (not changed) - */ -_GLOBAL(do_cpu_ftr_fixups) - /* Get CPU 0 features */ - LOAD_REG_IMMEDIATE(r6,cur_cpu_spec) - sub r6,r6,r3 - ld r4,0(r6) - sub r4,r4,r3 - ld r4,CPU_SPEC_FEATURES(r4) - /* Get the fixup table */ - LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup) - sub r6,r6,r3 - LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup) - sub r7,r7,r3 - /* Do the fixup */ -1: cmpld r6,r7 - bgelr - addi r6,r6,32 - ld r8,-32(r6) /* mask */ - and r8,r8,r4 - ld r9,-24(r6) /* value */ - cmpld r8,r9 - beq 1b - ld r8,-16(r6) /* section begin */ - ld r9,-8(r6) /* section end */ - subf. r9,r8,r9 - beq 1b - /* write nops over the section of code */ - /* todo: if large section, add a branch at the start of it */ - srwi r9,r9,2 - mtctr r9 - sub r8,r8,r3 - lis r0,0x60000000@h /* nop */ -3: stw r0,0(r8) - andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l - beq 2f - dcbst 0,r8 /* suboptimal, but simpler */ - sync - icbi 0,r8 -2: addi r8,r8,4 - bdnz 3b - sync /* additional sync needed on g4 */ - isync - b 1b - -/* - * do_fw_ftr_fixups - goes through the list of firmware feature fixups - * and writes nop's over sections of code that don't apply for this firmware. 
- * r3 = data offset (not changed) - */ -_GLOBAL(do_fw_ftr_fixups) - /* Get firmware features */ - LOAD_REG_IMMEDIATE(r6,powerpc_firmware_features) - sub r6,r6,r3 - ld r4,0(r6) - /* Get the fixup table */ - LOAD_REG_IMMEDIATE(r6,__start___fw_ftr_fixup) - sub r6,r6,r3 - LOAD_REG_IMMEDIATE(r7,__stop___fw_ftr_fixup) - sub r7,r7,r3 - /* Do the fixup */ -1: cmpld r6,r7 - bgelr - addi r6,r6,32 - ld r8,-32(r6) /* mask */ - and r8,r8,r4 - ld r9,-24(r6) /* value */ - cmpld r8,r9 - beq 1b - ld r8,-16(r6) /* section begin */ - ld r9,-8(r6) /* section end */ - subf. r9,r8,r9 - beq 1b - /* write nops over the section of code */ - /* todo: if large section, add a branch at the start of it */ - srwi r9,r9,2 - mtctr r9 - sub r8,r8,r3 - lis r0,0x60000000@h /* nop */ -3: stw r0,0(r8) -BEGIN_FTR_SECTION - dcbst 0,r8 /* suboptimal, but simpler */ - sync - icbi 0,r8 -END_FTR_SECTION_IFSET(CPU_FTR_SPLIT_ID_CACHE) - addi r8,r8,4 - bdnz 3b - sync /* additional sync needed on g4 */ - isync - b 1b #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) /* diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 191d0ab09222..769e511783b0 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -91,6 +91,7 @@ int ucache_bsize; unsigned long __init early_init(unsigned long dt_ptr) { unsigned long offset = reloc_offset(); + struct cpu_spec *spec; /* First zero the BSS -- use memset_io, some platforms don't have * caches on yet */ @@ -100,8 +101,11 @@ unsigned long __init early_init(unsigned long dt_ptr) * Identify the CPU type and fix up code sections * that depend on which cpu we have. */ - identify_cpu(offset, 0); - do_cpu_ftr_fixups(offset); + spec = identify_cpu(offset); + + do_feature_fixups(offset, spec->cpu_features, + PTRRELOC(&__start___ftr_fixup), + PTRRELOC(&__stop___ftr_fixup)); return KERNELBASE + offset; } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4b2e32eab9dc..1969b5686eee 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -170,6 +170,9 @@ void __init setup_paca(int cpu) void __init early_setup(unsigned long dt_ptr) { + /* Identify CPU type */ + identify_cpu(0); + /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ setup_paca(0); @@ -348,6 +351,14 @@ void __init setup_system(void) { DBG(" -> setup_system()\n"); + /* Apply the CPUs-specific and firmware specific fixups to kernel + * text (nop out sections not relevant to this CPU or this firmware) + */ + do_feature_fixups(0, cur_cpu_spec->cpu_features, + &__start___ftr_fixup, &__stop___ftr_fixup); + do_feature_fixups(0, powerpc_firmware_features, + &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); + /* * Unflatten the device-tree passed by prom_init or kexec */ diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index a0ff7ba7d666..6f73469fd3b0 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c @@ -694,6 +694,11 @@ void * __init iSeries_early_setup(void) { unsigned long phys_mem_size; + /* Identify CPU type. This is done again by the common code later + * on but calling this function multiple times is fine. 
+ */ + identify_cpu(0); + powerpc_firmware_features |= FW_FEATURE_ISERIES; powerpc_firmware_features |= FW_FEATURE_LPAR; diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S index 5f6684012ded..d319f9ba2379 100644 --- a/arch/ppc/kernel/misc.S +++ b/arch/ppc/kernel/misc.S @@ -109,80 +109,6 @@ _GLOBAL(reloc_got2) mtlr r11 blr -/* - * identify_cpu, - * called with r3 = data offset and r4 = CPU number - * doesn't change r3 - */ -_GLOBAL(identify_cpu) - addis r8,r3,cpu_specs@ha - addi r8,r8,cpu_specs@l - mfpvr r7 -1: - lwz r5,CPU_SPEC_PVR_MASK(r8) - and r5,r5,r7 - lwz r6,CPU_SPEC_PVR_VALUE(r8) - cmplw 0,r6,r5 - beq 1f - addi r8,r8,CPU_SPEC_ENTRY_SIZE - b 1b -1: - addis r6,r3,cur_cpu_spec@ha - addi r6,r6,cur_cpu_spec@l - sub r8,r8,r3 - stw r8,0(r6) - blr - -/* - * do_cpu_ftr_fixups - goes through the list of CPU feature fixups - * and writes nop's over sections of code that don't apply for this cpu. - * r3 = data offset (not changed) - */ -_GLOBAL(do_cpu_ftr_fixups) - /* Get CPU 0 features */ - addis r6,r3,cur_cpu_spec@ha - addi r6,r6,cur_cpu_spec@l - lwz r4,0(r6) - add r4,r4,r3 - lwz r4,CPU_SPEC_FEATURES(r4) - - /* Get the fixup table */ - addis r6,r3,__start___ftr_fixup@ha - addi r6,r6,__start___ftr_fixup@l - addis r7,r3,__stop___ftr_fixup@ha - addi r7,r7,__stop___ftr_fixup@l - - /* Do the fixup */ -1: cmplw 0,r6,r7 - bgelr - addi r6,r6,16 - lwz r8,-16(r6) /* mask */ - and r8,r8,r4 - lwz r9,-12(r6) /* value */ - cmplw 0,r8,r9 - beq 1b - lwz r8,-8(r6) /* section begin */ - lwz r9,-4(r6) /* section end */ - subf. r9,r8,r9 - beq 1b - /* write nops over the section of code */ - /* todo: if large section, add a branch at the start of it */ - srwi r9,r9,2 - mtctr r9 - add r8,r8,r3 - lis r0,0x60000000@h /* nop */ -3: stw r0,0(r8) - andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l - beq 2f - dcbst 0,r8 /* suboptimal, but simpler */ - sync - icbi 0,r8 -2: addi r8,r8,4 - bdnz 3b - sync /* additional sync needed on g4 */ - isync - b 1b - /* * call_setup_cpu - call the setup_cpu function for this cpu * r3 = data offset, r24 = cpu number diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c index 75fe13815be2..41a640f16bdd 100644 --- a/arch/ppc/kernel/setup.c +++ b/arch/ppc/kernel/setup.c @@ -38,6 +38,7 @@ #include #include #include +#include #define USES_PPC_SYS (defined(CONFIG_85xx) || defined(CONFIG_83xx) || \ defined(CONFIG_MPC10X_BRIDGE) || defined(CONFIG_8260) || \ @@ -53,8 +54,6 @@ extern void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7); -extern void identify_cpu(unsigned long offset, unsigned long cpu); -extern void do_cpu_ftr_fixups(unsigned long offset); extern void reloc_got2(unsigned long offset); extern void ppc6xx_idle(void); @@ -301,6 +300,7 @@ early_init(int r3, int r4, int r5) { unsigned long phys; unsigned long offset = reloc_offset(); + struct cpu_spec *spec; /* Default */ phys = offset + KERNELBASE; @@ -313,8 +313,10 @@ early_init(int r3, int r4, int r5) * Identify the CPU type and fix up code sections * that depend on which cpu we have. 
*/ - identify_cpu(offset, 0); - do_cpu_ftr_fixups(offset); + spec = identify_cpu(offset); + do_feature_fixups(offset, spec->cpu_features, + PTRRELOC(&__start___ftr_fixup), + PTRRELOC(&__stop___ftr_fixup)); return phys; } diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index 12707ab9dc98..4d22218739e0 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -89,8 +89,11 @@ struct cpu_spec { extern struct cpu_spec *cur_cpu_spec; -extern void identify_cpu(unsigned long offset, unsigned long cpu); -extern void do_cpu_ftr_fixups(unsigned long offset); +extern unsigned int __start___ftr_fixup, __stop___ftr_fixup; + +extern struct cpu_spec *identify_cpu(unsigned long offset); +extern void do_feature_fixups(unsigned long offset, unsigned long value, + void *fixup_start, void *fixup_end); #endif /* __ASSEMBLY__ */ diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h index 1022737f4f34..c16e0a6b9dab 100644 --- a/include/asm-powerpc/firmware.h +++ b/include/asm-powerpc/firmware.h @@ -96,6 +96,8 @@ extern void machine_check_fwnmi(void); /* This is true if we are using the firmware NMI handler (typically LPAR) */ extern int fwnmi_active; +extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup; + #else /* __ASSEMBLY__ */ #define BEGIN_FW_FTR_SECTION 96: -- cgit v1.2.3 From 7aeb732428fc8e2ecae6d432873770c12f04a979 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 20 Oct 2006 11:47:16 +1000 Subject: [POWERPC] Support nested cpu feature sections This patch adds some macros that can be used with an explicit label in order to nest cpu features. This should be used very careful but is necessary for the upcoming cell TB fixup. Signed-off-by: Benjamin Herrenschmidt Acked-by: Olof Johansson Signed-off-by: Paul Mackerras --- include/asm-powerpc/cputable.h | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index 4d22218739e0..65faf322ace0 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -434,30 +434,34 @@ static inline int cpu_has_feature(unsigned long feature) #ifdef __ASSEMBLY__ -#define BEGIN_FTR_SECTION 98: +#define BEGIN_FTR_SECTION_NESTED(label) label: +#define BEGIN_FTR_SECTION BEGIN_FTR_SECTION_NESTED(98) #ifndef __powerpc64__ -#define END_FTR_SECTION(msk, val) \ +#define END_FTR_SECTION_NESTED(msk, val, label) \ 99: \ .section __ftr_fixup,"a"; \ .align 2; \ .long msk; \ .long val; \ - .long 98b; \ + .long label##b; \ .long 99b; \ .previous #else /* __powerpc64__ */ -#define END_FTR_SECTION(msk, val) \ +#define END_FTR_SECTION_NESTED(msk, val, label) \ 99: \ .section __ftr_fixup,"a"; \ .align 3; \ .llong msk; \ .llong val; \ - .llong 98b; \ + .llong label##b; \ .llong 99b; \ .previous #endif /* __powerpc64__ */ +#define END_FTR_SECTION(msk, val) \ + END_FTR_SECTION_NESTED(msk, val, 98) + #define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk)) #define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0) #endif /* __ASSEMBLY__ */ -- cgit v1.2.3 From 0909c8c2d547e45ca50e2492b08ec93a37b35237 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 20 Oct 2006 11:47:18 +1000 Subject: [POWERPC] Support feature fixups in vdso's This patch reworks the feature fixup mecanism so vdso's can be fixed up. The main issue was that the construct: .long label (or .llong on 64 bits) will not work in the case of a shared library like the vdso. 
It will generate an empty placeholder in the fixup table along with a reloc, which is not something we can deal with in the vdso. The idea here (thanks Alan Modra !) is to instead use something like: 1: .long label - 1b That is, the feature fixup tables no longer contain addresses of bits of code to patch, but offsets of such code from the fixup table entry itself. That is properly resolved by ld when building the .so's. I've modified the fixup mecanism generically to use that method for the rest of the kernel as well. Another trick is that the 32 bits vDSO included in the 64 bits kernel need to have a table in the 64 bits format. However, gas does not support 32 bits code with a statement of the form: .llong label - 1b (Or even just .llong label) That is, it cannot emit the right fixup/relocation for the linker to use to assign a 32 bits address to an .llong field. Thus, in the specific case of the 32 bits vdso built as part of the 64 bits kernel, we are using a modified macro that generates: .long 0xffffffff .llong label - 1b Note that is assumes that the value is negative which is enforced by the .lds (those offsets are always negative as the .text is always before the fixup table and gas doesn't support emiting the reloc the other way around). Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/cputable.c | 11 ++++--- arch/powerpc/kernel/setup_32.c | 2 +- arch/powerpc/kernel/setup_64.c | 4 +-- arch/powerpc/kernel/vdso.c | 43 +++++++++++++++++++++++++++ arch/powerpc/kernel/vdso32/vdso32.lds.S | 12 ++++++++ arch/powerpc/kernel/vdso64/vdso64.lds.S | 10 +++++++ arch/ppc/kernel/setup.c | 2 +- include/asm-powerpc/asm-compat.h | 52 +++++++++++++++++++++++++++++++++ include/asm-powerpc/cputable.h | 31 ++++---------------- include/asm-powerpc/firmware.h | 15 ++++------ include/asm-powerpc/timex.h | 8 +++-- 11 files changed, 141 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 6fdfaa4a82b8..bfd499ee3753 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1202,14 +1202,13 @@ struct cpu_spec *identify_cpu(unsigned long offset) return NULL; } -void do_feature_fixups(unsigned long offset, unsigned long value, - void *fixup_start, void *fixup_end) +void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) { struct fixup_entry { unsigned long mask; unsigned long value; - unsigned int *start; - unsigned int *end; + long start_off; + long end_off; } *fcur, *fend; fcur = fixup_start; @@ -1224,8 +1223,8 @@ void do_feature_fixups(unsigned long offset, unsigned long value, /* These PTRRELOCs will disappear once the new scheme for * modules and vdso is implemented */ - pstart = PTRRELOC(fcur->start); - pend = PTRRELOC(fcur->end); + pstart = ((unsigned int *)fcur) + (fcur->start_off / 4); + pend = ((unsigned int *)fcur) + (fcur->end_off / 4); for (p = pstart; p < pend; p++) { *p = 0x60000000u; diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 769e511783b0..a4c2964a3ca6 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -103,7 +103,7 @@ unsigned long __init early_init(unsigned long dt_ptr) */ spec = identify_cpu(offset); - do_feature_fixups(offset, spec->cpu_features, + do_feature_fixups(spec->cpu_features, PTRRELOC(&__start___ftr_fixup), PTRRELOC(&__stop___ftr_fixup)); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 
1969b5686eee..16278968dab6 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -354,9 +354,9 @@ void __init setup_system(void) /* Apply the CPUs-specific and firmware specific fixups to kernel * text (nop out sections not relevant to this CPU or this firmware) */ - do_feature_fixups(0, cur_cpu_spec->cpu_features, + do_feature_fixups(cur_cpu_spec->cpu_features, &__start___ftr_fixup, &__stop___ftr_fixup); - do_feature_fixups(0, powerpc_firmware_features, + do_feature_fixups(powerpc_firmware_features, &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); /* diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 1a7e19cdab39..c913ad5cad29 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -36,6 +36,8 @@ #include #include +#include "setup.h" + #undef DEBUG #ifdef DEBUG @@ -586,6 +588,43 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32, return 0; } + +static __init int vdso_fixup_features(struct lib32_elfinfo *v32, + struct lib64_elfinfo *v64) +{ + void *start32; + unsigned long size32; + +#ifdef CONFIG_PPC64 + void *start64; + unsigned long size64; + + start64 = find_section64(v64->hdr, "__ftr_fixup", &size64); + if (start64) + do_feature_fixups(cur_cpu_spec->cpu_features, + start64, start64 + size64); + + start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64); + if (start64) + do_feature_fixups(powerpc_firmware_features, + start64, start64 + size64); +#endif /* CONFIG_PPC64 */ + + start32 = find_section32(v32->hdr, "__ftr_fixup", &size32); + if (start32) + do_feature_fixups(cur_cpu_spec->cpu_features, + start32, start32 + size32); + +#ifdef CONFIG_PPC64 + start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32); + if (start32) + do_feature_fixups(powerpc_firmware_features, + start32, start32 + size32); +#endif /* CONFIG_PPC64 */ + + return 0; +} + static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32, struct lib64_elfinfo *v64) { @@ -634,6 +673,9 @@ static __init int vdso_setup(void) if (vdso_fixup_datapage(&v32, &v64)) return -1; + if (vdso_fixup_features(&v32, &v64)) + return -1; + if (vdso_fixup_alt_funcs(&v32, &v64)) return -1; @@ -714,6 +756,7 @@ void __init vdso_init(void) * Setup the syscall map in the vDOS */ vdso_setup_syscall_map(); + /* * Initialize the vDSO images in memory, that is do necessary * fixups of vDSO symbols, locate trampolines, etc... diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index 6187af2d54c3..26e138c4ce17 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -32,6 +32,18 @@ SECTIONS PROVIDE (_etext = .); PROVIDE (etext = .); + . = ALIGN(8); + __ftr_fixup : { + *(__ftr_fixup) + } + +#ifdef CONFIG_PPC64 + . = ALIGN(8); + __fw_ftr_fixup : { + *(__fw_ftr_fixup) + } +#endif + /* Other stuff is appended to the text segment: */ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } .rodata1 : { *(.rodata1) } diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S index 4a2b6dc0960c..2d70f35d50b5 100644 --- a/arch/powerpc/kernel/vdso64/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S @@ -31,6 +31,16 @@ SECTIONS PROVIDE (_etext = .); PROVIDE (etext = .); + . = ALIGN(8); + __ftr_fixup : { + *(__ftr_fixup) + } + + . 
= ALIGN(8); + __fw_ftr_fixup : { + *(__fw_ftr_fixup) + } + /* Other stuff is appended to the text segment: */ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } .rodata1 : { *(.rodata1) } diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c index 41a640f16bdd..27faeca2c7a2 100644 --- a/arch/ppc/kernel/setup.c +++ b/arch/ppc/kernel/setup.c @@ -314,7 +314,7 @@ early_init(int r3, int r4, int r5) * that depend on which cpu we have. */ spec = identify_cpu(offset); - do_feature_fixups(offset, spec->cpu_features, + do_feature_fixups(spec->cpu_features, PTRRELOC(&__start___ftr_fixup), PTRRELOC(&__stop___ftr_fixup)); diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h index 8e64be0cc47d..c89bd58ee283 100644 --- a/include/asm-powerpc/asm-compat.h +++ b/include/asm-powerpc/asm-compat.h @@ -14,6 +14,58 @@ # define ASM_CONST(x) __ASM_CONST(x) #endif + +/* + * Feature section common macros + * + * Note that the entries now contain offsets between the table entry + * and the code rather than absolute code pointers in order to be + * useable with the vdso shared library. There is also an assumption + * that values will be negative, that is, the fixup table has to be + * located after the code it fixes up. + */ +#ifdef CONFIG_PPC64 +#ifdef __powerpc64__ +/* 64 bits kernel, 64 bits code */ +#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ +99: \ + .section sect,"a"; \ + .align 3; \ +98: \ + .llong msk; \ + .llong val; \ + .llong label##b-98b; \ + .llong 99b-98b; \ + .previous +#else /* __powerpc64__ */ +/* 64 bits kernel, 32 bits code (ie. vdso32) */ +#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ +99: \ + .section sect,"a"; \ + .align 3; \ +98: \ + .llong msk; \ + .llong val; \ + .long 0xffffffff; \ + .long label##b-98b; \ + .long 0xffffffff; \ + .long 99b-98b; \ + .previous +#endif /* !__powerpc64__ */ +#else /* CONFIG_PPC64 */ +/* 32 bits kernel, 32 bits code */ +#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ +99: \ + .section sect,"a"; \ + .align 2; \ +98: \ + .long msk; \ + .long val; \ + .long label##b-98b; \ + .long 99b-98b; \ + .previous +#endif /* !CONFIG_PPC64 */ + #ifdef __powerpc64__ /* operations for longs and pointers */ diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index 65faf322ace0..02e52d68cbbe 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -92,8 +92,8 @@ extern struct cpu_spec *cur_cpu_spec; extern unsigned int __start___ftr_fixup, __stop___ftr_fixup; extern struct cpu_spec *identify_cpu(unsigned long offset); -extern void do_feature_fixups(unsigned long offset, unsigned long value, - void *fixup_start, void *fixup_end); +extern void do_feature_fixups(unsigned long value, void *fixup_start, + void *fixup_end); #endif /* __ASSEMBLY__ */ @@ -435,32 +435,11 @@ static inline int cpu_has_feature(unsigned long feature) #ifdef __ASSEMBLY__ #define BEGIN_FTR_SECTION_NESTED(label) label: -#define BEGIN_FTR_SECTION BEGIN_FTR_SECTION_NESTED(98) - -#ifndef __powerpc64__ -#define END_FTR_SECTION_NESTED(msk, val, label) \ -99: \ - .section __ftr_fixup,"a"; \ - .align 2; \ - .long msk; \ - .long val; \ - .long label##b; \ - .long 99b; \ - .previous -#else /* __powerpc64__ */ +#define BEGIN_FTR_SECTION BEGIN_FTR_SECTION_NESTED(97) #define END_FTR_SECTION_NESTED(msk, val, label) \ -99: \ - .section __ftr_fixup,"a"; \ - .align 3; \ - .llong msk; \ - .llong val; \ - .llong label##b; \ - .llong 99b; \ - .previous -#endif /* __powerpc64__ */ - + MAKE_FTR_SECTION_ENTRY(msk, val, 
label, __ftr_fixup) #define END_FTR_SECTION(msk, val) \ - END_FTR_SECTION_NESTED(msk, val, 98) + END_FTR_SECTION_NESTED(msk, val, 97) #define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk)) #define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0) diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h index c16e0a6b9dab..fdf9aff71150 100644 --- a/include/asm-powerpc/firmware.h +++ b/include/asm-powerpc/firmware.h @@ -100,17 +100,12 @@ extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup; #else /* __ASSEMBLY__ */ -#define BEGIN_FW_FTR_SECTION 96: - +#define BEGIN_FW_FTR_SECTION_NESTED(label) label: +#define BEGIN_FW_FTR_SECTION BEGIN_FW_FTR_SECTION_NESTED(97) +#define END_FW_FTR_SECTION_NESTED(msk, val, label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup) #define END_FW_FTR_SECTION(msk, val) \ -97: \ - .section __fw_ftr_fixup,"a"; \ - .align 3; \ - .llong msk; \ - .llong val; \ - .llong 96b; \ - .llong 97b; \ - .previous + END_FW_FTR_SECTION_NESTED(msk, val, 97) #define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk)) #define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0) diff --git a/include/asm-powerpc/timex.h b/include/asm-powerpc/timex.h index 3b9a8e786806..e3f08cf91486 100644 --- a/include/asm-powerpc/timex.h +++ b/include/asm-powerpc/timex.h @@ -30,13 +30,15 @@ static inline cycles_t get_cycles(void) ret = 0; __asm__ __volatile__( - "98: mftb %0\n" + "97: mftb %0\n" "99:\n" ".section __ftr_fixup,\"a\"\n" + ".align 2\n" + "98:\n" " .long %1\n" " .long 0\n" - " .long 98b\n" - " .long 99b\n" + " .long 97b-98b\n" + " .long 99b-98b\n" ".previous" : "=r" (ret) : "i" (CPU_FTR_601)); #endif -- cgit v1.2.3 From 859deea949c382d9ccb6397fe33df3703ecef45d Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 20 Oct 2006 14:37:05 +1000 Subject: [POWERPC] Cell timebase bug workaround The Cell CPU timebase has an erratum. When reading the entire 64 bits of the timebase with one mftb instruction, there is a handful of cycles window during which one might read a value with the low order 32 bits already reset to 0x00000000 but the high order bits not yet incremeted by one. This fixes it by reading the timebase again until the low order 32 bits is no longer 0. That might introduce occasional latencies if hitting mftb just at the wrong time, but no more than 70ns on a cell blade, and that was considered acceptable. Signed-off-by: Benjamin Herrenschmidt Acked-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/vdso64/gettimeofday.S | 6 ++++-- include/asm-powerpc/cputable.h | 3 ++- include/asm-powerpc/ppc_asm.h | 18 ++++++++++++++---- include/asm-powerpc/reg.h | 25 +++++++++++++++++++++++++ include/asm-powerpc/time.h | 27 ++++++++++++++++----------- include/asm-powerpc/timex.h | 12 +++++------- 6 files changed, 66 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index 56e76ff5498f..40ffd9b6cef7 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S @@ -229,8 +229,10 @@ V_FUNCTION_BEGIN(__do_get_xsec) xor r0,r8,r8 /* create dependency */ add r3,r3,r0 - /* Get TB & offset it */ - mftb r7 + /* Get TB & offset it. We use the MFTB macro which will generate + * workaround code for Cell. 
+ */ + MFTB(r7) ld r9,CFG_TB_ORIG_STAMP(r3) subf r7,r9,r7 diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index 02e52d68cbbe..a9a40149a7c0 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -147,6 +147,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, #define CPU_FTR_CI_LARGE_PAGE LONG_ASM_CONST(0x0000100000000000) #define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000) #define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000) +#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000) #ifndef __ASSEMBLY__ @@ -335,7 +336,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, #define CPU_FTRS_CELL (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ - CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE) + CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG) #define CPU_FTRS_PA6T (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h index a940cfe040da..fa083d8e4663 100644 --- a/include/asm-powerpc/ppc_asm.h +++ b/include/asm-powerpc/ppc_asm.h @@ -30,9 +30,9 @@ BEGIN_FTR_SECTION; \ mfspr ra,SPRN_PURR; /* get processor util. reg */ \ END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ BEGIN_FTR_SECTION; \ - mftb ra; /* or get TB if no PURR */ \ + MFTB(ra); /* or get TB if no PURR */ \ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ - ld rb,PACA_STARTPURR(r13); \ + ld rb,PACA_STARTPURR(r13); \ std ra,PACA_STARTPURR(r13); \ subf rb,rb,ra; /* subtract start value */ \ ld ra,PACA_USER_TIME(r13); \ @@ -45,9 +45,9 @@ BEGIN_FTR_SECTION; \ mfspr ra,SPRN_PURR; /* get processor util. 
reg */ \ END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ BEGIN_FTR_SECTION; \ - mftb ra; /* or get TB if no PURR */ \ + MFTB(ra); /* or get TB if no PURR */ \ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ - ld rb,PACA_STARTPURR(r13); \ + ld rb,PACA_STARTPURR(r13); \ std ra,PACA_STARTPURR(r13); \ subf rb,rb,ra; /* subtract start value */ \ ld ra,PACA_SYSTEM_TIME(r13); \ @@ -274,6 +274,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_601) #define ISYNC_601 #endif +#ifdef CONFIG_PPC_CELL +#define MFTB(dest) \ +90: mftb dest; \ +BEGIN_FTR_SECTION_NESTED(96); \ + cmpwi dest,0; \ + beq- 90b; \ +END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) +#else +#define MFTB(dest) mftb dest +#endif #ifndef CONFIG_SMP #define TLBSYNC diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h index fde5c804eccb..6faae7b14d55 100644 --- a/include/asm-powerpc/reg.h +++ b/include/asm-powerpc/reg.h @@ -619,10 +619,35 @@ : "=r" (rval)); rval;}) #define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)) +#ifdef __powerpc64__ +#ifdef CONFIG_PPC_CELL +#define mftb() ({unsigned long rval; \ + asm volatile( \ + "90: mftb %0;\n" \ + "97: cmpwi %0,0;\n" \ + " beq- 90b;\n" \ + "99:\n" \ + ".section __ftr_fixup,\"a\"\n" \ + ".align 3\n" \ + "98:\n" \ + " .llong %1\n" \ + " .llong %1\n" \ + " .llong 97b-98b\n" \ + " .llong 99b-98b\n" \ + ".previous" \ + : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;}) +#else #define mftb() ({unsigned long rval; \ asm volatile("mftb %0" : "=r" (rval)); rval;}) +#endif /* !CONFIG_PPC_CELL */ + +#else /* __powerpc64__ */ + #define mftbl() ({unsigned long rval; \ asm volatile("mftbl %0" : "=r" (rval)); rval;}) +#define mftbu() ({unsigned long rval; \ + asm volatile("mftbu %0" : "=r" (rval)); rval;}) +#endif /* !__powerpc64__ */ #define mttbl(v) asm volatile("mttbl %0":: "r"(v)) #define mttbu(v) asm volatile("mttbu %0":: "r"(v)) diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h index b051d4c88c3b..a78285010d62 100644 --- a/include/asm-powerpc/time.h +++ b/include/asm-powerpc/time.h @@ -82,30 +82,35 @@ struct div_result { #define __USE_RTC() 0 #endif -/* On ppc64 this gets us the whole timebase; on ppc32 just the lower half */ +#ifdef CONFIG_PPC64 + +/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */ +#define get_tbl get_tb + +#else + static inline unsigned long get_tbl(void) { - unsigned long tbl; - #if defined(CONFIG_403GCX) + unsigned long tbl; asm volatile("mfspr %0, 0x3dd" : "=r" (tbl)); + return tbl; #else - asm volatile("mftb %0" : "=r" (tbl)); + return mftbl(); #endif - return tbl; } static inline unsigned int get_tbu(void) { +#ifdef CONFIG_403GCX unsigned int tbu; - -#if defined(CONFIG_403GCX) asm volatile("mfspr %0, 0x3dc" : "=r" (tbu)); + return tbu; #else - asm volatile("mftbu %0" : "=r" (tbu)); + return mftbu(); #endif - return tbu; } +#endif /* !CONFIG_PPC64 */ static inline unsigned int get_rtcl(void) { @@ -131,7 +136,7 @@ static inline u64 get_tb(void) { return mftb(); } -#else +#else /* CONFIG_PPC64 */ static inline u64 get_tb(void) { unsigned int tbhi, tblo, tbhi2; @@ -144,7 +149,7 @@ static inline u64 get_tb(void) return ((u64)tbhi << 32) | tblo; } -#endif +#endif /* !CONFIG_PPC64 */ static inline void set_tb(unsigned int upper, unsigned int lower) { diff --git a/include/asm-powerpc/timex.h b/include/asm-powerpc/timex.h index e3f08cf91486..92dedde761d1 100644 --- a/include/asm-powerpc/timex.h +++ b/include/asm-powerpc/timex.h @@ -8,6 +8,7 @@ */ #include +#include #define CLOCK_TICK_RATE 1024000 /* Underlying HZ */ @@ 
-15,13 +16,11 @@ typedef unsigned long cycles_t; static inline cycles_t get_cycles(void) { - cycles_t ret; - #ifdef __powerpc64__ - - __asm__ __volatile__("mftb %0" : "=r" (ret) : ); - + return mftb(); #else + cycles_t ret; + /* * For the "cycle" counter we use the timebase lower half. * Currently only used on SMP. @@ -41,9 +40,8 @@ static inline cycles_t get_cycles(void) " .long 99b-98b\n" ".previous" : "=r" (ret) : "i" (CPU_FTR_601)); -#endif - return ret; +#endif } #endif /* __KERNEL__ */ -- cgit v1.2.3 From e2100efb266c9335925191afe79f81f8d0a5807e Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 20 Oct 2006 11:49:54 +1000 Subject: [POWERPC] Fix device_is_compatible() const warning Fix a const'ification related warning with device_is_compatible() and friends related to get_property() not properly having const on it's input device node argument. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 8 +++++--- include/asm-powerpc/prom.h | 10 ++++++---- 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 865b9648d0d5..bdb412d4b748 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -1014,7 +1014,7 @@ EXPORT_SYMBOL(find_all_nodes); /** Checks if the given "compat" string matches one of the strings in * the device's "compatible" property */ -int device_is_compatible(struct device_node *device, const char *compat) +int device_is_compatible(const struct device_node *device, const char *compat) { const char* cp; int cplen, l; @@ -1491,7 +1491,8 @@ static int __init prom_reconfig_setup(void) __initcall(prom_reconfig_setup); #endif -struct property *of_find_property(struct device_node *np, const char *name, +struct property *of_find_property(const struct device_node *np, + const char *name, int *lenp) { struct property *pp; @@ -1512,7 +1513,8 @@ struct property *of_find_property(struct device_node *np, const char *name, * Find a property with a given name for a given node * and return the value. */ -const void *get_property(struct device_node *np, const char *name, int *lenp) +const void *get_property(const struct device_node *np, const char *name, + int *lenp) { struct property *pp = of_find_property(np,name,lenp); return pp ? 
pp->value : NULL; diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 524629769336..ec11d44eaeb5 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -134,7 +134,7 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev); extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev); -extern struct property *of_find_property(struct device_node *np, +extern struct property *of_find_property(const struct device_node *np, const char *name, int *lenp); extern struct device_node *of_node_get(struct device_node *node); @@ -158,10 +158,12 @@ extern void of_detach_node(const struct device_node *); extern void finish_device_tree(void); extern void unflatten_device_tree(void); extern void early_init_devtree(void *); -extern int device_is_compatible(struct device_node *device, const char *); +extern int device_is_compatible(const struct device_node *device, + const char *); extern int machine_is_compatible(const char *compat); -extern const void *get_property(struct device_node *node, const char *name, - int *lenp); +extern const void *get_property(const struct device_node *node, + const char *name, + int *lenp); extern void print_properties(struct device_node *node); extern int prom_n_addr_cells(struct device_node* np); extern int prom_n_size_cells(struct device_node* np); -- cgit v1.2.3 From f4d4c354bca18210296cc0a8f592c0cdb720bf20 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 25 Oct 2006 13:22:27 +1000 Subject: [POWERPC] Fix CHRP platforms with only 8259 On CHRP platforms with only a 8259 controller, we should set the default IRQ host to the 8259 driver's one for the IRQ probing fallbacks to work in case the IRQ tree is incorrect (like on Pegasos for example). Without this fix, we get a bunch of WARN_ON's during boot. 
Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/platforms/chrp/setup.c | 4 +++- arch/powerpc/sysdev/i8259.c | 5 +++++ include/asm-powerpc/i8259.h | 1 + 3 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index cae3d13229b9..49b8dabcbc99 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -477,8 +477,10 @@ static void __init chrp_find_8259(void) " address, polling\n"); i8259_init(pic, chrp_int_ack); - if (ppc_md.get_irq == NULL) + if (ppc_md.get_irq == NULL) { ppc_md.get_irq = i8259_irq; + irq_set_default_host(i8259_get_host()); + } if (chrp_mpic != NULL) { cascade_irq = irq_of_parse_and_map(pic, 0); if (cascade_irq == NO_IRQ) diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index 0450265d73bb..ad87adc975bc 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c @@ -224,6 +224,11 @@ static struct irq_host_ops i8259_host_ops = { .xlate = i8259_host_xlate, }; +struct irq_host *i8259_get_host(void) +{ + return i8259_host; +} + /** * i8259_init - Initialize the legacy controller * @node: device node of the legacy PIC (can be NULL, but then, it will match diff --git a/include/asm-powerpc/i8259.h b/include/asm-powerpc/i8259.h index 78489fb8d140..db1362f8c603 100644 --- a/include/asm-powerpc/i8259.h +++ b/include/asm-powerpc/i8259.h @@ -7,6 +7,7 @@ #ifdef CONFIG_PPC_MERGE extern void i8259_init(struct device_node *node, unsigned long intack_addr); extern unsigned int i8259_irq(void); +extern struct irq_host *i8259_get_host(void); #else extern void i8259_init(unsigned long intack_addr, int offset); extern int i8259_irq(void); -- cgit v1.2.3
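
Note on the consolidated fixup walk: the sketch below restates the offset-based do_feature_fixups() loop from the cputable.c changes above as a self-contained userspace C program. It is illustrative only -- the fixup table here is built by hand instead of being emitted by the BEGIN/END_FTR_SECTION macros, the cross-object pointer arithmetic is the same trick the kernel relies on, and the dcbst/icbi/sync cache maintenance is omitted because nothing here patches real instruction memory.

#include <stdio.h>
#include <stdint.h>

#define PPC_NOP	0x60000000u		/* "ori 0,0,0" */

struct fixup_entry {
	unsigned long mask;		/* feature bits this section tests */
	unsigned long value;		/* required value of (features & mask) */
	long start_off;			/* offsets of the patched code, relative */
	long end_off;			/* to this table entry itself */
};

static void do_feature_fixups(unsigned long value, void *fixup_start,
			      void *fixup_end)
{
	struct fixup_entry *fcur = fixup_start, *fend = fixup_end;

	for (; fcur < fend; fcur++) {
		uint32_t *p, *pstart, *pend;

		if ((value & fcur->mask) == fcur->value)
			continue;	/* section applies, leave it alone */

		/* Entries carry offsets, not absolute addresses, so the
		 * same walk works for the kernel image and for the vdso */
		pstart = (uint32_t *)((char *)fcur + fcur->start_off);
		pend   = (uint32_t *)((char *)fcur + fcur->end_off);

		for (p = pstart; p < pend; p++)
			*p = PPC_NOP;	/* kernel also flushes I/D cache here */
	}
}

int main(void)
{
	static uint32_t code[4] = { 1, 2, 3, 4 };	/* stand-in for text */
	struct fixup_entry table[1];

	table[0].mask      = 0x4;	/* made-up feature bit */
	table[0].value     = 0x4;	/* section wanted only when bit is set */
	table[0].start_off = (long)((char *)&code[0] - (char *)&table[0]);
	table[0].end_off   = (long)((char *)&code[4] - (char *)&table[0]);

	do_feature_fixups(0, table, table + 1);	/* bit clear -> nop it out */

	for (int i = 0; i < 4; i++)
		printf("%08x\n", code[i]);	/* prints 60000000 four times */
	return 0;
}

Storing entry-relative offsets rather than absolute labels is what lets ld resolve the table fully at link time for the vdso shared objects, instead of leaving placeholder words and relocations behind.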
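
Note on the Cell timebase workaround: the retry rule installed by the MFTB macro and the new mftb() feature section is small enough to show in C. The sketch below is a model built on assumptions, not kernel code -- read_tb_raw() and the glitch simulation are invented for illustration -- but the guard is the same one the patch adds: never accept a timebase value whose low 32 bits are zero, since inside the erratum window such a value can carry a stale, not-yet-incremented high word.

#include <stdint.h>
#include <stdio.h>

/* Simulated timebase: once, right at a low-word wrap, return the new
 * low word (0) paired with the old high word, as the erratum does. */
static uint64_t sim_tb = 0xfffffff8ull;
static int glitch_armed = 1;

static uint64_t read_tb_raw(void)
{
	sim_tb += 4;
	if ((uint32_t)sim_tb == 0 && glitch_armed) {
		glitch_armed = 0;
		return sim_tb - 0x100000000ull;	/* stale high word */
	}
	return sim_tb;
}

/* The workaround: re-read until the low 32 bits are non-zero.  Costs
 * at most one extra read when a legitimate wrap is hit, which is the
 * occasional latency the changelog above accepts. */
static uint64_t read_tb_cell_safe(void)
{
	uint64_t tb;

	do {
		tb = read_tb_raw();
	} while ((uint32_t)tb == 0);
	return tb;
}

int main(void)
{
	uint64_t prev = read_tb_cell_safe();

	for (int i = 0; i < 4; i++) {
		uint64_t now = read_tb_cell_safe();
		printf("tb=%016llx %s\n", (unsigned long long)now,
		       now < prev ? "WENT BACKWARDS" : "ok");
		prev = now;
	}
	return 0;
}

Without the low-word check, the glitched read would come back roughly 2^32 ticks in the past and the timebase would appear to jump backwards; with it, the simulated run prints a monotonically increasing sequence.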