From a7db50405216610c8a0d62b8b400180b6f366733 Mon Sep 17 00:00:00 2001
From: Alex Chiang
Date: Mon, 22 Jun 2009 08:08:07 -0600
Subject: PCI: remove pcibios_scan_all_fns()

This was #define'd as 0 on all platforms, so let's get rid of it.
This change makes pci_scan_slot() slightly easier to read.

Cc: Yoshinori Sato
Cc: Tony Luck
Cc: David Howells
Cc: "David S. Miller"
Cc: Jeff Dike
Cc: Ingo Molnar
Cc: Ivan Kokshaysky
Reviewed-by: Matthew Wilcox
Acked-by: Russell King
Acked-by: Ralf Baechle
Acked-by: Kyle McMartin
Acked-by: Benjamin Herrenschmidt
Acked-by: Paul Mundt
Acked-by: Arnd Bergmann
Signed-off-by: Alex Chiang
Signed-off-by: Jesse Barnes
---
 arch/arm/include/asm/pci.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 0abf386ba3d3..226cddd2fb65 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -6,8 +6,6 @@
 
 #include  /* for PCIBIOS_MIN_* */
 
-#define pcibios_scan_all_fns(a, b)	0
-
 #ifdef CONFIG_PCI_HOST_ITE8152
 /* ITE bridge requires setting latency timer to avoid early bus access
    termination by PIC bus mater devices
--
cgit v1.2.3

From 910a17e57ab6cd22b300bde4ce5f633f175c7ccd Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Tue, 15 Sep 2009 10:23:53 +0100
Subject: ARM: 5700/1: ARM: Introduce ARM_L1_CACHE_SHIFT to define cache line size

Currently the kernel believes that all ARM CPUs have L1_CACHE_SHIFT == 5.
It's not true, at least for CPUs based on Cortex-A8.

The list of CPUs with a cache line size != 32 should be expanded later.

Signed-off-by: Kirill A. Shutemov
Signed-off-by: Russell King
---
 arch/arm/include/asm/cache.h | 2 +-
 arch/arm/mm/Kconfig          | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index feaa75f0013e..66c160b8547f 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,7 +4,7 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H
 
-#define L1_CACHE_SHIFT		5
+#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
 /*
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5fe595aeba69..8d43e58f9244 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -771,3 +771,8 @@ config CACHE_XSC3L2
 	select OUTER_CACHE
 	help
 	  This option enables the L2 cache on XScale3.
+
+config ARM_L1_CACHE_SHIFT
+	int
+	default 6 if ARCH_OMAP3
+	default 5
--
cgit v1.2.3

From 200b812d0084f800bc52465e273b118ff5f8141f Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 18 Sep 2009 23:27:05 +0100
Subject: Clear the exclusive monitor when returning from an exception

The patch adds a CLREX or dummy STREX to the exception return path. This
is needed because several atomic/locking operations use a pair of
LDREX/STREXEQ and the EQ condition may not always be satisfied. This would
leave the exclusive monitor status set and may cause problems with
atomic/locking operations in the interrupted code.

With this patch, the atomic_set() operation can be a simple STR instruction
(on SMP systems, the global exclusive monitor is cleared by STR anyway).
Clearing the exclusive monitor during context switch is no longer needed as
this is handled by the exception return path anyway.
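For reference, the sketch below illustrates the LDREX/STREXEQ pairing the
message refers to. It is modeled on the ARMv6-era atomic_cmpxchg() and is
not part of this patch; the function name and plain int pointer are made up
for illustration. When the TEQ fails, the conditional STREXEQ never
executes, so the exclusive monitor set by the LDREX is left pending in the
interrupted code until something clears it.

/*
 * Illustrative sketch only -- modeled on the ARMv6-era atomic_cmpxchg(),
 * not code from this patch.  The LDREX sets the exclusive monitor; the
 * STREXEQ that would clear it only executes when the comparison succeeds.
 */
static inline int cmpxchg_sketch(int *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	do {
		__asm__ __volatile__("@ cmpxchg_sketch\n"
		"	ldrex	%1, [%2]\n"	/* load, set exclusive monitor */
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	strexeq	%0, %4, [%2]\n"	/* store (clearing the monitor) only on match */
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (ptr), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}

Clearing the monitor on every exception return is what makes it safe for
atomic_set() to become a plain STR in the hunk below.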
Signed-off-by: Catalin Marinas
Reported-by: Jamie Lokier
---
 arch/arm/include/asm/atomic.h  | 26 +++++++-------------------
 arch/arm/kernel/entry-armv.S   |  7 -------
 arch/arm/kernel/entry-header.S | 14 ++++++++++++++
 3 files changed, 21 insertions(+), 26 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9ed2377fe8e5..d0daeab2234e 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -19,31 +19,21 @@
 
 #ifdef __KERNEL__
 
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
 #define atomic_read(v)	((v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
 /*
  * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.  Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
  */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("@ atomic_set\n"
-"1:	ldrex	%0, [%1]\n"
-"	strex	%0, %2, [%1]\n"
-"	teq	%0, #0\n"
-"	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp;
@@ -163,8 +153,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3d727a8a23bc..a332bc7225bf 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -734,13 +734,6 @@ ENTRY(__switch_to)
 #ifdef CONFIG_MMU
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
-#if __LINUX_ARM_ARCH__ >= 6
-#ifdef CONFIG_CPU_32v6K
-	clrex
-#else
-	strex	r5, r4, [ip]		@ Clear exclusive monitor
-#endif
-#endif
 #if defined(CONFIG_HAS_TLS_REG)
 	mcr	p15, 0, r3, c13, c0, 3	@ set TLS register
 #elif !defined(CONFIG_TLS_REG_EMUL)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index a4eaf4f920c5..e17e3c30d957 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -76,13 +76,25 @@
 #ifndef CONFIG_THUMB2_KERNEL
 	.macro	svc_exit, rpsr
 	msr	spsr_cxsf, \rpsr
+#if defined(CONFIG_CPU_32v6K)
+	clrex				@ clear the exclusive monitor
 	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
+#elif defined (CONFIG_CPU_V6)
+	ldr	r0, [sp]
+	strex	r1, r2, [sp]		@ clear the exclusive monitor
+	ldmib	sp, {r1 - pc}^		@ load r1 - pc, cpsr
+#endif
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
+#if defined(CONFIG_CPU_32v6K)
+	clrex				@ clear the exclusive monitor
+#elif defined (CONFIG_CPU_V6)
+	strex	r1, r2, [sp]		@ clear the exclusive monitor
+#endif
 	.if	\fast
 	ldmdb	sp, {r1 - lr}^		@ get calling r1 - lr
 	.else
@@ -98,6 +110,7 @@
 	.endm
 #else	/* CONFIG_THUMB2_KERNEL */
 	.macro	svc_exit, rpsr
+	clrex				@ clear the exclusive monitor
 	ldr	r0, [sp, #S_SP]		@ top of the stack
 	ldr	r1, [sp, #S_PC]		@ return address
 	tst	r0, #4			@ orig stack 8-byte aligned?
@@ -110,6 +123,7 @@
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
+	clrex				@ clear the exclusive monitor
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
--
cgit v1.2.3

From 74109b8913277b1dbd072039d1e0a930d2834389 Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan
Date: Fri, 18 Sep 2009 15:09:33 +0100
Subject: Fix "W" macro in arch/arm/include/asm/unified.h

Please, fold into 0becb088501886f37ade38762c8eaaf4263572cc
aka "Thumb-2: Add macros for the unified assembler syntax"

otherwise:

	crypto/cast6.c:372:39: error: macro "W" passed 2 arguments, but takes just 1

Signed-off-by: Alexey Dobriyan
Signed-off-by: Catalin Marinas
---
 arch/arm/include/asm/unified.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index 073e85b9b961..bc631161e9c6 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -35,7 +35,9 @@
 
 #define ARM(x...)
 #define THUMB(x...)	x
+#ifdef __ASSEMBLY__
 #define W(instr)	instr.w
+#endif
 #define BSYM(sym)	sym + 1
 
 #else	/* !CONFIG_THUMB2_KERNEL */
@@ -45,7 +47,9 @@
 
 #define ARM(x...)	x
 #define THUMB(x...)
+#ifdef __ASSEMBLY__
 #define W(instr)	instr
+#endif
 #define BSYM(sym)	sym
 
 #endif	/* CONFIG_THUMB2_KERNEL */
--
cgit v1.2.3
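The fix above relies on the kernel convention that __ASSEMBLY__ is defined
only while a header is preprocessed for a .S file. A minimal, self-contained
sketch of that pattern follows; the file name, include guard, and helper
declaration are made up for illustration, and only the W() definition
mirrors the patch.

/* example_unified.h -- hypothetical header shared by C and assembly sources */
#ifndef EXAMPLE_UNIFIED_H
#define EXAMPLE_UNIFIED_H

#ifdef __ASSEMBLY__
/* Assembler-only helper: force the wide (32-bit) Thumb-2 encoding. */
#define W(instr)	instr.w
#else
/*
 * Plain C translation units never see W(), so C code such as
 * crypto/cast6.c, which passes W() two arguments, no longer collides
 * with the one-argument assembler macro.
 */
int example_c_only_helper(void);
#endif

#endif /* EXAMPLE_UNIFIED_H */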