path: root/arch/x86
author    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 15:51:40 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 15:51:40 +0200
commit    6d5f0ebfc0be9cbfeaafdd9258d5fa24b7975a36 (patch)
tree      3b7a5851a3d9f02441e2dcbaf22785d131544544 /arch/x86
parent    dbb885fecc1b1b35e93416bedd24d21bd20f60ed (diff)
parent    8acd91e8620836a56ff62028ed28ba629f2881a0 (diff)
download  linux-6d5f0ebfc0be9cbfeaafdd9258d5fa24b7975a36.tar.bz2
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
 "The main updates in this cycle were:

   - mutex MCS refactoring finishing touches: improve comments, refactor
     and clean up code, reduce debug data structure footprint, etc.

   - qrwlock finishing touches: remove old code, self-test updates.

   - small rwsem optimization

   - various smaller fixes/cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/lockdep: Revert qrwlock recusive stuff
  locking/rwsem: Avoid double checking before try acquiring write lock
  locking/rwsem: Move EXPORT_SYMBOL() lines to follow function definition
  locking/rwlock, x86: Delete unused asm/rwlock.h and rwlock.S
  locking/rwlock, x86: Clean up asm/spinlock*.h to remove old rwlock code
  locking/semaphore: Resolve some shadow warnings
  locking/selftest: Support queued rwlock
  locking/lockdep: Restrict the use of recursive read_lock() with qrwlock
  locking/spinlocks: Always evaluate the second argument of spin_lock_nested()
  locking/Documentation: Update locking/mutex-design.txt disadvantages
  locking/Documentation: Move locking related docs into Documentation/locking/
  locking/mutexes: Use MUTEX_SPIN_ON_OWNER when appropriate
  locking/mutexes: Refactor optimistic spinning code
  locking/mcs: Remove obsolete comment
  locking/mutexes: Document quick lock release when unlocking
  locking/mutexes: Standardize arguments in lock/unlock slowpaths
  locking: Remove deprecated smp_mb__() barriers
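The qrwlock changes in this pull are internal to arch/x86: the old hand-rolled rwlock fast and slow paths are deleted and the generic queued rwlock is used unconditionally, while the caller-side API (read_lock()/write_lock() and friends) is unchanged. A minimal sketch of that unchanged caller-side usage follows; the lock and accessor functions here are hypothetical, for illustration only.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);	/* hypothetical lock, for illustration */
static int example_value;

/* Multiple readers may hold the lock concurrently. */
static int example_read(void)
{
	int v;

	read_lock(&example_lock);
	v = example_value;
	read_unlock(&example_lock);
	return v;
}

/* A writer excludes both readers and other writers. */
static void example_write(int v)
{
	write_lock(&example_lock);
	example_value = v;
	write_unlock(&example_lock);
}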
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/rwlock.h           49
-rw-r--r--  arch/x86/include/asm/spinlock.h         81
-rw-r--r--  arch/x86/include/asm/spinlock_types.h    4
-rw-r--r--  arch/x86/lib/Makefile                    1
-rw-r--r--  arch/x86/lib/rwlock.S                   44
5 files changed, 2 insertions, 177 deletions
diff --git a/arch/x86/include/asm/rwlock.h b/arch/x86/include/asm/rwlock.h
deleted file mode 100644
index a5370a03d90c..000000000000
--- a/arch/x86/include/asm/rwlock.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef _ASM_X86_RWLOCK_H
-#define _ASM_X86_RWLOCK_H
-
-#include <asm/asm.h>
-
-#if CONFIG_NR_CPUS <= 2048
-
-#ifndef __ASSEMBLY__
-typedef union {
- s32 lock;
- s32 write;
-} arch_rwlock_t;
-#endif
-
-#define RW_LOCK_BIAS 0x00100000
-#define READ_LOCK_SIZE(insn) __ASM_FORM(insn##l)
-#define READ_LOCK_ATOMIC(n) atomic_##n
-#define WRITE_LOCK_ADD(n) __ASM_FORM_COMMA(addl n)
-#define WRITE_LOCK_SUB(n) __ASM_FORM_COMMA(subl n)
-#define WRITE_LOCK_CMP RW_LOCK_BIAS
-
-#else /* CONFIG_NR_CPUS > 2048 */
-
-#include <linux/const.h>
-
-#ifndef __ASSEMBLY__
-typedef union {
- s64 lock;
- struct {
- u32 read;
- s32 write;
- };
-} arch_rwlock_t;
-#endif
-
-#define RW_LOCK_BIAS (_AC(1,L) << 32)
-#define READ_LOCK_SIZE(insn) __ASM_FORM(insn##q)
-#define READ_LOCK_ATOMIC(n) atomic64_##n
-#define WRITE_LOCK_ADD(n) __ASM_FORM(incl)
-#define WRITE_LOCK_SUB(n) __ASM_FORM(decl)
-#define WRITE_LOCK_CMP 1
-
-#endif /* CONFIG_NR_CPUS */
-
-#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
-
-/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */
-
-#endif /* _ASM_X86_RWLOCK_H */
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 54f1c8068c02..9295016485c9 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -187,7 +187,6 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
cpu_relax();
}
-#ifndef CONFIG_QUEUE_RWLOCK
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
@@ -198,91 +197,15 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
+ * On x86, we implement read-write locks using the generic qrwlock with
+ * x86 specific optimization.
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_read_can_lock(arch_rwlock_t *lock)
-{
- return lock->lock > 0;
-}
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_write_can_lock(arch_rwlock_t *lock)
-{
- return lock->write == WRITE_LOCK_CMP;
-}
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
- "jns 1f\n"
- "call __read_lock_failed\n\t"
- "1:\n"
- ::LOCK_PTR_REG (rw) : "memory");
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
- "jz 1f\n"
- "call __write_lock_failed\n\t"
- "1:\n"
- ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
- : "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
- READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;
-
- if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
- return 1;
- READ_LOCK_ATOMIC(inc)(count);
- return 0;
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
- atomic_t *count = (atomic_t *)&lock->write;
-
- if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
- return 1;
- atomic_add(WRITE_LOCK_CMP, count);
- return 0;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
- :"+m" (rw->lock) : : "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
- : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
-}
-#else
#include <asm/qrwlock.h>
-#endif /* CONFIG_QUEUE_RWLOCK */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#undef READ_LOCK_SIZE
-#undef READ_LOCK_ATOMIC
-#undef WRITE_LOCK_ADD
-#undef WRITE_LOCK_SUB
-#undef WRITE_LOCK_CMP
-
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 73c4c007200f..5f9d7572d82b 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -34,10 +34,6 @@ typedef struct arch_spinlock {
#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
-#ifdef CONFIG_QUEUE_RWLOCK
#include <asm-generic/qrwlock_types.h>
-#else
-#include <asm/rwlock.h>
-#endif
#endif /* _ASM_X86_SPINLOCK_TYPES_H */
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 4d4f96a27638..7ef9a30e7dac 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -20,7 +20,6 @@ lib-y := delay.o misc.o cmdline.o
lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_SMP) += rwlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
deleted file mode 100644
index 1cad22139c88..000000000000
--- a/arch/x86/lib/rwlock.S
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Slow paths of read/write spinlocks. */
-
-#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
-#include <asm/rwlock.h>
-
-#ifdef CONFIG_X86_32
-# define __lock_ptr eax
-#else
-# define __lock_ptr rdi
-#endif
-
-ENTRY(__write_lock_failed)
- CFI_STARTPROC
- FRAME
-0: LOCK_PREFIX
- WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
-1: rep; nop
- cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
- jne 1b
- LOCK_PREFIX
- WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
- jnz 0b
- ENDFRAME
- ret
- CFI_ENDPROC
-END(__write_lock_failed)
-
-ENTRY(__read_lock_failed)
- CFI_STARTPROC
- FRAME
-0: LOCK_PREFIX
- READ_LOCK_SIZE(inc) (%__lock_ptr)
-1: rep; nop
- READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
- js 1b
- LOCK_PREFIX
- READ_LOCK_SIZE(dec) (%__lock_ptr)
- js 0b
- ENDFRAME
- ret
- CFI_ENDPROC
-END(__read_lock_failed)
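The deleted asm/rwlock.h and rwlock.S implemented read-write locks as a single biased counter: the lock starts at RW_LOCK_BIAS, each reader takes one unit, and a writer claims the whole bias at once, falling back to the slow paths above on contention. A simplified, single-threaded C model of that bias accounting is sketched below; it is an illustration only, not the kernel code (the real fast path used LOCK-prefixed instructions and atomics).

#include <stdio.h>

#define RW_LOCK_BIAS 0x00100000   /* value from the deleted asm/rwlock.h */

static int lock = RW_LOCK_BIAS;   /* unlocked: the full bias is available */

static int read_trylock(void)
{
	if (--lock >= 0)          /* each reader consumes one unit of the bias */
		return 1;
	++lock;                   /* went negative: a writer holds it, undo */
	return 0;
}

static int write_trylock(void)
{
	lock -= RW_LOCK_BIAS;     /* a writer claims the entire bias at once */
	if (lock == 0)
		return 1;
	lock += RW_LOCK_BIAS;     /* readers or another writer hold it, undo */
	return 0;
}

int main(void)
{
	printf("first reader:  %d\n", read_trylock());   /* 1 - readers share */
	printf("second reader: %d\n", read_trylock());   /* 1 - still shared */
	printf("writer:        %d\n", write_trylock());  /* 0 - readers present */
	return 0;
}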