author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-09-09 12:01:15 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-09-09 12:01:15 -0400
commit    c3f31f6a6f68bcb51689c90733282ec263602a9d (patch)
tree      07c2c7ae966b07d5adabe78215d9c76fa4ec531a /arch/x86/include
parent    e1a9c16b303725ac900fee2a3ec4dbe2c2f846ab (diff)
parent    36bd621337c91a1ecda588e5bbbae8dd9698bae7 (diff)
Merge branch 'x86/spinlocks' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into stable/for-linus-3.12
* 'x86/spinlocks' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kvm/guest: Fix sparse warning: "symbol 'klock_waiting' was not declared as static"
  kvm: Paravirtual ticketlocks support for linux guests running on KVM hypervisor
  kvm guest: Add configuration support to enable debug information for KVM Guests
  kvm uapi: Add KICK_CPU and PV_UNHALT definition to uapi
  xen, pvticketlock: Allow interrupts to be enabled while blocking
  x86, ticketlock: Add slowpath logic
  jump_label: Split jumplabel ratelimit
  x86, pvticketlock: When paravirtualizing ticket locks, increment by 2
  x86, pvticketlock: Use callee-save for lock_spinning
  xen, pvticketlocks: Add xen_nopvspin parameter to disable xen pv ticketlocks
  xen, pvticketlock: Xen implementation for PV ticket locks
  xen: Defer spinlock setup until boot CPU setup
  x86, ticketlock: Collapse a layer of functions
  x86, ticketlock: Don't inline _spin_unlock when using paravirt spinlocks
  x86, spinlock: Replace pv spinlocks with pv ticketlocks
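For orientation, this is the head/tail ticket scheme the series builds on: lock() takes a ticket with one atomic xadd and spins until head reaches it; unlock() advances head. A minimal userspace sketch follows (illustrative only, not the kernel's implementation, which packs head and tail into a single word and xadds one half of it):

/*
 * Minimal ticket-lock sketch; illustrative stand-in for the kernel's
 * arch_spinlock_t, which keeps head/tail packed in one word.
 */
#include <stdatomic.h>
#include <sched.h>

struct ticketlock {
        atomic_ushort head;     /* ticket currently being served */
        atomic_ushort tail;     /* next ticket to hand out */
};

static void ticket_lock(struct ticketlock *lock)
{
        /* Take a ticket; fetch_add plays the role of the kernel's xadd. */
        unsigned short me = atomic_fetch_add(&lock->tail, 1);

        /* Spin until our number comes up. */
        while (atomic_load(&lock->head) != me)
                sched_yield();  /* userspace stand-in for cpu_relax() */
}

static void ticket_unlock(struct ticketlock *lock)
{
        atomic_fetch_add(&lock->head, 1);
}

The problem this merge addresses: in a virtualized guest, the vCPU holding the lock (or the vCPU whose ticket is next) can be preempted by the host, leaving every other waiter burning cycles on a ticket that cannot come up. The merged code caps the spin at SPIN_THRESHOLD and then asks the hypervisor to halt the waiter until the unlocker kicks it.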
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/kvm_para.h        |  14
-rw-r--r--  arch/x86/include/asm/paravirt.h        |  32
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  |  14
-rw-r--r--  arch/x86/include/asm/spinlock.h        | 128
-rw-r--r--  arch/x86/include/asm/spinlock_types.h  |  16
-rw-r--r--  arch/x86/include/uapi/asm/kvm_para.h   |   1
6 files changed, 125 insertions, 80 deletions
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 695399f2d5eb..427afcbf3d55 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -118,10 +118,20 @@ void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);
-#else
-#define kvm_guest_init() do { } while (0)
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init kvm_spinlock_init(void);
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static inline void kvm_spinlock_init(void)
+{
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#else /* CONFIG_KVM_GUEST */
+#define kvm_guest_init() do {} while (0)
#define kvm_async_pf_task_wait(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)
+
static inline u32 kvm_read_and_reset_pf_reason(void)
{
return 0;
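The kvm_spinlock_init() addition follows the usual config-stub idiom: a real prototype under CONFIG_PARAVIRT_SPINLOCKS, an empty static inline otherwise, so callers stay #ifdef-free. A sketch of the call site (illustrative; the real caller is the KVM guest setup code, outside this diffstat):

/* Illustrative caller: compiles to nothing when
 * CONFIG_PARAVIRT_SPINLOCKS is off, thanks to the stub above. */
static void __init example_guest_setup(void)    /* hypothetical name */
{
        kvm_spinlock_init();
}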
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index cfdc9ee4c900..401f350ef71b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -712,36 +712,16 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
+ __ticket_t ticket)
{
- return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
+ PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}
-static inline int arch_spin_is_contended(struct arch_spinlock *lock)
+static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
+ __ticket_t ticket)
{
- return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
-}
-#define arch_spin_is_contended arch_spin_is_contended
-
-static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
-{
- PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
-}
-
-static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
- unsigned long flags)
-{
- PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
-}
-
-static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
-{
- return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
-}
-
-static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
-{
- PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
+ PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}
#endif
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0db1fcac668c..04ac40e192eb 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -327,13 +327,15 @@ struct pv_mmu_ops {
};
struct arch_spinlock;
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#else
+typedef u16 __ticket_t;
+#endif
+
struct pv_lock_ops {
- int (*spin_is_locked)(struct arch_spinlock *lock);
- int (*spin_is_contended)(struct arch_spinlock *lock);
- void (*spin_lock)(struct arch_spinlock *lock);
- void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
- int (*spin_trylock)(struct arch_spinlock *lock);
- void (*spin_unlock)(struct arch_spinlock *lock);
+ struct paravirt_callee_save lock_spinning;
+ void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
};
/* This contains all the paravirt structures: we get a convenient
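With that, pv_lock_ops shrinks from six operations to two, and lock_spinning moves to the callee-save convention (hence PVOP_VCALLEE2 in the paravirt.h hunk above), so the inlined slow-path call clobbers as few registers as possible. A sketch of how a backend fills the struct, loosely modeled on the KVM guest code this merge brings in; the example_* names are illustrative:

/* Hypothetical backend wiring for the two remaining ops. */
static void example_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
        /* Record (lock, want) for this vCPU, set TICKET_SLOWPATH_FLAG,
         * then have the hypervisor halt us until the unlocker kicks. */
}
PV_CALLEE_SAVE_REGS_THUNK(example_lock_spinning);

static void example_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
        /* Find the vCPU waiting on (lock, ticket) and unhalt it. */
}

void __init example_spinlock_init(void)
{
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(example_lock_spinning);
        pv_lock_ops.unlock_kick = example_unlock_kick;
}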
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 33692eaabab5..d68883dd133c 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -1,11 +1,14 @@
#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H
+#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
+#include <asm/bitops.h>
+
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*
@@ -34,6 +37,31 @@
# define UNLOCK_LOCK_PREFIX
#endif
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD (1 << 15)
+
+extern struct static_key paravirt_ticketlocks_enabled;
+static __always_inline bool static_key_false(struct static_key *key);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
+{
+ set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+}
+
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
+ __ticket_t ticket)
+{
+}
+static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
+ __ticket_t ticket)
+{
+}
+
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
/*
* Ticket locks are conceptually two parts, one indicating the current head of
* the queue, and the other indicating the current tail. The lock is acquired
@@ -47,81 +75,101 @@
* in the high part, because a wide xadd increment of the low part would carry
* up and contaminate the high part.
*/
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
- register struct __raw_tickets inc = { .tail = 1 };
+ register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
inc = xadd(&lock->tickets, inc);
+ if (likely(inc.head == inc.tail))
+ goto out;
+ inc.tail &= ~TICKET_SLOWPATH_FLAG;
for (;;) {
- if (inc.head == inc.tail)
- break;
- cpu_relax();
- inc.head = ACCESS_ONCE(lock->tickets.head);
+ unsigned count = SPIN_THRESHOLD;
+
+ do {
+ if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
+ goto out;
+ cpu_relax();
+ } while (--count);
+ __ticket_lock_spinning(lock, inc.tail);
}
- barrier(); /* make sure nothing creeps before the lock is taken */
+out: barrier(); /* make sure nothing creeps before the lock is taken */
}
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
arch_spinlock_t old, new;
old.tickets = ACCESS_ONCE(lock->tickets);
- if (old.tickets.head != old.tickets.tail)
+ if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
return 0;
- new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+ new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
/* cmpxchg is a full barrier, so nothing can move before it */
return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
+ arch_spinlock_t old)
{
- __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
+ arch_spinlock_t new;
+
+ BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+
+ /* Perform the unlock on the "before" copy */
+ old.tickets.head += TICKET_LOCK_INC;
+
+ /* Clear the slowpath flag */
+ new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
+
+ /*
+ * If the lock is uncontended, clear the flag - use cmpxchg in
+ * case it changes behind our back though.
+ */
+ if (new.tickets.head != new.tickets.tail ||
+ cmpxchg(&lock->head_tail, old.head_tail,
+ new.head_tail) != old.head_tail) {
+ /*
+ * Lock still has someone queued for it, so wake up an
+ * appropriate waiter.
+ */
+ __ticket_unlock_kick(lock, old.tickets.head);
+ }
}
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+ if (TICKET_SLOWPATH_FLAG &&
+ static_key_false(&paravirt_ticketlocks_enabled)) {
+ arch_spinlock_t prev;
- return tmp.tail != tmp.head;
-}
+ prev = *lock;
+ add_smp(&lock->tickets.head, TICKET_LOCK_INC);
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
-{
- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+ /* add_smp() is a full mb() */
- return (__ticket_t)(tmp.tail - tmp.head) > 1;
+ if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
+ __ticket_unlock_slowpath(lock, prev);
+ } else
+ __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
- return __ticket_spin_is_contended(lock);
-}
-#define arch_spin_is_contended arch_spin_is_contended
+ struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- __ticket_spin_lock(lock);
+ return tmp.tail != tmp.head;
}
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
- return __ticket_spin_trylock(lock);
-}
+ struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- __ticket_spin_unlock(lock);
+ return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
+#define arch_spin_is_contended arch_spin_is_contended
static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
@@ -129,8 +177,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
arch_spin_lock(lock);
}
-#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (arch_spin_is_locked(lock))
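A worked example of the encoding the hunks above implement, with TICKET_LOCK_INC == 2 and TICKET_SLOWPATH_FLAG == 1 (ticket values illustrative):

/*
 *   head = 4, tail = 4            lock is free
 *   CPU A: xadd tail += 2         head = 4, tail = 6; A's ticket 4 == head,
 *                                 fast path, A owns the lock
 *   CPU B: xadd tail += 2         head = 4, tail = 8; B spins on ticket 6
 *   B exceeds SPIN_THRESHOLD      set_bit(0, &tail) -> tail = 9 (flag set);
 *                                 __ticket_lock_spinning(lock, 6) blocks B
 *   A unlocks: head += 2          head = 6; tail & TICKET_SLOWPATH_FLAG is
 *                                 set, so __ticket_unlock_slowpath() runs
 *                                 and __ticket_unlock_kick(lock, 6) wakes B,
 *                                 whose ticket now matches head
 */

Because tickets move in steps of two, bit 0 of tail is never a ticket bit, which is exactly the room the slowpath flag needs; the cmpxchg in __ticket_unlock_slowpath() clears the flag again once the lock goes uncontended.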
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index ad0ad07fc006..4f1bea19945b 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -1,13 +1,17 @@
#ifndef _ASM_X86_SPINLOCK_TYPES_H
#define _ASM_X86_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
#include <linux/types.h>
-#if (CONFIG_NR_CPUS < 256)
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define __TICKET_LOCK_INC 2
+#define TICKET_SLOWPATH_FLAG ((__ticket_t)1)
+#else
+#define __TICKET_LOCK_INC 1
+#define TICKET_SLOWPATH_FLAG ((__ticket_t)0)
+#endif
+
+#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
typedef u8 __ticket_t;
typedef u16 __ticketpair_t;
#else
@@ -15,6 +19,8 @@ typedef u16 __ticket_t;
typedef u32 __ticketpair_t;
#endif
+#define TICKET_LOCK_INC ((__ticket_t)__TICKET_LOCK_INC)
+
#define TICKET_SHIFT (sizeof(__ticket_t) * 8)
typedef struct arch_spinlock {
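Two things fall out of this hunk: the direct-include #error guard goes away because paravirt_types.h now includes this header itself (see the CONFIG_SMP hunk above), and the ticket increment becomes 2 under pv spinlocks so bit 0 of the tail can carry TICKET_SLOWPATH_FLAG. The sizing arithmetic behind the changed NR_CPUS cutoff, as a comment-style worked example:

/*
 * Each queued CPU consumes __TICKET_LOCK_INC ticket values, so a u8
 * ticket space of 256 values covers 256 / __TICKET_LOCK_INC CPUs:
 * 256 without pv spinlocks (INC == 1), 128 with them (INC == 2).
 * Hence the u8/u16 cutoff moves from CONFIG_NR_CPUS < 256 to < 128
 * when CONFIG_PARAVIRT_SPINLOCKS is set; the BUILD_BUG_ON() in
 * __ticket_unlock_slowpath() above re-checks that NR_CPUS still fits
 * in __ticket_t.
 */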
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 06fdbd987e97..94dc8ca434e0 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -23,6 +23,7 @@
#define KVM_FEATURE_ASYNC_PF 4
#define KVM_FEATURE_STEAL_TIME 5
#define KVM_FEATURE_PV_EOI 6
+#define KVM_FEATURE_PV_UNHALT 7
/* The last 8 bits are used to indicate how to interpret the flags field
* in pvclock structure. If no bits are set, all flags are ignored.
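KVM_FEATURE_PV_UNHALT is how the host advertises the kick/unhalt capability in its CPUID feature leaf. A simplified sketch of the guest-side gating, loosely following the kvm_spinlock_init() this merge brings in (body elided and illustrative):

/* Simplified sketch: enable pv ticketlocks only when the host can
 * wake a halted vCPU. */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

        /* ...wire up pv_lock_ops.lock_spinning / unlock_kick here... */
}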