/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * queued_spin_lock_slowpath() can ACQUIRE the lock before
	 * issuing the unordered store that sets _Q_LOCKED_VAL.
	 *
	 * See both smp_cond_acquire() sites for more detail.
	 *
	 * This however means that in code like:
	 *
	 *   spin_lock(A)		spin_lock(B)
	 *   spin_unlock_wait(B)	spin_is_locked(A)
	 *   do_something()		do_something()
	 *
	 * Both CPUs can end up running do_something() because the store
	 * setting _Q_LOCKED_VAL will pass through the loads in
	 * spin_unlock_wait() and/or spin_is_locked().
	 *
	 * Avoid this by issuing a full memory barrier between the spin_lock()
	 * and the loads in spin_unlock_wait() and spin_is_locked().
	 *
	 * Note that regular mutual exclusion doesn't care about this
	 * delayed store.
	 */
	smp_mb();
	return atomic_read(&lock->val) & _Q_LOCKED_MASK;
}
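
/*
 * Illustrative sketch (all names hypothetical, not part of this header):
 * the scenario above written out with the regular spinlock API.  The
 * intent is that at most one of the two do_something() calls runs.
 * Without the full barrier above, both loads could miss the other CPU's
 * (possibly delayed) store that sets _Q_LOCKED_VAL, and both CPUs would
 * enter do_something().
 *
 *	DEFINE_SPINLOCK(A);		DEFINE_SPINLOCK(B);
 *
 *	CPU0:				CPU1:
 *	  spin_lock(&A);		  spin_lock(&B);
 *	  spin_unlock_wait(&B);		  if (!spin_is_locked(&A))
 *	  do_something();			  do_something();
 *	  spin_unlock(&A);		  spin_unlock(&B);
 */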

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to prevent the lockref code from
 *      stealing the lock and changing things underneath it. This also
 *      allows some optimizations to be applied without conflicting with
 *      lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
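
/*
 * Illustrative sketch (simplified, details may differ): the by-value
 * variant above exists mainly for the lockref code, which snapshots the
 * combined { spinlock, count } word and only attempts a lockless update
 * while the snapshot shows the lock as unlocked, roughly as in
 * lib/lockref.c:
 *
 *	struct lockref old, new;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		new = old;
 *		new.count++;
 *		if (cmpxchg64_relaxed(&lockref->lock_count, old.lock_count,
 *				      new.lock_count) == old.lock_count)
 *			return;		// lockless increment succeeded
 *		old.lock_count = READ_ONCE(lockref->lock_count);
 *	}
 *	// otherwise fall back to spin_lock(&lockref->lock) and update
 */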

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}
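
/*
 * Usage sketch (hypothetical lock and helpers, not part of this
 * header): the trylock only attempts the cmpxchg when the lock word
 * reads as zero, so callers must expect failure under any contention
 * and provide a fallback instead of spinning here.
 *
 *	static struct qspinlock my_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	if (queued_spin_trylock(&my_lock)) {
 *		do_quick_work();		// uncontended, lock is held
 *		queued_spin_unlock(&my_lock);
 *	} else {
 *		defer_work();			// contended, do not block
 *	}
 */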

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}
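
/*
 * Minimal usage sketch (hypothetical lock, not part of this header).
 * The fast path above succeeds only when the whole lock word is zero,
 * i.e. nobody holds the lock and nobody is waiting; any non-zero value
 * is handed to the slowpath together with the value just observed.
 *
 *	static struct qspinlock my_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	queued_spin_lock(&my_lock);
 *	// ... critical section ...
 *	queued_spin_unlock(&my_lock);
 */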

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * smp_mb__before_atomic() gives the atomic_sub() below the RELEASE
	 * semantics required to unlock.
	 */
	smp_mb__before_atomic();
	atomic_sub(_Q_LOCKED_VAL, &lock->val);
}
#endif
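
/*
 * Illustrative sketch (architecture code, not part of this header): the
 * #ifndef above lets an architecture provide a cheaper unlock before
 * including this file.  Where the locked byte is the low-order byte of
 * the word and stores to it are cheap, the override can be a single
 * release store; x86 does roughly the following:
 *
 *	#define queued_spin_unlock queued_spin_unlock
 *	static inline void queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		smp_store_release((u8 *)lock, 0);
 *	}
 */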

/**
 * queued_spin_unlock_wait - wait until current lock holder releases the lock
 * @lock: Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if new lockers keep
 * arriving and the waiter is unlucky enough never to observe an unlocked
 * state.
 */
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
	/* See queued_spin_is_locked() */
	smp_mb();
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();
}
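
/*
 * Usage sketch (hypothetical object and helpers, not part of this
 * header): spin_unlock_wait() lets a path that does not need to take
 * the lock itself wait out any critical section currently in progress,
 * e.g. during teardown:
 *
 *	WRITE_ONCE(obj->dying, true);	// new lockers must check this
 *	spin_unlock_wait(&obj->lock);	// drain the current holder, if any
 *	free_object(obj);
 */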

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
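
/*
 * Illustrative sketch (architecture code, not part of this header):
 * virt_spin_lock() is a hook for virtualized guests, where strict FIFO
 * queueing can behave badly if the lock holder's or a queued waiter's
 * vCPU is preempted.  An architecture may override it to bypass the
 * queue; x86 does roughly the following, falling back to a simple
 * test-and-set lock when running on a hypervisor without paravirt
 * spinlock support:
 *
 *	static inline bool virt_spin_lock(struct qspinlock *lock)
 *	{
 *		if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 *			return false;
 *
 *		do {
 *			while (atomic_read(&lock->val) != 0)
 *				cpu_relax();
 *		} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 *
 *		return true;
 *	}
 */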

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */