author     Ingo Molnar <mingo@kernel.org>    2016-07-07 09:12:02 +0200
committer  Ingo Molnar <mingo@kernel.org>    2016-07-07 09:12:02 +0200
commit     36e91aa2628e46c2146049eee8b9b7f773b0ffc3
tree       c14e756181dca4e4e06d6aa1b1299a5f4dc5bf3d /kernel
parent     03e3c2b7edbe1e8758196b2c7843333eb328063d
parent     b7271b9f3e18181559b96a610f4e42bdb04b07f5
Merge branch 'locking/arch-atomic' into locking/core, because the topic is ready
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
 kernel/locking/qrwlock.c            | 2 +-
 kernel/locking/qspinlock_paravirt.h | 4 ++--
 kernel/locking/rwsem-xadd.c         | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index fec082338668..19248ddf37ce 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -93,7 +93,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	 * that accesses can't leak upwards out of our subsequent critical
 	 * section in the case that the lock is currently held for write.
 	 */
-	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS;
+	cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
 	rspin_until_writer_unlock(lock, cnts);
 
 	/*
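
The change in this hunk is purely a return-value simplification: atomic_fetch_add_acquire() returns the counter as it was *before* the addition, which is exactly what the old code reconstructed by subtracting _QR_BIAS from the result of atomic_add_return_acquire(). A minimal sketch of the equivalence, using C11 <stdatomic.h> rather than the kernel's atomic_t API (QR_BIAS is a made-up stand-in for _QR_BIAS):

    /*
     * Userspace sketch with C11 atomics, not the kernel API. C11 has no
     * add_return primitive, so the old idiom is emulated as fetch_add + bias.
     */
    #include <assert.h>
    #include <stdatomic.h>

    #define QR_BIAS 0x100

    int main(void)
    {
        atomic_int a, b;
        atomic_init(&a, 42);
        atomic_init(&b, 42);

        /* Old idiom: add_return yields the NEW value, so the caller must
         * subtract the bias again to recover the old count. */
        int newval = atomic_fetch_add_explicit(&a, QR_BIAS,
                                               memory_order_acquire) + QR_BIAS;
        int old_via_sub = newval - QR_BIAS;

        /* New idiom: fetch_add yields the OLD value directly. */
        int old_direct = atomic_fetch_add_explicit(&b, QR_BIAS,
                                                   memory_order_acquire);

        assert(old_via_sub == old_direct);  /* both observe 42 */
        return 0;
    }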
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 21ede57f68b3..37649e69056c 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -112,12 +112,12 @@ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 #else /* _Q_PENDING_BITS == 8 */
 static __always_inline void set_pending(struct qspinlock *lock)
 {
-	atomic_set_mask(_Q_PENDING_VAL, &lock->val);
+	atomic_or(_Q_PENDING_VAL, &lock->val);
 }
 
 static __always_inline void clear_pending(struct qspinlock *lock)
 {
-	atomic_clear_mask(_Q_PENDING_VAL, &lock->val);
+	atomic_andnot(_Q_PENDING_VAL, &lock->val);
 }
 
 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
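
Here the old atomic_set_mask()/atomic_clear_mask() helpers give way to atomic_or() and atomic_andnot(), where andnot(m, v) behaves as v &= ~m. A hedged userspace sketch of the same pending-bit manipulation with C11 atomics (Q_PENDING_VAL is a made-up stand-in for _Q_PENDING_VAL; the kernel's primitives are void-returning per-architecture operations, modeled here with fetch ops):

    #include <assert.h>
    #include <stdatomic.h>

    #define Q_PENDING_VAL 0x100

    static void set_pending(atomic_int *val)
    {
        /* atomic_or(): OR the pending bit into the lock word. */
        atomic_fetch_or(val, Q_PENDING_VAL);
    }

    static void clear_pending(atomic_int *val)
    {
        /* atomic_andnot(m, v) == (v &= ~m): clear the pending bit. */
        atomic_fetch_and(val, ~Q_PENDING_VAL);
    }

    int main(void)
    {
        atomic_int val;
        atomic_init(&val, 0);

        set_pending(&val);
        assert(atomic_load(&val) == Q_PENDING_VAL);

        clear_pending(&val);
        assert(atomic_load(&val) == 0);
        return 0;
    }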
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 2031281bb940..447e08de1fab 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -153,7 +153,7 @@ __rwsem_mark_wake(struct rw_semaphore *sem,
 	if (wake_type != RWSEM_WAKE_READ_OWNED) {
 		adjustment = RWSEM_ACTIVE_READ_BIAS;
  try_reader_grant:
-		oldcount = atomic_long_add_return(adjustment, &sem->count) - adjustment;
+		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
 		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
 			/*
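
Same pattern as the qrwlock.c hunk, applied to the rwsem count: atomic_long_fetch_add() hands back the pre-add value of sem->count, so the trailing "- adjustment" correction disappears. A simplified sketch of the optimistic reader-grant check under that contract, again with C11 atomics; the RWSEM_* constants are placeholders rather than the kernel's real definitions, and the undo path is reduced to a plain subtraction:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define RWSEM_ACTIVE_READ_BIAS  1L      /* placeholder */
    #define RWSEM_WAITING_BIAS      (-100L) /* placeholder */

    static bool try_reader_grant(atomic_long *count)
    {
        long adjustment = RWSEM_ACTIVE_READ_BIAS;

        /* fetch_add returns the count as it was BEFORE the bias was
         * added, i.e. the value the old code rebuilt by subtracting. */
        long oldcount = atomic_fetch_add(count, adjustment);

        if (oldcount < RWSEM_WAITING_BIAS) {
            /* A writer slipped in first: undo the optimistic grant. */
            atomic_fetch_sub(count, adjustment);
            return false;
        }
        return true;
    }

    int main(void)
    {
        atomic_long count;
        atomic_init(&count, RWSEM_WAITING_BIAS);

        printf("reader grant: %s\n",
               try_reader_grant(&count) ? "yes" : "no");
        return 0;
    }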