author	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-08 16:12:03 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-08 16:12:03 -0700
commit	e1928328699a582a540b105e5f4c160832a7fdcb (patch)
tree	f36bb303b8648189d7b5a7feb27e58fe9fe3b9f0 /arch/arc/include
parent	46f1ec23a46940846f86a91c46f7119d8a8b5de1 (diff)
parent	9156e545765e467e6268c4814cfa609ebb16237e (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The main changes in this cycle are:
- rwsem scalability improvements, phase #2, by Waiman Long, which are
rather impressive:
"On a 2-socket 40-core 80-thread Skylake system with 40 reader
and writer locking threads, the min/mean/max locking operations
done in a 5-second testing window before the patchset were:
40 readers, Iterations Min/Mean/Max = 1,807/1,808/1,810
40 writers, Iterations Min/Mean/Max = 1,807/50,344/151,255
After the patchset, they became:
40 readers, Iterations Min/Mean/Max = 30,057/31,359/32,741
40 writers, Iterations Min/Mean/Max = 94,466/95,845/97,098"
There are a lot of changes to the locking implementation that make
it similar to qrwlock, including owner handoff for fairer locking
(a simplified sketch of the handoff idea follows the commit list
below).
Another microbenchmark shows how the improvements hold up across
the spectrum:
"With a locking microbenchmark running on 5.1 based kernel, the
total locking rates (in kops/s) on a 2-socket Skylake system
with equal numbers of readers and writers (mixed) before and
after this patchset were:
# of Threads Before Patch After Patch
------------ ------------ -----------
2 2,618 4,193
4 1,202 3,726
8 802 3,622
16 729 3,359
32 319 2,826
64 102 2,744"
The changes are extensive and the patch-set has been through
several iterations addressing various locking workloads. There
might be more regressions, but unless they are pathological I
believe we want to use this new implementation as the baseline
going forward.
- jump-label optimizations by Daniel Bristot de Oliveira: the primary
motivation was to remove IPI disturbance of isolated RT-workload
CPUs, which resulted in the implementation of batched jump-label
updates. Beyond improving the kernel's real-time characteristics,
in one test this patchset cut static key update overhead from 57
msecs to just 1.4 msecs - a nice speedup as well (a minimal sketch
of the static-key API involved follows this message).
- atomic64_t cross-arch type cleanups by Mark Rutland: over the last
~10 years of atomic64_t existence the various types used by the
APIs only had to be self-consistent within each architecture -
which means they became wildly inconsistent across architectures.
Mark puts an end to this by reworking all the atomic64
implementations to use 's64' as the base type for atomic64_t, and
by ensuring that this type is consistently used for parameters and
return values in the API, avoiding further problems in this area
(a small usage example follows the diff below).
- A large set of small improvements to lockdep by Yuyang Du: type
cleanups, output cleanups, function return type and other cleanups
all around the place.
- A set of percpu ops cleanups and fixes by Peter Zijlstra.
- Misc other changes - please see the Git log for more details"
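For the jump-label item above, the static-key API whose updates the
batching work optimizes looks roughly like the sketch below. This is
a hypothetical, minimal example - "trace_hit", "hot_path" and
"enable_tracing" are invented names, while DEFINE_STATIC_KEY_FALSE,
static_branch_unlikely() and static_branch_enable() are the real
kernel interfaces. Each key update has to patch the key's jump sites
in kernel text; batching those updates is what reduces the IPI
disturbance of isolated CPUs.

    #include <linux/jump_label.h>
    #include <linux/printk.h>

    /* Hypothetical example key; defaults to false, so the branch
     * below compiles to a NOP until the key is enabled. */
    static DEFINE_STATIC_KEY_FALSE(trace_hit);

    void hot_path(void)
    {
            /* No flag load or conditional on the fast path: enabling
             * the key rewrites this jump site in the kernel text. */
            if (static_branch_unlikely(&trace_hit))
                    pr_info("tracing enabled\n");
    }

    void enable_tracing(void)
    {
            /* Patches every jump site attached to the key; these are
             * the text updates the series batches. */
            static_branch_enable(&trace_hit);
    }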
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (82 commits)
locking/lockdep: increase size of counters for lockdep statistics
locking/atomics: Use sed(1) instead of non-standard head(1) option
locking/lockdep: Move mark_lock() inside CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING
x86/jump_label: Make tp_vec_nr static
x86/percpu: Optimize raw_cpu_xchg()
x86/percpu, sched/fair: Avoid local_clock()
x86/percpu, x86/irq: Relax {set,get}_irq_regs()
x86/percpu: Relax smp_processor_id()
x86/percpu: Differentiate this_cpu_{}() and __this_cpu_{}()
locking/rwsem: Guard against making count negative
locking/rwsem: Adaptive disabling of reader optimistic spinning
locking/rwsem: Enable time-based spinning on reader-owned rwsem
locking/rwsem: Make rwsem->owner an atomic_long_t
locking/rwsem: Enable readers spinning on writer
locking/rwsem: Clarify usage of owner's nonspinnable bit
locking/rwsem: Wake up almost all readers in wait queue
locking/rwsem: More optimal RT task handling of null owner
locking/rwsem: Always release wait_lock before waking up tasks
locking/rwsem: Implement lock handoff to prevent lock starvation
locking/rwsem: Make rwsem_spin_on_owner() return owner state
...
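One way to picture the lock handoff referenced in "locking/rwsem:
Implement lock handoff to prevent lock starvation": the rwsem count
word packs a writer bit, a handoff bit and a reader count, and a
reader's fast path backs off whenever either flag is set, so a
starving waiter that sets the handoff bit is guaranteed to get the
lock next. The sketch below is a simplified userspace illustration
under assumed bit positions - the names and layout are illustrative,
not the kernel's actual code.

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RWSEM_WRITER_LOCKED  (1UL << 0)  /* a writer holds the lock */
    #define RWSEM_FLAG_HANDOFF   (1UL << 1)  /* next waiter gets the lock */
    #define RWSEM_READER_SHIFT   8
    #define RWSEM_READER_BIAS    (1UL << RWSEM_READER_SHIFT)

    struct sem { atomic_ulong count; };

    /* Reader fast path: optimistically add a reader bias, then check
     * for a writer or a pending handoff. */
    static bool down_read_trylock_fast(struct sem *s)
    {
            unsigned long c = atomic_fetch_add(&s->count, RWSEM_READER_BIAS);

            if (!(c & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF)))
                    return true;

            /* Conflict: back out the bias; a real rwsem would now go
             * to the slow path and queue behind the handoff target. */
            atomic_fetch_sub(&s->count, RWSEM_READER_BIAS);
            return false;
    }

    /* Writer fast path: only succeeds when the count is fully idle. */
    static bool down_write_trylock_fast(struct sem *s)
    {
            unsigned long zero = 0;

            return atomic_compare_exchange_strong(&s->count, &zero,
                                                  RWSEM_WRITER_LOCKED);
    }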
Diffstat (limited to 'arch/arc/include')
-rw-r--r--	arch/arc/include/asm/atomic.h	41
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 17cf1c657cb3..7298ce84762e 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -321,14 +321,14 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
  */

 typedef struct {
-	aligned_u64 counter;
+	s64 __aligned(8) counter;
 } atomic64_t;

 #define ATOMIC64_INIT(a) { (a) }

-static inline long long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
 {
-	unsigned long long val;
+	s64 val;

 	__asm__ __volatile__(
 	"	ldd   %0, [%1]	\n"
@@ -338,7 +338,7 @@ static inline long long atomic64_read(const atomic64_t *v)
 	return val;
 }

-static inline void atomic64_set(atomic64_t *v, long long a)
+static inline void atomic64_set(atomic64_t *v, s64 a)
 {
 	/*
 	 * This could have been a simple assignment in "C" but would need
@@ -359,9 +359,9 @@ static inline void atomic64_set(atomic64_t *v, long long a)
 }

 #define ATOMIC64_OP(op, op1, op2)					\
-static inline void atomic64_##op(long long a, atomic64_t *v)		\
+static inline void atomic64_##op(s64 a, atomic64_t *v)			\
 {									\
-	unsigned long long val;						\
+	s64 val;							\
 									\
 	__asm__ __volatile__(						\
 	"1:				\n"				\
@@ -372,13 +372,13 @@ static inline void atomic64_##op(long long a, atomic64_t *v)		\
 	"	bnz     1b		\n"				\
 	: "=&r"(val)							\
 	: "r"(&v->counter), "ir"(a)					\
-	: "cc");							\
+	: "cc");							\
 }									\

 #define ATOMIC64_OP_RETURN(op, op1, op2)				\
-static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
+static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
 {									\
-	unsigned long long val;						\
+	s64 val;							\
 									\
 	smp_mb();							\
 									\
@@ -399,9 +399,9 @@ static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
 }

 #define ATOMIC64_FETCH_OP(op, op1, op2)					\
-static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
+static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)		\
 {									\
-	unsigned long long val, orig;					\
+	s64 val, orig;							\
 									\
 	smp_mb();							\
 									\
@@ -441,10 +441,10 @@ ATOMIC64_OPS(xor, xor, xor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP

-static inline long long
-atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
+static inline s64
+atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
 {
-	long long prev;
+	s64 prev;

 	smp_mb();

@@ -464,9 +464,9 @@ atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
 	return prev;
 }

-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
 {
-	long long prev;
+	s64 prev;

 	smp_mb();

@@ -492,9 +492,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
  * the atomic variable, v, was not decremented.
  */

-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long val;
+	s64 val;

 	smp_mb();

@@ -525,10 +525,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
  * Atomically adds @a to @v, if it was not @u.
  * Returns the old value of @v
 */
-static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
-						  long long u)
+static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long long old, temp;
+	s64 old, temp;

 	smp_mb();
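To show what the conversion above buys callers: atomic64_read() and
friends now take and return s64 on every architecture, so generic
code can hold atomic64 values in a single type without per-arch
casts. A minimal, hypothetical kernel-style usage example follows
("bytes_done" and "account_bytes" are invented names; the
atomic64_* calls are the real API).

    #include <linux/atomic.h>
    #include <linux/types.h>

    /* Hypothetical 64-bit counter, safe on 32-bit architectures. */
    static atomic64_t bytes_done = ATOMIC64_INIT(0);

    static s64 account_bytes(s64 n)
    {
            atomic64_add(n, &bytes_done);
            /* Returns s64 on all architectures after this series. */
            return atomic64_read(&bytes_done);
    }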