author		Linus Torvalds <torvalds@linux-foundation.org>	2020-03-30 16:17:15 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-03-30 16:17:15 -0700
commit		4b9fd8a829a1eec7442e38afff21d610604de56a (patch)
tree		32757737b7eecbe8c5be7606d8dcec883d39f1e6 /arch/x86/include/asm
parent		a776c270a0b2fad6715cb714187e4290cadb9237 (diff)
parent		f1e67e355c2aafeddf1eac31335709236996d2fe (diff)
download	linux-4b9fd8a829a1eec7442e38afff21d610604de56a.tar.bz2
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The main changes in this cycle were:
- Continued user-access cleanups in the futex code.
- percpu-rwsem rewrite that uses its own waitqueue and atomic_t
instead of an embedded rwsem. This addresses a couple of
weaknesses, but the primary motivation was complications on the -rt
kernel.
- Introduce raw lock nesting detection in lockdep
(CONFIG_PROVE_RAW_LOCK_NESTING=y), document the raw_lock vs. normal
lock differences. This too originates from -rt. (A short sketch of the
nesting rule follows the quoted log.)
- Reuse lockdep zapped chain_hlocks entries, to conserve RAM
footprint on distro-ish kernels running into the "BUG:
MAX_LOCKDEP_CHAIN_HLOCKS too low!" depletion of the lockdep
chain-entries pool.
- Misc cleanups, smaller fixes and enhancements - see the changelog
for details"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (55 commits)
fs/buffer: Make BH_Uptodate_Lock bit_spin_lock a regular spinlock_t
thermal/x86_pkg_temp: Make pkg_temp_lock a raw_spinlock_t
Documentation/locking/locktypes: Minor copy editor fixes
Documentation/locking/locktypes: Further clarifications and wordsmithing
m68knommu: Remove mm.h include from uaccess_no.h
x86: get rid of user_atomic_cmpxchg_inatomic()
generic arch_futex_atomic_op_inuser() doesn't need access_ok()
x86: don't reload after cmpxchg in unsafe_atomic_op2() loop
x86: convert arch_futex_atomic_op_inuser() to user_access_begin/user_access_end()
objtool: whitelist __sanitizer_cov_trace_switch()
[parisc, s390, sparc64] no need for access_ok() in futex handling
sh: no need of access_ok() in arch_futex_atomic_op_inuser()
futex: arch_futex_atomic_op_inuser() calling conventions change
completion: Use lockdep_assert_RT_in_threaded_ctx() in complete_all()
lockdep: Add posixtimer context tracing bits
lockdep: Annotate irq_work
lockdep: Add hrtimer context tracing bits
lockdep: Introduce wait-type checks
completion: Use simple wait queues
sched/swait: Prepare usage in completions
...
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--	arch/x86/include/asm/futex.h	99
-rw-r--r--	arch/x86/include/asm/uaccess.h	93
2 files changed, 63 insertions(+), 129 deletions(-)
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index 13c83fe97988..f9c00110a69a 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -12,76 +12,103 @@
 #include <asm/processor.h>
 #include <asm/smap.h>
 
-#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
-	asm volatile("\t" ASM_STAC "\n" \
-		     "1:\t" insn "\n" \
-		     "2:\t" ASM_CLAC "\n" \
+#define unsafe_atomic_op1(insn, oval, uaddr, oparg, label) \
+do { \
+	int oldval = 0, ret; \
+	asm volatile("1:\t" insn "\n" \
+		     "2:\n" \
 		     "\t.section .fixup,\"ax\"\n" \
 		     "3:\tmov\t%3, %1\n" \
 		     "\tjmp\t2b\n" \
 		     "\t.previous\n" \
 		     _ASM_EXTABLE_UA(1b, 3b) \
 		     : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
-		     : "i" (-EFAULT), "0" (oparg), "1" (0))
+		     : "i" (-EFAULT), "0" (oparg), "1" (0)); \
+	if (ret) \
+		goto label; \
+	*oval = oldval; \
+} while(0)
 
-#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
-	asm volatile("\t" ASM_STAC "\n" \
-		     "1:\tmovl %2, %0\n" \
-		     "\tmovl\t%0, %3\n" \
+
+#define unsafe_atomic_op2(insn, oval, uaddr, oparg, label) \
+do { \
+	int oldval = 0, ret, tem; \
+	asm volatile("1:\tmovl %2, %0\n" \
+		     "2:\tmovl\t%0, %3\n" \
 		     "\t" insn "\n" \
-		     "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
-		     "\tjnz\t1b\n" \
-		     "3:\t" ASM_CLAC "\n" \
+		     "3:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
+		     "\tjnz\t2b\n" \
+		     "4:\n" \
 		     "\t.section .fixup,\"ax\"\n" \
-		     "4:\tmov\t%5, %1\n" \
-		     "\tjmp\t3b\n" \
+		     "5:\tmov\t%5, %1\n" \
+		     "\tjmp\t4b\n" \
 		     "\t.previous\n" \
-		     _ASM_EXTABLE_UA(1b, 4b) \
-		     _ASM_EXTABLE_UA(2b, 4b) \
+		     _ASM_EXTABLE_UA(1b, 5b) \
+		     _ASM_EXTABLE_UA(3b, 5b) \
 		     : "=&a" (oldval), "=&r" (ret), \
 		       "+m" (*uaddr), "=&r" (tem) \
-		     : "r" (oparg), "i" (-EFAULT), "1" (0))
+		     : "r" (oparg), "i" (-EFAULT), "1" (0)); \
+	if (ret) \
+		goto label; \
+	*oval = oldval; \
+} while(0)
 
-static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 		u32 __user *uaddr)
 {
-	int oldval = 0, ret, tem;
-
-	pagefault_disable();
+	if (!user_access_begin(uaddr, sizeof(u32)))
+		return -EFAULT;
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
+		unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
-				   uaddr, oparg);
+		unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
+				  uaddr, oparg, Efault);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
+		unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
+		unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
+		unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
 		break;
 	default:
-		ret = -ENOSYS;
+		user_access_end();
+		return -ENOSYS;
 	}
-
-	pagefault_enable();
-
-	if (!ret)
-		*oval = oldval;
-
-	return ret;
+	user_access_end();
+	return 0;
+Efault:
+	user_access_end();
+	return -EFAULT;
 }
 
 static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 						u32 oldval, u32 newval)
 {
-	return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval);
+	int ret = 0;
+
+	if (!user_access_begin(uaddr, sizeof(u32)))
+		return -EFAULT;
+	asm volatile("\n"
+		"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		"2:\n"
+		"\t.section .fixup, \"ax\"\n"
+		"3:\tmov %3, %0\n"
+		"\tjmp 2b\n"
+		"\t.previous\n"
+		_ASM_EXTABLE_UA(1b, 3b)
+		: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		: "i" (-EFAULT), "r" (newval), "1" (oldval)
+		: "memory"
+	);
+	user_access_end();
+	*uval = oldval;
+	return ret;
 }
 
 #endif
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 61d93f062a36..ea6fc643ccfe 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -584,99 +584,6 @@ extern __must_check long strnlen_user(const char __user *str, long n);
 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
 
-extern void __cmpxchg_wrong_size(void)
-	__compiletime_error("Bad argument size for cmpxchg");
-
-#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
-({ \
-	int __ret = 0; \
-	__typeof__(*(ptr)) __old = (old); \
-	__typeof__(*(ptr)) __new = (new); \
-	__uaccess_begin_nospec(); \
-	switch (size) { \
-	case 1: \
-	{ \
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE_UA(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "q" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	case 2: \
-	{ \
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE_UA(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "r" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	case 4: \
-	{ \
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE_UA(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "r" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	case 8: \
-	{ \
-		if (!IS_ENABLED(CONFIG_X86_64)) \
-			__cmpxchg_wrong_size(); \
-		 \
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE_UA(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "r" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	default: \
-		__cmpxchg_wrong_size(); \
-	} \
-	__uaccess_end(); \
-	*(uval) = __old; \
-	__ret; \
-})
-
-#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
-({ \
-	access_ok((ptr), sizeof(*(ptr))) ? \
-		__user_atomic_cmpxchg_inatomic((uval), (ptr), \
-				(old), (new), sizeof(*(ptr))) : \
-		-EFAULT; \
-})
-
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */