author		Linus Torvalds <torvalds@linux-foundation.org>	2017-07-03 12:14:18 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-03 12:14:18 -0700
commit		892ad5acca0b2ddb514fae63fa4686bf726d2471 (patch)
tree		9945b41e6552c0b6b9b1b5b0ffb77f2f15857671 /arch
parent		162b246eb420d2ca2002a50917c897b10c9aba09 (diff)
parent		5d6dec6fba38c3e2d408df108bb92ef4ac201f18 (diff)
download	linux-892ad5acca0b2ddb514fae63fa4686bf726d2471.tar.bz2
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Add CONFIG_REFCOUNT_FULL=y to allow the disabling of the 'full'
     (robustness checked) refcount_t implementation with slightly lower
     runtime overhead (Kees Cook).

     The lighter weight variant is the default. The two variants use the
     same API. Having this variant was a precondition by some maintainers
     to merge refcount_t cleanups.

   - Add lockdep support for rtmutexes (Peter Zijlstra)

   - liblockdep fixes and improvements (Sasha Levin, Ben Hutchings)

   - ... misc fixes and improvements"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (30 commits)
  locking/refcount: Remove the half-implemented refcount_sub() API
  locking/refcount: Create unchecked atomic_t implementation
  locking/rtmutex: Don't initialize lockdep when not required
  locking/selftest: Add RT-mutex support
  locking/selftest: Remove the bad unlock ordering test
  rt_mutex: Add lockdep annotations
  MAINTAINERS: Claim atomic*_t maintainership
  locking/x86: Remove the unused atomic_inc_short() methd
  tools/lib/lockdep: Remove private kernel headers
  tools/lib/lockdep: Hide liblockdep output from test results
  tools/lib/lockdep: Add dummy current_gfp_context()
  tools/include: Add IS_ERR_OR_NULL to err.h
  tools/lib/lockdep: Add empty __is_[module,kernel]_percpu_address
  tools/lib/lockdep: Include err.h
  tools/include: Add (mostly) empty include/linux/sched/mm.h
  tools/lib/lockdep: Use LDFLAGS
  tools/lib/lockdep: Remove double-quotes from soname
  tools/lib/lockdep: Fix object file paths used in an out-of-tree build
  tools/lib/lockdep: Fix compilation for 4.11
  tools/lib/lockdep: Don't mix fd-based and stream IO
  ...
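As the message notes, the checked and unchecked refcount_t variants expose the same API, so callers are unaffected by whether CONFIG_REFCOUNT_FULL is set; only the amount of validation inside the refcount_* helpers changes. A minimal kernel-style sketch of typical usage (struct foo and the foo_* helpers are illustrative, not part of this merge):

#include <linux/refcount.h>
#include <linux/slab.h>

/* Illustrative object -- not part of this merge. */
struct foo {
	refcount_t refs;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refs, 1);	/* start with one reference */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->refs);			/* take another reference */
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refs))	/* free on the last put */
		kfree(f);
}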
Diffstat (limited to 'arch')
-rw-r--r--	arch/Kconfig	9
-rw-r--r--	arch/tile/lib/atomic_asm_32.S	3
-rw-r--r--	arch/x86/include/asm/atomic.h	13
3 files changed, 10 insertions(+), 15 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 6c00e5b00f8b..f76b214cf7ad 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -867,4 +867,13 @@ config STRICT_MODULE_RWX
 config ARCH_WANT_RELAX_ORDER
 	bool
 
+config REFCOUNT_FULL
+	bool "Perform full reference count validation at the expense of speed"
+	help
+	  Enabling this switches the refcounting infrastructure from a fast
+	  unchecked atomic_t implementation to a fully state checked
+	  implementation, which can be (slightly) slower but provides protections
+	  against various use-after-free conditions that can be used in
+	  security flaw exploits.
+
 source "kernel/gcov/Kconfig"
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 1a70e6c0f259..94709ab41ed8 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -24,8 +24,7 @@
  * has an opportunity to return -EFAULT to the user if needed.
  * The 64-bit routines just return a "long long" with the value,
  * since they are only used from kernel space and don't expect to fault.
- * Support for 16-bit ops is included in the framework but we don't provide
- * any (x86_64 has an atomic_inc_short(), so we might want to some day).
+ * Support for 16-bit ops is included in the framework but we don't provide any.
  *
  * Note that the caller is advised to issue a suitable L1 or L2
  * prefetch on the address being manipulated to avoid extra stalls.
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index caa5798c92f4..33380b871463 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -246,19 +246,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-/**
- * atomic_inc_short - increment of a short integer
- * @v: pointer to type int
- *
- * Atomically adds 1 to @v
- * Returns the new value of @u
- */
-static __always_inline short int atomic_inc_short(short int *v)
-{
-	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
-	return *v;
-}
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
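Per its commit title, atomic_inc_short() is removed because it was unused (its kerneldoc above was also stale, describing "type int" and "@u"), not because the operation itself was broken. Code needing an atomic increment uses the regular atomic_t API, roughly like the sketch below (nr_events and note_event() are illustrative names, not kernel symbols):

#include <linux/atomic.h>

static atomic_t nr_events = ATOMIC_INIT(0);

static void note_event(void)
{
	atomic_inc(&nr_events);	/* atomic read-modify-write; LOCK-prefixed on x86 */
}

static int events_seen(void)
{
	return atomic_read(&nr_events);
}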