author    Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 12:12:01 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 12:12:01 -0800
commit    9eef02334505411667a7b51a8f349f8c6c4f3b66 (patch)
tree      3a0c8fb85d76595b2f9468d3e31f41147a43ed55 /lib
parent    d089f48fba28db14d0fe7753248f2575a9ddfc73 (diff)
parent    3765d01bab73bdb920ef711203978f02cd26e4da (diff)
Merge tag 'locking-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"Core locking primitives updates:
- Remove mutex_trylock_recursive() from the API - no users left
- Simplify + constify the futex code a bit
Lockdep updates:
 - Teach lockdep about local_lock_t (a usage sketch follows the
 commit list below)
 - Add a CONFIG_DEBUG_IRQFLAGS=y debug config option to check for
 potentially unsafe IRQ mask restoration patterns, i.e. calling
 raw_local_irq_restore() with IRQs enabled (a sketch of the buggy
 pattern follows this message)
 - Add wait context self-tests (the nesting rule they encode is
 illustrated just before the diff below)
 - Fix graph lock corner case corrupting internal data structures
 - Fix noinstr annotations
LKMM updates:
- Simplify the litmus tests
- Documentation fixes
KCSAN updates:
- Re-enable KCSAN instrumentation in lib/random32.c
Misc fixes:
- Don't branch-trace static label APIs
- DocBook fix
- Remove stale leftover empty file"
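As referenced in the Lockdep notes above, here is a minimal sketch of the bug class CONFIG_DEBUG_IRQFLAGS is meant to flag. The function and call flow are hypothetical, but warn_bogus_irq_restore() is the real helper (see the shortlog below); with DEBUG_IRQFLAGS=y, raw_local_irq_restore() warns through it when invoked with interrupts already enabled:

	/* Hypothetical driver-style path showing the flagged pattern. */
	static void example_buggy_path(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* IRQs off; prior state saved in 'flags' */
		/* ... critical section ... */
		local_irq_enable();		/* BUG: unconditionally re-enables IRQs */
		/* ... more work with IRQs on ... */
		local_irq_restore(flags);	/* restore with IRQs already enabled:
						 * DEBUG_IRQFLAGS warns here via
						 * warn_bogus_irq_restore() */
	}

The fix in such code is to avoid the unconditional local_irq_enable() inside a save/restore pair, so the saved flags are restored with interrupts still disabled.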
* tag 'locking-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
checkpatch: Don't check for mutex_trylock_recursive()
locking/mutex: Kill mutex_trylock_recursive()
s390: Use arch_local_irq_{save,restore}() in early boot code
lockdep: Noinstr annotate warn_bogus_irq_restore()
locking/lockdep: Avoid unmatched unlock
locking/rwsem: Remove empty rwsem.h
locking/rtmutex: Add missing kernel-doc markup
futex: Remove unneeded gotos
futex: Change utime parameter to be 'const ... *'
lockdep: report broken irq restoration
jump_label: Do not profile branch annotations
locking: Add Reviewers
locking/selftests: Add local_lock inversion tests
locking/lockdep: Exclude local_lock_t from IRQ inversions
locking/lockdep: Clean up check_redundant() a bit
locking/lockdep: Add a skip() function to __bfs()
locking/lockdep: Mark local_lock_t
locking/selftests: More granular debug_locks_verbose
lockdep/selftest: Add wait context selftests
tools/memory-model: Fix typo in klitmus7 compatibility table
...
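For background on the local_lock_t commits above, here is the usage sketch promised earlier. The names demo_pcpu and demo_inc are hypothetical; DEFINE_PER_CPU(), INIT_LOCAL_LOCK(), local_lock() and local_unlock() are the real API. On !PREEMPT_RT, local_lock() boils down to disabling preemption; on PREEMPT_RT it becomes a per-CPU spinlock_t, which is why lockdep has to model it without generating false IN-IRQ vs. IRQ-ON inversion reports (exactly what the local_lock_2/3A/3B selftests in the diff below exercise):

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	/* Hypothetical per-CPU data protected by a local_lock_t. */
	struct demo_pcpu {
		local_lock_t	lock;
		int		counter;
	};

	static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void demo_inc(void)
	{
		/* Serializes access to this CPU's instance only. */
		local_lock(&demo_pcpu.lock);
		this_cpu_inc(demo_pcpu.counter);
		local_unlock(&demo_pcpu.lock);
	}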
Diffstat (limited to 'lib')
 lib/Kconfig.debug      |   8
 lib/Makefile           |   3
 lib/locking-selftest.c | 334
 3 files changed, 340 insertions(+), 5 deletions(-)
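Before the diff itself: the wait context selftests added to lib/locking-selftest.c encode the inner-in-outer nesting matrix reproduced in the diff below. A minimal illustration of the rule they check, using hypothetical lock names; on PREEMPT_RT a spinlock_t may sleep, so acquiring one inside a raw_spinlock_t section is an invalid wait context, which CONFIG_PROVE_RAW_LOCK_NESTING reports:

	static DEFINE_RAW_SPINLOCK(demo_outer);	/* stays a true spinlock on RT */
	static DEFINE_SPINLOCK(demo_inner);	/* sleepable on PREEMPT_RT */

	static void demo_bad_nesting(void)
	{
		raw_spin_lock(&demo_outer);
		/*
		 * Invalid wait context: a potentially-sleeping lock taken
		 * inside a non-preemptible section ("SPIN" inside "RAW_SPIN"
		 * is an 'x' in the matrix below).
		 */
		spin_lock(&demo_inner);
		spin_unlock(&demo_inner);
		raw_spin_unlock(&demo_outer);
	}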
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7937265ef879..5ea0c1773b0a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1335,6 +1335,7 @@ config LOCKDEP_SMALL
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
+	select DEBUG_IRQFLAGS
 	help
 	  If you say Y here, the lock dependency engine will do
 	  additional runtime checks to debug itself, at the price
@@ -1423,6 +1424,13 @@ config TRACE_IRQFLAGS_NMI
 	depends on TRACE_IRQFLAGS
 	depends on TRACE_IRQFLAGS_NMI_SUPPORT
 
+config DEBUG_IRQFLAGS
+	bool "Debug IRQ flag manipulation"
+	help
+	  Enables checks for potentially unsafe enabling or disabling of
+	  interrupts, such as calling raw_local_irq_restore() when interrupts
+	  are enabled.
+
 config STACKTRACE
 	bool "Stack backtrace support"
 	depends on STACKTRACE_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index a6b160c3a4fa..fb7d946bb8c3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -27,9 +27,6 @@ KASAN_SANITIZE_string.o := n
 CFLAGS_string.o += -fno-stack-protector
 endif
 
-# Used by KCSAN while enabled, avoid recursion.
-KCSAN_SANITIZE_random32.o := n
-
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o timerqueue.o xarray.o \
 	 idr.o extable.o sha1.o irq_regs.o argv_split.o \
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 9959ea23529e..2d85abac1744 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -24,6 +24,7 @@
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
 #include <linux/rtmutex.h>
+#include <linux/local_lock.h>
 
 /*
  * Change this to 1 if you want to see the failure printouts:
@@ -51,6 +52,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
 #define LOCKTYPE_RWSEM	0x8
 #define LOCKTYPE_WW	0x10
 #define LOCKTYPE_RTMUTEX	0x20
+#define LOCKTYPE_LL	0x40
 
 static struct ww_acquire_ctx t, t2;
 static struct ww_mutex o, o2, o3;
@@ -64,6 +66,9 @@ static DEFINE_SPINLOCK(lock_B);
 static DEFINE_SPINLOCK(lock_C);
 static DEFINE_SPINLOCK(lock_D);
 
+static DEFINE_RAW_SPINLOCK(raw_lock_A);
+static DEFINE_RAW_SPINLOCK(raw_lock_B);
+
 static DEFINE_RWLOCK(rwlock_A);
 static DEFINE_RWLOCK(rwlock_B);
 static DEFINE_RWLOCK(rwlock_C);
@@ -133,6 +138,8 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
 
 #endif
 
+static local_lock_t local_A = INIT_LOCAL_LOCK(local_A);
+
 /*
  * non-inlined runtime initializers, to let separate locks share
  * the same lock-class:
@@ -1306,19 +1313,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define I_SPINLOCK(x)	lockdep_reset_lock(&lock_##x.dep_map)
+# define I_RAW_SPINLOCK(x)	lockdep_reset_lock(&raw_lock_##x.dep_map)
 # define I_RWLOCK(x)	lockdep_reset_lock(&rwlock_##x.dep_map)
 # define I_MUTEX(x)	lockdep_reset_lock(&mutex_##x.dep_map)
 # define I_RWSEM(x)	lockdep_reset_lock(&rwsem_##x.dep_map)
 # define I_WW(x)	lockdep_reset_lock(&x.dep_map)
+# define I_LOCAL_LOCK(x)	lockdep_reset_lock(&local_##x.dep_map)
 #ifdef CONFIG_RT_MUTEXES
 # define I_RTMUTEX(x)	lockdep_reset_lock(&rtmutex_##x.dep_map)
 #endif
 #else
 # define I_SPINLOCK(x)
+# define I_RAW_SPINLOCK(x)
 # define I_RWLOCK(x)
 # define I_MUTEX(x)
 # define I_RWSEM(x)
 # define I_WW(x)
+# define I_LOCAL_LOCK(x)
 #endif
 
 #ifndef I_RTMUTEX
@@ -1358,9 +1369,16 @@ static void reset_locks(void)
 	I1(A); I1(B); I1(C); I1(D);
 	I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
 	I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
+	I_RAW_SPINLOCK(A); I_RAW_SPINLOCK(B);
+	I_LOCAL_LOCK(A);
+
 	lockdep_reset();
+
 	I2(A); I2(B); I2(C); I2(D);
 	init_shared_classes();
+	raw_spin_lock_init(&raw_lock_A);
+	raw_spin_lock_init(&raw_lock_B);
+	local_lock_init(&local_A);
+
 	ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
 	memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
@@ -1382,6 +1400,8 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 
 	WARN_ON(irqs_disabled());
 
+	debug_locks_silent = !(debug_locks_verbose & lockclass_mask);
+
 	testcase_fn();
 	/*
 	 * Filter out expected failures:
@@ -1402,7 +1422,7 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 	}
 	testcase_total++;
 
-	if (debug_locks_verbose)
+	if (debug_locks_verbose & lockclass_mask)
 		pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
 			lockclass_mask, debug_locks, expected);
 
 	/*
@@ -2419,6 +2439,311 @@ static void fs_reclaim_tests(void)
 	pr_cont("\n");
 }
 
+#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
+
+static void hardirq_exit(int *_)
+{
+	HARDIRQ_EXIT();
+}
+
+#define HARDIRQ_CONTEXT(name, ...)					\
+	int hardirq_guard_##name __guard(hardirq_exit);			\
+	HARDIRQ_ENTER();
+
+#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...)				\
+	int notthreaded_hardirq_guard_##name __guard(hardirq_exit);	\
+	local_irq_disable();						\
+	__irq_enter();							\
+	WARN_ON(!in_irq());
+
+static void softirq_exit(int *_)
+{
+	SOFTIRQ_EXIT();
+}
+
+#define SOFTIRQ_CONTEXT(name, ...)					\
+	int softirq_guard_##name __guard(softirq_exit);			\
+	SOFTIRQ_ENTER();
+
+static void rcu_exit(int *_)
+{
+	rcu_read_unlock();
+}
+
+#define RCU_CONTEXT(name, ...)						\
+	int rcu_guard_##name __guard(rcu_exit);				\
+	rcu_read_lock();
+
+static void rcu_bh_exit(int *_)
+{
+	rcu_read_unlock_bh();
+}
+
+#define RCU_BH_CONTEXT(name, ...)					\
+	int rcu_bh_guard_##name __guard(rcu_bh_exit);			\
+	rcu_read_lock_bh();
+
+static void rcu_sched_exit(int *_)
+{
+	rcu_read_unlock_sched();
+}
+
+#define RCU_SCHED_CONTEXT(name, ...)					\
+	int rcu_sched_guard_##name __guard(rcu_sched_exit);		\
+	rcu_read_lock_sched();
+
+static void rcu_callback_exit(int *_)
+{
+	rcu_lock_release(&rcu_callback_map);
+}
+
+#define RCU_CALLBACK_CONTEXT(name, ...)					\
+	int rcu_callback_guard_##name __guard(rcu_callback_exit);	\
+	rcu_lock_acquire(&rcu_callback_map);
+
+
+static void raw_spinlock_exit(raw_spinlock_t **lock)
+{
+	raw_spin_unlock(*lock);
+}
+
+#define RAW_SPINLOCK_CONTEXT(name, lock)				\
+	raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock);	\
+	raw_spin_lock(&(lock));
+
+static void spinlock_exit(spinlock_t **lock)
+{
+	spin_unlock(*lock);
+}
+
+#define SPINLOCK_CONTEXT(name, lock)					\
+	spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock);	\
+	spin_lock(&(lock));
+
+static void mutex_exit(struct mutex **lock)
+{
+	mutex_unlock(*lock);
+}
+
+#define MUTEX_CONTEXT(name, lock)					\
+	struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock);	\
+	mutex_lock(&(lock));
+
+#define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock)	\
+									\
+static void __maybe_unused inner##_in_##outer(void)			\
+{									\
+	outer##_CONTEXT(_, outer_lock);					\
+	{								\
+		inner##_CONTEXT(_, inner_lock);				\
+	}								\
+}
+
+/*
+ * wait contexts (considering PREEMPT_RT)
+ *
+ * o: inner is allowed in outer
+ * x: inner is disallowed in outer
+ *
+ *       \  inner |  RCU  | RAW_SPIN | SPIN | MUTEX
+ * outer \        |       |          |      |
+ * ---------------+-------+----------+------+-------
+ * HARDIRQ        |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * NOTTHREADED_IRQ|   o   |    o     |  x   |  x
+ * ---------------+-------+----------+------+-------
+ * SOFTIRQ        |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * RCU            |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * RCU_BH         |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * RCU_CALLBACK   |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * RCU_SCHED      |   o   |    o     |  x   |  x
+ * ---------------+-------+----------+------+-------
+ * RAW_SPIN       |   o   |    o     |  x   |  x
+ * ---------------+-------+----------+------+-------
+ * SPIN           |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * MUTEX          |   o   |    o     |  o   |  o
+ * ---------------+-------+----------+------+-------
+ */
+
+#define GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(inner, inner_lock)	\
+GENERATE_2_CONTEXT_TESTCASE(HARDIRQ, , inner, inner_lock)		\
+GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock)	\
+GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock)		\
+GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock)			\
+GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock)		\
+GENERATE_2_CONTEXT_TESTCASE(RCU_CALLBACK, , inner, inner_lock)		\
+GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock)		\
+GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock)	\
+GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock)	\
+GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)
+
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
+
+/* the outer context allows all kinds of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer)			\
+	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
+	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
+	dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
+	dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX);		\
+
+/*
+ * the outer context only allows the preemption introduced by spinlock_t (which
+ * is a sleepable lock for PREEMPT_RT)
+ */
+#define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer)		\
+	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
+	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
+	dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
+	dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\
+
+/* the outer doesn't allows any kind of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer)		\
+	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
+	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
+	dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN);		\
+	dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\
+
+static void wait_context_tests(void)
+{
+	printk("  --------------------------------------------------------------------------\n");
+	printk("  | wait context tests |\n");
+	printk("  --------------------------------------------------------------------------\n");
+	printk("                                 | rcu  | raw  | spin |mutex |\n");
+	printk("  --------------------------------------------------------------------------\n");
+	print_testname("in hardirq context");
+	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(HARDIRQ);
+	pr_cont("\n");
+
+	print_testname("in hardirq context (not threaded)");
+	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(NOTTHREADED_HARDIRQ);
+	pr_cont("\n");
+
+	print_testname("in softirq context");
+	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SOFTIRQ);
+	pr_cont("\n");
+
+	print_testname("in RCU context");
+	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU);
+	pr_cont("\n");
+
+	print_testname("in RCU-bh context");
+	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
+	pr_cont("\n");
+
+	print_testname("in RCU callback context");
+	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_CALLBACK);
+	pr_cont("\n");
+
+	print_testname("in RCU-sched context");
+	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
+	pr_cont("\n");
+
+	print_testname("in RAW_SPINLOCK context");
+	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
+	pr_cont("\n");
+
+	print_testname("in SPINLOCK context");
+	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
+	pr_cont("\n");
+
+	print_testname("in MUTEX context");
+	DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
+	pr_cont("\n");
+}
+
+static void local_lock_2(void)
+{
+	local_lock_acquire(&local_A);	/* IRQ-ON */
+	local_lock_release(&local_A);
+
+	HARDIRQ_ENTER();
+	spin_lock(&lock_A);		/* IN-IRQ */
+	spin_unlock(&lock_A);
+	HARDIRQ_EXIT()
+
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_A);
+	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle, false */
+	local_lock_release(&local_A);
+	spin_unlock(&lock_A);
+	HARDIRQ_ENABLE();
+}
+
+static void local_lock_3A(void)
+{
+	local_lock_acquire(&local_A);	/* IRQ-ON */
+	spin_lock(&lock_B);		/* IRQ-ON */
+	spin_unlock(&lock_B);
+	local_lock_release(&local_A);
+
+	HARDIRQ_ENTER();
+	spin_lock(&lock_A);		/* IN-IRQ */
+	spin_unlock(&lock_A);
+	HARDIRQ_EXIT()
+
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_A);
+	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+	local_lock_release(&local_A);
+	spin_unlock(&lock_A);
+	HARDIRQ_ENABLE();
+}
+
+static void local_lock_3B(void)
+{
+	local_lock_acquire(&local_A);	/* IRQ-ON */
+	spin_lock(&lock_B);		/* IRQ-ON */
+	spin_unlock(&lock_B);
+	local_lock_release(&local_A);
+
+	HARDIRQ_ENTER();
+	spin_lock(&lock_A);		/* IN-IRQ */
+	spin_unlock(&lock_A);
+	HARDIRQ_EXIT()
+
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_A);
+	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+	local_lock_release(&local_A);
+	spin_unlock(&lock_A);
+	HARDIRQ_ENABLE();
+
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_A);
+	spin_lock(&lock_B);		/* IN-IRQ <-> IRQ-ON cycle, true */
+	spin_unlock(&lock_B);
+	spin_unlock(&lock_A);
+	HARDIRQ_DISABLE();
+
+}
+
+static void local_lock_tests(void)
+{
+	printk("  --------------------------------------------------------------------------\n");
+	printk("  | local_lock tests |\n");
+	printk("  ---------------------\n");
+
+	print_testname("local_lock inversion  2");
+	dotest(local_lock_2, SUCCESS, LOCKTYPE_LL);
+	pr_cont("\n");
+
+	print_testname("local_lock inversion 3A");
+	dotest(local_lock_3A, SUCCESS, LOCKTYPE_LL);
+	pr_cont("\n");
+
+	print_testname("local_lock inversion 3B");
+	dotest(local_lock_3B, FAILURE, LOCKTYPE_LL);
+	pr_cont("\n");
+}
+
 void locking_selftest(void)
 {
 	/*
@@ -2446,7 +2771,6 @@ void locking_selftest(void)
 	printk("  --------------------------------------------------------------------------\n");
 
 	init_shared_classes();
-	debug_locks_silent = !debug_locks_verbose;
 	lockdep_set_selftest_task(current);
 
 	DO_TESTCASE_6R("A-A deadlock", AA);
@@ -2542,6 +2866,12 @@ void locking_selftest(void)
 
 	fs_reclaim_tests();
 
+	/* Wait context test cases that are specific for RAW_LOCK_NESTING */
+	if (IS_ENABLED(CONFIG_PROVE_RAW_LOCK_NESTING))
+		wait_context_tests();
+
+	local_lock_tests();
+
 	if (unexpected_testcase_failures) {
 		printk("-----------------------------------------------------------------\n");
 		debug_locks = 0;
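One implementation note on the selftests above: the *_CONTEXT() macros rely on the GCC/Clang cleanup attribute, wrapped as __guard(), so the matching exit call runs automatically when the guard variable leaves scope. A self-contained user-space sketch of the same idiom follows; all names here are hypothetical, and it compiles with either GCC or Clang:

	#include <stdio.h>

	/* Called automatically with a pointer to the guard variable. */
	static void scope_exit(int *unused)
	{
		printf("leaving scope\n");
	}

	/* Same shape as the kernel's __guard()-based *_CONTEXT() macros. */
	#define DEMO_CONTEXT(name) \
		int demo_guard_##name __attribute__((__cleanup__(scope_exit))) = 0

	int main(void)
	{
		{
			DEMO_CONTEXT(example);
			printf("inside scope\n");
		}			/* scope_exit() fires here */
		printf("after scope\n");
		return 0;
	}

This is what lets GENERATE_2_CONTEXT_TESTCASE() open an outer and an inner context in one function body and have them unwind in the correct order without explicit exit calls.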