author     Yuyang Du <duyuyang@gmail.com>              2019-05-06 16:19:23 +0800
committer  Ingo Molnar <mingo@kernel.org>              2019-06-03 11:55:42 +0200
commit     e196e479a3b844da6e6e71e0d2a8694040cb4e52 (patch)
tree       3d6c6505da33833db9e844774871356ed29055d7
parent     d16dbd1b8a29bb9f8aca2c2f3bd1a0d2b7621126 (diff)
download   linux-e196e479a3b844da6e6e71e0d2a8694040cb4e52.tar.bz2
locking/lockdep: Use lockdep_init_task for task initiation consistently
Although there is a lockdep_init_task(), it does nothing; lockdep instead
initializes tasks by assigning their lockdep fields directly, and does so
inconsistently. Fix this by doing the initialization in lockdep_init_task()
and using it consistently.
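[Editor's note: for orientation, this is the shape of the consolidated
initializer the patch introduces in kernel/locking/lockdep.c, mirrored from
the hunk below; the comments are added here for context and are not part of
the patch text.]

void lockdep_init_task(struct task_struct *task)
{
	task->lockdep_depth = 0;	/* no locks held yet */
	task->curr_chain_key = 0;	/* empty lock-chain key */
	task->lockdep_recursion = 0;	/* not recursing into lockdep */
}

The open-coded assignments in copy_process() and lockdep_reset() collapse
into a single lockdep_init_task() call, while !CONFIG_LOCKDEP builds keep an
empty inline stub in <linux/lockdep.h>.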
Signed-off-by: Yuyang Du <duyuyang@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bvanassche@acm.org
Cc: frederic@kernel.org
Cc: ming.lei@redhat.com
Cc: will.deacon@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-8-duyuyang@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--   include/linux/lockdep.h  |  7
-rw-r--r--   init/init_task.c         |  2
-rw-r--r--   kernel/fork.c            |  3
-rw-r--r--   kernel/locking/lockdep.c | 11
4 files changed, 16 insertions, 7 deletions
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 851d44fa5457..5d05b8149f19 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -287,6 +287,8 @@ extern void lockdep_free_key_range(void *start, unsigned long size);
 extern asmlinkage void lockdep_sys_exit(void);
 extern void lockdep_set_selftest_task(struct task_struct *task);
 
+extern void lockdep_init_task(struct task_struct *task);
+
 extern void lockdep_off(void);
 extern void lockdep_on(void);
 
@@ -411,6 +413,10 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 
 #else /* !CONFIG_LOCKDEP */
 
+static inline void lockdep_init_task(struct task_struct *task)
+{
+}
+
 static inline void lockdep_off(void)
 {
 }
@@ -503,7 +509,6 @@ enum xhlock_context_t {
 	{ .name = (_name), .key = (void *)(_key), }
 
 static inline void lockdep_invariant_state(bool force) {}
-static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
 
 #ifdef CONFIG_LOCK_STAT
diff --git a/init/init_task.c b/init/init_task.c
index c70ef656d0f4..1b15cb90d64f 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -166,6 +166,8 @@ struct task_struct init_task
 	.softirqs_enabled = 1,
 #endif
 #ifdef CONFIG_LOCKDEP
+	.lockdep_depth = 0, /* no locks held yet */
+	.curr_chain_key = 0,
 	.lockdep_recursion = 0,
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/fork.c b/kernel/fork.c
index 75675b9bf6df..735d0b4a89e2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1984,9 +1984,6 @@ static __latent_entropy struct task_struct *copy_process(
 	p->pagefault_disabled = 0;
 
 #ifdef CONFIG_LOCKDEP
-	p->lockdep_depth = 0; /* no locks held yet */
-	p->curr_chain_key = 0;
-	p->lockdep_recursion = 0;
 	lockdep_init_task(p);
 #endif
 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index bc1efc12a8c5..b7d9c28ecf3b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -359,6 +359,13 @@ static inline u64 iterate_chain_key(u64 key, u32 idx)
 	return k0 | (u64)k1 << 32;
 }
 
+void lockdep_init_task(struct task_struct *task)
+{
+	task->lockdep_depth = 0; /* no locks held yet */
+	task->curr_chain_key = 0;
+	task->lockdep_recursion = 0;
+}
+
 void lockdep_off(void)
 {
 	current->lockdep_recursion++;
@@ -4589,9 +4596,7 @@ void lockdep_reset(void)
 	int i;
 
 	raw_local_irq_save(flags);
-	current->curr_chain_key = 0;
-	current->lockdep_depth = 0;
-	current->lockdep_recursion = 0;
+	lockdep_init_task(current);
 	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
 	nr_hardirq_chains = 0;
 	nr_softirq_chains = 0;