author		Joel Fernandes (Google) <joel@joelfernandes.org>	2019-09-22 13:03:17 -0700
committer	Paul E. McKenney <paulmck@kernel.org>	2020-01-24 10:24:31 -0800
commit		e99637becb2e684bee2b9117f817f4d1346b8353 (patch)
tree		4ce903202e537e4e16f89eff82bb401d722d9626 /kernel/rcu
parent		0392bebebf26f09434e6c7ca4c09c014efeef76a (diff)
download	linux-e99637becb2e684bee2b9117f817f4d1346b8353.tar.bz2
rcu: Add support for debug_objects debugging for kfree_rcu()
This commit applies RCU's debug_objects debugging to the new batched
kfree_rcu() implementations. The object is queued at the kfree_rcu()
call and dequeued during reclaim.
Tested that enabling CONFIG_DEBUG_OBJECTS_RCU_HEAD successfully detects
double kfree_rcu() calls.
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
[ paulmck: Fix IRQ per kbuild test robot <lkp@intel.com> feedback. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
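For illustration, the failure mode this check catches is a second kfree_rcu() on an object whose rcu_head is still queued from the first call. A minimal, hypothetical sketch (the struct and function below are illustrative, not part of this patch) of code that CONFIG_DEBUG_OBJECTS_RCU_HEAD would now flag:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;	// rcu_head tracked by debug_objects while queued
};

static void buggy_release(struct foo *p)
{
	kfree_rcu(p, rcu);	// First call: the rcu_head is queued and marked active.
	kfree_rcu(p, rcu);	// Second call before reclaim: debug_rcu_head_queue() sees
				// the already-active rcu_head, the new WARN_ONCE fires,
				// and the object is leaked instead of being freed twice.
}

As the added comment in the patch puts it ("just leak"), detection trades a small memory leak for avoiding a double free.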
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree.c	8
1 file changed, 8 insertions, 0 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a40fd58bd4b6..0512221cd84b 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2749,6 +2749,7 @@ static void kfree_rcu_work(struct work_struct *work)
 	for (; head; head = next) {
 		next = head->next;
 		// Potentially optimize with kfree_bulk in future.
+		debug_rcu_head_unqueue(head);
 		__rcu_reclaim(rcu_state.name, head);
 		cond_resched_tasks_rcu_qs();
 	}
@@ -2855,6 +2856,12 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 		spin_lock(&krcp->lock);
 
 	// Queue the object but don't yet schedule the batch.
+	if (debug_rcu_head_queue(head)) {
+		// Probable double kfree_rcu(), just leak.
+		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
+			  __func__, head);
+		goto unlock_return;
+	}
 	head->func = func;
 	head->next = krcp->head;
 	krcp->head = head;
@@ -2866,6 +2873,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
 	}
 
+unlock_return:
 	if (krcp->initialized)
 		spin_unlock(&krcp->lock);
 	local_irq_restore(flags);
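For context, debug_rcu_head_queue() and debug_rcu_head_unqueue() are RCU's existing debug_objects wrappers from kernel/rcu/rcu.h: the queue-side helper activates a debug object for the rcu_head and returns nonzero if it is already active (a probable double queue), and the unqueue-side helper deactivates it once the head has been reclaimed. A simplified sketch of that pairing, not the verbatim kernel source (the real helpers also record STATE_RCU_HEAD_READY/STATE_RCU_HEAD_QUEUED transitions):

#include <linux/debugobjects.h>
#include <linux/types.h>

extern struct debug_obj_descr rcuhead_debug_descr;	// defined in kernel/rcu/update.c

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	// Nonzero means the rcu_head is already active, i.e. already queued somewhere.
	return debug_object_activate(head, &rcuhead_debug_descr);
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	// The callback has been invoked; mark the rcu_head's debug object inactive.
	debug_object_deactivate(head, &rcuhead_debug_descr);
}

This is why kfree_call_rcu() can detect the double call cheaply: the second queue attempt finds the debug object already active and bails out to unlock_return.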