author      Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-08-11 09:30:24 +0200
committer   Ingo Molnar <mingo@elte.hu>                2008-08-11 09:30:24 +0200
commit      b7d39aff91454f2534db2275f55908656ec0470c (patch)
tree        24481f2fe7206ef69800a33df6ac3e1716085326 /kernel
parent      7531e2f34d1d551b096143f19111139f0dd84c8b (diff)
lockdep: spin_lock_nest_lock()
Expose the new lock protection lock.

This can be used to annotate places where we take multiple locks of the
same class and avoid deadlocks by always taking another (top-level) lock
first.

NOTE: we're still bound to the MAX_LOCK_DEPTH (48) limit.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
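For illustration, a minimal usage sketch of the annotation this patch exposes. It assumes the spin_lock_nest_lock() wrapper from include/linux/spinlock.h, which ends up in the _spin_lock_nest_lock() helper added below; the struct names, NR_BUCKETS and table_lock_all() are hypothetical and not part of this patch.

#include <linux/spinlock.h>

/* hypothetical example; must stay well under MAX_LOCK_DEPTH (48) */
#define NR_BUCKETS	8

struct bucket {
	spinlock_t lock;		/* all bucket locks share one lock class */
};

struct table {
	spinlock_t all_locks;		/* top-level protection lock */
	struct bucket buckets[NR_BUCKETS];
};

static void table_lock_all(struct table *t)
{
	int i;

	spin_lock(&t->all_locks);
	for (i = 0; i < NR_BUCKETS; i++)
		/* annotate: this same-class nesting is serialized by t->all_locks */
		spin_lock_nest_lock(&t->buckets[i].lock, &t->all_locks);
}

The unlock path uses plain spin_unlock() on each bucket lock and then on the top-level lock; the nest annotation only changes how lockdep classifies the acquisitions, it does not alter the locking itself.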
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/spinlock.c | 11 +++++++++++
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index a1fb54c93cdd..44baeea94ab9 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -292,6 +292,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 }
 
 EXPORT_SYMBOL(_spin_lock_nested);
+
 unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 {
 	unsigned long flags;
@@ -314,6 +315,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
 
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+				     struct lockdep_map *nest_lock)
+{
+	preempt_disable();
+	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+EXPORT_SYMBOL(_spin_lock_nest_lock);
+
 #endif
 
 void __lockfunc _spin_unlock(spinlock_t *lock)