author    | Tejun Heo <tj@kernel.org>                       | 2014-01-10 08:57:20 -0500
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2014-01-10 13:44:25 -0800
commit    | a69d001cfc712b96ec9d7ba44d6285702a38dabf (patch)
tree      | 295a4f2c57a883d45596078a28278cb054bf9a02 /fs/kernfs/dir.c
parent    | ea1c472dfeada211a0100daa7976e8e8e779b858 (diff)
download  | linux-a69d001cfc712b96ec9d7ba44d6285702a38dabf.tar.bz2
kernfs: remove KERNFS_ACTIVE_REF and add kernfs_lockdep()
There are currently two mechanisms gating active ref lockdep
annotations - the KERNFS_LOCKDEP flag and the KERNFS_ACTIVE_REF type
mask. Without the former, the lockdep annotations in
kernfs_get/put_active() are skipped; without the latter,
kernfs_deactivate() is skipped entirely.
While KERNFS_ACTIVE_REF also behaves as an optimization to skip the
deactivation step for non-file nodes, the benefit is marginal and it
needlessly diverges code paths. Let's drop KERNFS_ACTIVE_REF and use
KERNFS_LOCKDEP in kernfs_deactivate() too.
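Concretely, the early return that skipped deactivation for non-file
nodes goes away, and the lockdep annotations in kernfs_deactivate()
are wrapped in a conditional instead (excerpted from the diff below;
kernfs_lockdep() is the new helper described next):

	/* Before: deactivation was skipped entirely for non-file nodes. */
	if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF))
		return;

	/* After: deactivation runs for every node; only the lockdep
	 * annotations remain conditional on KERNFS_LOCKDEP.
	 */
	if (kernfs_lockdep(kn)) {
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
			lock_contended(&kn->dep_map, _RET_IP_);
	}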
While at it, add a helper, kernfs_lockdep(), to test the
KERNFS_LOCKDEP flag, so that the check is more convenient and the
related code can be compiled out when lockdep is not enabled.
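The helper itself is trivial: with CONFIG_DEBUG_LOCK_ALLOC disabled it
returns a constant false, so the compiler can drop the lockdep branches
at every call site (taken from the diff below):

	static bool kernfs_lockdep(struct kernfs_node *kn)
	{
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		return kn->flags & KERNFS_LOCKDEP;
	#else
		return false;
	#endif
	}

	/* Call-site pattern, e.g. in kernfs_get_active(): */
	if (kernfs_lockdep(kn))
		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);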
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'fs/kernfs/dir.c')
-rw-r--r-- | fs/kernfs/dir.c | 31
1 file changed, 20 insertions, 11 deletions
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index ed62de6cdf8f..1c9130a33048 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -22,6 +22,15 @@ DEFINE_MUTEX(kernfs_mutex);
 
 #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
 
+static bool kernfs_lockdep(struct kernfs_node *kn)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	return kn->flags & KERNFS_LOCKDEP;
+#else
+	return false;
+#endif
+}
+
 /**
  *	kernfs_name_hash
  *	@name: Null terminated string to hash
@@ -138,7 +147,7 @@ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
 	if (!atomic_inc_unless_negative(&kn->active))
 		return NULL;
 
-	if (kn->flags & KERNFS_LOCKDEP)
+	if (kernfs_lockdep(kn))
 		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
 	return kn;
 }
@@ -158,7 +167,7 @@ void kernfs_put_active(struct kernfs_node *kn)
 	if (unlikely(!kn))
 		return;
 
-	if (kn->flags & KERNFS_LOCKDEP)
+	if (kernfs_lockdep(kn))
 		rwsem_release(&kn->dep_map, 1, _RET_IP_);
 	v = atomic_dec_return(&kn->active);
 	if (likely(v != KN_DEACTIVATED_BIAS))
@@ -179,21 +188,21 @@ static void kernfs_deactivate(struct kernfs_node *kn)
 
 	BUG_ON(!(kn->flags & KERNFS_REMOVED));
 
-	if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF))
-		return;
-
-	rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
-
 	atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
 
-	if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
-		lock_contended(&kn->dep_map, _RET_IP_);
+	if (kernfs_lockdep(kn)) {
+		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
+		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
+			lock_contended(&kn->dep_map, _RET_IP_);
+	}
 
 	wait_event(root->deactivate_waitq,
 		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
 
-	lock_acquired(&kn->dep_map, _RET_IP_);
-	rwsem_release(&kn->dep_map, 1, _RET_IP_);
+	if (kernfs_lockdep(kn)) {
+		lock_acquired(&kn->dep_map, _RET_IP_);
+		rwsem_release(&kn->dep_map, 1, _RET_IP_);
+	}
 }
 
 /**