author    Jiri Pirko <jpirko@redhat.com>        2012-01-09 06:36:54 +0000
committer David S. Miller <davem@davemloft.net> 2012-01-09 12:46:58 -0800
commit    2429f7ac2ef429378536d87fcbbf6f424aa5b47f (patch)
tree      15e0fd0c42f6e10ede9f1cf84553c2a01e593d81
parent    ab16ebf375f0513d6b0f5193de84186a3fc0c33b (diff)
net: introduce netif_addr_lock_nested() and call it when appropriate
dev_uc_sync() and dev_mc_sync() acquire netif_addr_lock for the destination device of the synchronization. Since netif_addr_lock is already held on the source device at that point, this triggers a lockdep deadlock warning. There is no way this deadlock can actually happen, so use spin_lock_nested() to silence the warning.

Signed-off-by: Jiri Pirko <jpirko@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
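For context, a minimal illustrative sketch (not part of the patch) of the call chain that trips lockdep: a stacked driver's ndo_set_rx_mode handler syncs addresses down to a lower device while dev_set_rx_mode() already holds the upper device's addr_list_lock. The driver and helper names (my_upper_set_rx_mode, my_upper_get_lower) are hypothetical.

    #include <linux/netdevice.h>

    /* Hypothetical stacked-device driver (bonding/team style).  The helper
     * my_upper_get_lower() is illustrative only; a real driver keeps its own
     * pointer to the lower device.
     */
    static struct net_device *my_upper_get_lower(struct net_device *upper);

    static void my_upper_set_rx_mode(struct net_device *upper)
    {
            struct net_device *lower = my_upper_get_lower(upper);

            /*
             * dev_set_rx_mode() has already taken upper->addr_list_lock
             * (with BH disabled) before invoking this ndo_set_rx_mode
             * handler.  dev_uc_sync()/dev_mc_sync() now take
             * lower->addr_list_lock, which belongs to the same lock class,
             * so lockdep reports a possible recursive deadlock even though
             * upper != lower.  With this patch the helpers use
             * spin_lock_nested(..., SINGLE_DEPTH_NESTING) to mark the single
             * level of nesting as intentional.
             */
            dev_uc_sync(lower, upper);
            dev_mc_sync(lower, upper);
    }

Dropping the _bh variants inside dev_uc_sync()/dev_mc_sync() also appears safe in this path, since the caller already holds the source device's addr_list_lock with bottom halves disabled.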
-rw-r--r--  include/linux/netdevice.h  |  5 +++++
-rw-r--r--  net/core/dev_addr_lists.c  | 12 ++++++------
2 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a1d109590da4..d0522bb2d4a0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2450,6 +2450,11 @@ static inline void netif_addr_lock(struct net_device *dev)
spin_lock(&dev->addr_list_lock);
}
+static inline void netif_addr_lock_nested(struct net_device *dev)
+{
+ spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
+}
+
static inline void netif_addr_lock_bh(struct net_device *dev)
{
spin_lock_bh(&dev->addr_list_lock);
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index c34ce9f9c976..29c07fef9228 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -439,11 +439,11 @@ int dev_uc_sync(struct net_device *to, struct net_device *from)
if (to->addr_len != from->addr_len)
return -EINVAL;
- netif_addr_lock_bh(to);
+ netif_addr_lock_nested(to);
err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
- netif_addr_unlock_bh(to);
+ netif_addr_unlock(to);
return err;
}
EXPORT_SYMBOL(dev_uc_sync);
@@ -463,7 +463,7 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
return;
netif_addr_lock_bh(from);
- netif_addr_lock(to);
+ netif_addr_lock_nested(to);
__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
__dev_set_rx_mode(to);
netif_addr_unlock(to);
@@ -602,11 +602,11 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
if (to->addr_len != from->addr_len)
return -EINVAL;
- netif_addr_lock_bh(to);
+ netif_addr_lock_nested(to);
err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
- netif_addr_unlock_bh(to);
+ netif_addr_unlock(to);
return err;
}
EXPORT_SYMBOL(dev_mc_sync);
@@ -626,7 +626,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
return;
netif_addr_lock_bh(from);
- netif_addr_lock(to);
+ netif_addr_lock_nested(to);
__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
__dev_set_rx_mode(to);
netif_addr_unlock(to);