author     Pablo Neira Ayuso <pablo@netfilter.org>   2015-10-17 14:11:08 +0200
committer  Pablo Neira Ayuso <pablo@netfilter.org>   2015-10-17 14:28:03 +0200
commit     f0a0a978b66fea782a52b0a7075b3fa9ab27ad0a (patch)
tree       52ecc0eafbac697c6afaa542efe324984484120c /net/bridge
parent     c8d71d08aa23679f56e7072358383442c6ede352 (diff)
parent     4be3158abe1e02d24f82b34101e41d662fae2185 (diff)
download   linux-f0a0a978b66fea782a52b0a7075b3fa9ab27ad0a.tar.bz2
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
This merge resolves conflicts with 75aec9df3a78 ("bridge: Remove
br_nf_push_frag_xmit_sk"), part of Eric Biederman's effort to improve
netns support in the network stack, which reached upstream via David's
net-next tree.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

Conflicts:
	net/bridge/br_netfilter_hooks.c
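For readers skimming the conflict, the br_netfilter_hooks.c hunk below boils down to the fragmentation output callback gaining a struct net argument. Before that conversion, br_netfilter needed a wrapper that re-derived the namespace from the skb's dst device; the sketch below simply restates the lines removed by this merge:

static int br_nf_push_frag_xmit_sk(struct sock *sk, struct sk_buff *skb)
{
	/* the old callback type was int (*)(struct sock *, struct sk_buff *),
	 * so the netns had to be recovered from the skb itself
	 */
	struct net *net = dev_net(skb_dst(skb)->dev);

	return br_nf_push_frag_xmit(net, sk, skb);
}

With struct net threaded through the callback type, the wrapper is dropped and br_nf_push_frag_xmit() is passed to ip_do_fragment() and v6ops->fragment() directly.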
Diffstat (limited to 'net/bridge')
-rw-r--r--   net/bridge/br_device.c             2
-rw-r--r--   net/bridge/br_fdb.c              129
-rw-r--r--   net/bridge/br_forward.c            6
-rw-r--r--   net/bridge/br_if.c                 6
-rw-r--r--   net/bridge/br_input.c              4
-rw-r--r--   net/bridge/br_ioctl.c              3
-rw-r--r--   net/bridge/br_netfilter_hooks.c   13
-rw-r--r--   net/bridge/br_netlink.c          381
-rw-r--r--   net/bridge/br_private.h           37
-rw-r--r--   net/bridge/br_stp.c               26
-rw-r--r--   net/bridge/br_sysfs_br.c          11
-rw-r--r--   net/bridge/br_vlan.c             296
12 files changed, 706 insertions(+), 208 deletions(-)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index bdfb9544ca03..5e88d3e17546 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -56,7 +56,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
- if (!br_allowed_ingress(br, br_vlan_group(br), skb, &vid))
+ if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
goto out;
if (is_broadcast_ether_addr(dest))
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 7f7d55132dd5..c88bd8e8937e 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -134,11 +134,14 @@ static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
static void fdb_del_external_learn(struct net_bridge_fdb_entry *f)
{
struct switchdev_obj_port_fdb fdb = {
- .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
- .addr = f->addr.addr,
+ .obj = {
+ .id = SWITCHDEV_OBJ_ID_PORT_FDB,
+ .flags = SWITCHDEV_F_DEFER,
+ },
.vid = f->vlan_id,
};
+ ether_addr_copy(fdb.addr, f->addr.addr);
switchdev_port_obj_del(f->dst->dev, &fdb.obj);
}
@@ -608,13 +611,14 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
}
}
-static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
+static int fdb_to_nud(const struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb)
{
if (fdb->is_local)
return NUD_PERMANENT;
else if (fdb->is_static)
return NUD_NOARP;
- else if (has_expired(fdb->dst->br, fdb))
+ else if (has_expired(br, fdb))
return NUD_STALE;
else
return NUD_REACHABLE;
@@ -640,7 +644,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
ndm->ndm_flags = fdb->added_by_external_learn ? NTF_EXT_LEARNED : 0;
ndm->ndm_type = 0;
ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
- ndm->ndm_state = fdb_to_nud(fdb);
+ ndm->ndm_state = fdb_to_nud(br, fdb);
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
goto nla_put_failure;
@@ -785,7 +789,7 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
}
}
- if (fdb_to_nud(fdb) != state) {
+ if (fdb_to_nud(br, fdb) != state) {
if (state & NUD_PERMANENT) {
fdb->is_local = 1;
if (!fdb->is_static) {
@@ -846,8 +850,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
const unsigned char *addr, u16 vid, u16 nlh_flags)
{
struct net_bridge_vlan_group *vg;
- struct net_bridge_port *p;
+ struct net_bridge_port *p = NULL;
struct net_bridge_vlan *v;
+ struct net_bridge *br = NULL;
int err = 0;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
@@ -860,26 +865,36 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -EINVAL;
}
- p = br_port_get_rtnl(dev);
- if (p == NULL) {
- pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
- dev->name);
- return -EINVAL;
+ if (dev->priv_flags & IFF_EBRIDGE) {
+ br = netdev_priv(dev);
+ vg = br_vlan_group(br);
+ } else {
+ p = br_port_get_rtnl(dev);
+ if (!p) {
+ pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
+ dev->name);
+ return -EINVAL;
+ }
+ vg = nbp_vlan_group(p);
}
- vg = nbp_vlan_group(p);
if (vid) {
v = br_vlan_find(vg, vid);
- if (!v) {
- pr_info("bridge: RTM_NEWNEIGH with unconfigured "
- "vlan %d on port %s\n", vid, dev->name);
+ if (!v || !br_vlan_should_use(v)) {
+ pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
return -EINVAL;
}
/* VID was specified, so use it. */
- err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
+ if (dev->priv_flags & IFF_EBRIDGE)
+ err = br_fdb_insert(br, NULL, addr, vid);
+ else
+ err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
} else {
- err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
+ if (dev->priv_flags & IFF_EBRIDGE)
+ err = br_fdb_insert(br, NULL, addr, 0);
+ else
+ err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
if (err || !vg || !vg->num_vlans)
goto out;
@@ -888,7 +903,13 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* vlan on this port.
*/
list_for_each_entry(v, &vg->vlan_list, vlist) {
- err = __br_fdb_add(ndm, p, addr, nlh_flags, v->vid);
+ if (!br_vlan_should_use(v))
+ continue;
+ if (dev->priv_flags & IFF_EBRIDGE)
+ err = br_fdb_insert(br, NULL, addr, v->vid);
+ else
+ err = __br_fdb_add(ndm, p, addr, nlh_flags,
+ v->vid);
if (err)
goto out;
}
@@ -898,6 +919,32 @@ out:
return err;
}
+static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
+ u16 vid)
+{
+ struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
+ struct net_bridge_fdb_entry *fdb;
+
+ fdb = fdb_find(head, addr, vid);
+ if (!fdb)
+ return -ENOENT;
+
+ fdb_delete(br, fdb);
+ return 0;
+}
+
+static int __br_fdb_delete_by_addr(struct net_bridge *br,
+ const unsigned char *addr, u16 vid)
+{
+ int err;
+
+ spin_lock_bh(&br->hash_lock);
+ err = fdb_delete_by_addr(br, addr, vid);
+ spin_unlock_bh(&br->hash_lock);
+
+ return err;
+}
+
static int fdb_delete_by_addr_and_port(struct net_bridge_port *p,
const u8 *addr, u16 vlan)
{
@@ -931,35 +978,53 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
const unsigned char *addr, u16 vid)
{
struct net_bridge_vlan_group *vg;
- struct net_bridge_port *p;
+ struct net_bridge_port *p = NULL;
struct net_bridge_vlan *v;
+ struct net_bridge *br = NULL;
int err;
- p = br_port_get_rtnl(dev);
- if (p == NULL) {
- pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
- dev->name);
- return -EINVAL;
+ if (dev->priv_flags & IFF_EBRIDGE) {
+ br = netdev_priv(dev);
+ vg = br_vlan_group(br);
+ } else {
+ p = br_port_get_rtnl(dev);
+ if (!p) {
+ pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
+ dev->name);
+ return -EINVAL;
+ }
+ vg = nbp_vlan_group(p);
}
- vg = nbp_vlan_group(p);
if (vid) {
v = br_vlan_find(vg, vid);
if (!v) {
- pr_info("bridge: RTM_DELNEIGH with unconfigured "
- "vlan %d on port %s\n", vid, dev->name);
+ pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
return -EINVAL;
}
- err = __br_fdb_delete(p, addr, vid);
+ if (dev->priv_flags & IFF_EBRIDGE)
+ err = __br_fdb_delete_by_addr(br, addr, vid);
+ else
+ err = __br_fdb_delete(p, addr, vid);
} else {
err = -ENOENT;
- err &= __br_fdb_delete(p, addr, 0);
+ if (dev->priv_flags & IFF_EBRIDGE)
+ err = __br_fdb_delete_by_addr(br, addr, 0);
+ else
+ err &= __br_fdb_delete(p, addr, 0);
+
if (!vg || !vg->num_vlans)
goto out;
- list_for_each_entry(v, &vg->vlan_list, vlist)
- err &= __br_fdb_delete(p, addr, v->vid);
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ if (!br_vlan_should_use(v))
+ continue;
+ if (dev->priv_flags & IFF_EBRIDGE)
+ err = __br_fdb_delete_by_addr(br, addr, v->vid);
+ else
+ err &= __br_fdb_delete(p, addr, v->vid);
+ }
}
out:
return err;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 6d5ed795c3e2..a9d424e20229 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -32,7 +32,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
{
struct net_bridge_vlan_group *vg;
- vg = nbp_vlan_group(p);
+ vg = nbp_vlan_group_rcu(p);
return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING;
}
@@ -80,7 +80,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
struct net_bridge_vlan_group *vg;
- vg = nbp_vlan_group(to);
+ vg = nbp_vlan_group_rcu(to);
skb = br_handle_vlan(to->br, vg, skb);
if (!skb)
return;
@@ -112,7 +112,7 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
return;
}
- vg = nbp_vlan_group(to);
+ vg = nbp_vlan_group_rcu(to);
skb = br_handle_vlan(to->br, vg, skb);
if (!skb)
return;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 934cae9fa317..ec02f5869a78 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
+#include <net/switchdev.h>
#include "br_private.h"
@@ -248,7 +249,10 @@ static void del_nbp(struct net_bridge_port *p)
list_del_rcu(&p->list);
+ nbp_vlan_flush(p);
br_fdb_delete_by_port(br, p, 0, 1);
+ switchdev_deferred_process();
+
nbp_update_port_count(br);
netdev_upper_dev_unlink(dev, br->dev);
@@ -256,8 +260,6 @@ static void del_nbp(struct net_bridge_port *p)
dev->priv_flags &= ~IFF_BRIDGE_PORT;
netdev_rx_handler_unregister(dev);
- /* use the synchronize_rcu done by netdev_rx_handler_unregister */
- nbp_vlan_flush(p);
br_multicast_del_port(p);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f5c5a4500e2f..f7fba74108a9 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -44,7 +44,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
brstats->rx_bytes += skb->len;
u64_stats_update_end(&brstats->syncp);
- vg = br_vlan_group(br);
+ vg = br_vlan_group_rcu(br);
/* Bridge is just like any other port. Make sure the
* packet is allowed except in promisc modue when someone
* may be running packet capture.
@@ -140,7 +140,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
if (!p || p->state == BR_STATE_DISABLED)
goto drop;
- if (!br_allowed_ingress(p->br, nbp_vlan_group(p), skb, &vid))
+ if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
goto out;
/* insert into forwarding database after filtering to avoid spoofing */
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 8d423bc649b9..263b4de4de57 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -200,8 +200,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
- br->ageing_time = clock_t_to_jiffies(args[1]);
- return 0;
+ return br_set_ageing_time(br, args[1]);
case BRCTL_GET_PORT_INFO:
{
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 9d3051916a64..7ddbe7ec81d6 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -688,15 +688,10 @@ static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff
nf_bridge_info_free(skb);
return br_dev_queue_push_xmit(net, sk, skb);
}
-static int br_nf_push_frag_xmit_sk(struct sock *sk, struct sk_buff *skb)
-{
- struct net *net = dev_net(skb_dst(skb)->dev);
- return br_nf_push_frag_xmit(net, sk, skb);
-}
static int
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
- int (*output)(struct sock *, struct sk_buff *))
+ int (*output)(struct net *, struct sock *, struct sk_buff *))
{
unsigned int mtu = ip_skb_dst_mtu(skb);
struct iphdr *iph = ip_hdr(skb);
@@ -709,7 +704,7 @@ br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
return -EMSGSIZE;
}
- return ip_do_fragment(sk, skb, output);
+ return ip_do_fragment(net, sk, skb, output);
}
static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
@@ -757,7 +752,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
data->size);
- return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit_sk);
+ return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
}
if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
skb->protocol == htons(ETH_P_IPV6)) {
@@ -779,7 +774,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
data->size);
if (v6ops)
- return v6ops->fragment(sk, skb, br_nf_push_frag_xmit_sk);
+ return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
kfree_skb(skb);
return -EMSGSIZE;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index c64dcad11662..94b4de8c4646 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -34,7 +34,7 @@ static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
pvid = br_get_pvid(vg);
/* Count number of vlan infos */
- list_for_each_entry(v, &vg->vlan_list, vlist) {
+ list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
flags = 0;
/* only a context, bridge vlan not activated */
if (!br_vlan_should_use(v))
@@ -76,13 +76,19 @@ initvars:
static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
u32 filter_mask)
{
+ int num_vlans;
+
if (!vg)
return 0;
if (filter_mask & RTEXT_FILTER_BRVLAN)
return vg->num_vlans;
- return __get_num_vlan_infos(vg, filter_mask);
+ rcu_read_lock();
+ num_vlans = __get_num_vlan_infos(vg, filter_mask);
+ rcu_read_unlock();
+
+ return num_vlans;
}
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
@@ -96,10 +102,10 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
rcu_read_lock();
if (br_port_exists(dev)) {
p = br_port_get_rcu(dev);
- vg = nbp_vlan_group(p);
+ vg = nbp_vlan_group_rcu(p);
} else if (dev->priv_flags & IFF_EBRIDGE) {
br = netdev_priv(dev);
- vg = br_vlan_group(br);
+ vg = br_vlan_group_rcu(br);
}
num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
rcu_read_unlock();
@@ -121,6 +127,20 @@ static inline size_t br_port_info_size(void)
+ nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
+ nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
+ nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
+ + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */
+ + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */
+ + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */
+ + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_COST */
+ + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_ID */
+ + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_NO */
+ + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
+ + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_CONFIG_PENDING */
+ + nla_total_size(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
+ + nla_total_size(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
+ + nla_total_size(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MULTICAST_ROUTER */
+#endif
+ 0;
}
@@ -142,6 +162,7 @@ static int br_port_fill_attrs(struct sk_buff *skb,
const struct net_bridge_port *p)
{
u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
+ u64 timerval;
if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
@@ -154,8 +175,35 @@ static int br_port_fill_attrs(struct sk_buff *skb,
nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
- !!(p->flags & BR_PROXYARP_WIFI)))
+ !!(p->flags & BR_PROXYARP_WIFI)) ||
+ nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
+ &p->designated_root) ||
+ nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
+ &p->designated_bridge) ||
+ nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
+ nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
+ nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
+ nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
+ nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
+ p->topology_change_ack) ||
+ nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending))
+ return -EMSGSIZE;
+
+ timerval = br_timer_value(&p->message_age_timer);
+ if (nla_put_u64(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval))
return -EMSGSIZE;
+ timerval = br_timer_value(&p->forward_delay_timer);
+ if (nla_put_u64(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval))
+ return -EMSGSIZE;
+ timerval = br_timer_value(&p->hold_timer);
+ if (nla_put_u64(skb, IFLA_BRPORT_HOLD_TIMER, timerval))
+ return -EMSGSIZE;
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
+ p->multicast_router))
+ return -EMSGSIZE;
+#endif
return 0;
}
@@ -205,7 +253,7 @@ static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
* if vlaninfo represents a range
*/
pvid = br_get_pvid(vg);
- list_for_each_entry(v, &vg->vlan_list, vlist) {
+ list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
flags = 0;
if (!br_vlan_should_use(v))
continue;
@@ -255,7 +303,7 @@ static int br_fill_ifvlaninfo(struct sk_buff *skb,
u16 pvid;
pvid = br_get_pvid(vg);
- list_for_each_entry(v, &vg->vlan_list, vlist) {
+ list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
if (!br_vlan_should_use(v))
continue;
@@ -338,22 +386,27 @@ static int br_fill_ifinfo(struct sk_buff *skb,
struct nlattr *af;
int err;
+ /* RCU needed because of the VLAN locking rules (rcu || rtnl) */
+ rcu_read_lock();
if (port)
- vg = nbp_vlan_group(port);
+ vg = nbp_vlan_group_rcu(port);
else
- vg = br_vlan_group(br);
+ vg = br_vlan_group_rcu(br);
- if (!vg || !vg->num_vlans)
+ if (!vg || !vg->num_vlans) {
+ rcu_read_unlock();
goto done;
-
+ }
af = nla_nest_start(skb, IFLA_AF_SPEC);
- if (!af)
+ if (!af) {
+ rcu_read_unlock();
goto nla_put_failure;
-
+ }
if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
err = br_fill_ifvlaninfo_compressed(skb, vg);
else
err = br_fill_ifvlaninfo(skb, vg);
+ rcu_read_unlock();
if (err)
goto nla_put_failure;
nla_nest_end(skb, af);
@@ -476,6 +529,9 @@ static int br_afspec(struct net_bridge *br,
if (vinfo_start)
return -EINVAL;
vinfo_start = vinfo;
+ /* don't allow range of pvids */
+ if (vinfo_start->flags & BRIDGE_VLAN_INFO_PVID)
+ return -EINVAL;
continue;
}
@@ -521,6 +577,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
[IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
+ [IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
};
/* Change the state of the port and notify spanning tree */
@@ -592,6 +649,18 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
return err;
}
+ if (tb[IFLA_BRPORT_FLUSH])
+ br_fdb_delete_by_port(p->br, p, 0, 0);
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
+ u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);
+
+ err = br_multicast_set_port_router(p, mcast_router);
+ if (err)
+ return err;
+ }
+#endif
br_port_flags_change(p, old_flags ^ p->flags);
return 0;
}
@@ -758,6 +827,27 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
+ [IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
+ [IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
+ .len = ETH_ALEN },
+ [IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
+ [IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
+ [IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
+ [IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
+ [IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
+ [IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
+ [IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
+ [IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
+ [IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
+ [IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
+ [IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
+ [IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
+ [IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
+ [IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
+ [IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
+ [IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
+ [IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
+ [IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
};
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -788,9 +878,9 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
}
if (data[IFLA_BR_AGEING_TIME]) {
- u32 ageing_time = nla_get_u32(data[IFLA_BR_AGEING_TIME]);
-
- br->ageing_time = clock_t_to_jiffies(ageing_time);
+ err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
+ if (err)
+ return err;
}
if (data[IFLA_BR_STP_STATE]) {
@@ -821,6 +911,158 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (err)
return err;
}
+
+ if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
+ __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
+
+ err = __br_vlan_set_default_pvid(br, defpvid);
+ if (err)
+ return err;
+ }
+#endif
+
+ if (data[IFLA_BR_GROUP_FWD_MASK]) {
+ u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);
+
+ if (fwd_mask & BR_GROUPFWD_RESTRICTED)
+ return -EINVAL;
+ br->group_fwd_mask = fwd_mask;
+ }
+
+ if (data[IFLA_BR_GROUP_ADDR]) {
+ u8 new_addr[ETH_ALEN];
+
+ if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
+ return -EINVAL;
+ memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
+ if (!is_link_local_ether_addr(new_addr))
+ return -EINVAL;
+ if (new_addr[5] == 1 || /* 802.3x Pause address */
+ new_addr[5] == 2 || /* 802.3ad Slow protocols */
+ new_addr[5] == 3) /* 802.1X PAE address */
+ return -EINVAL;
+ spin_lock_bh(&br->lock);
+ memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
+ spin_unlock_bh(&br->lock);
+ br->group_addr_set = true;
+ br_recalculate_fwd_mask(br);
+ }
+
+ if (data[IFLA_BR_FDB_FLUSH])
+ br_fdb_flush(br);
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ if (data[IFLA_BR_MCAST_ROUTER]) {
+ u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);
+
+ err = br_multicast_set_router(br, multicast_router);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BR_MCAST_SNOOPING]) {
+ u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
+
+ err = br_multicast_toggle(br, mcast_snooping);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
+ u8 val;
+
+ val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
+ br->multicast_query_use_ifaddr = !!val;
+ }
+
+ if (data[IFLA_BR_MCAST_QUERIER]) {
+ u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);
+
+ err = br_multicast_set_querier(br, mcast_querier);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
+ u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);
+
+ br->hash_elasticity = val;
+ }
+
+ if (data[IFLA_BR_MCAST_HASH_MAX]) {
+ u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
+
+ err = br_multicast_set_hash_max(br, hash_max);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
+ u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
+
+ br->multicast_last_member_count = val;
+ }
+
+ if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
+ u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);
+
+ br->multicast_startup_query_count = val;
+ }
+
+ if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
+ u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);
+
+ br->multicast_last_member_interval = clock_t_to_jiffies(val);
+ }
+
+ if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
+ u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);
+
+ br->multicast_membership_interval = clock_t_to_jiffies(val);
+ }
+
+ if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
+ u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);
+
+ br->multicast_querier_interval = clock_t_to_jiffies(val);
+ }
+
+ if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
+ u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
+
+ br->multicast_query_interval = clock_t_to_jiffies(val);
+ }
+
+ if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
+ u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);
+
+ br->multicast_query_response_interval = clock_t_to_jiffies(val);
+ }
+
+ if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
+ u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
+
+ br->multicast_startup_query_interval = clock_t_to_jiffies(val);
+ }
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ if (data[IFLA_BR_NF_CALL_IPTABLES]) {
+ u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);
+
+ br->nf_call_iptables = val ? true : false;
+ }
+
+ if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
+ u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);
+
+ br->nf_call_ip6tables = val ? true : false;
+ }
+
+ if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
+ u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);
+
+ br->nf_call_arptables = val ? true : false;
+ }
#endif
return 0;
@@ -837,6 +1079,40 @@ static size_t br_get_size(const struct net_device *brdev)
nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */
+ nla_total_size(sizeof(u16)) + /* IFLA_BR_VLAN_DEFAULT_PVID */
+#endif
+ nla_total_size(sizeof(u16)) + /* IFLA_BR_GROUP_FWD_MASK */
+ nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_ROOT_ID */
+ nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_BRIDGE_ID */
+ nla_total_size(sizeof(u16)) + /* IFLA_BR_ROOT_PORT */
+ nla_total_size(sizeof(u32)) + /* IFLA_BR_ROOT_PATH_COST */
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE */
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
+ nla_total_size(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
+ nla_total_size(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
+ nla_total_size(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
+ nla_total_size(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
+ nla_total_size(ETH_ALEN) + /* IFLA_BR_GROUP_ADDR */
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_ROUTER */
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_SNOOPING */
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERIER */
+ nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_ELASTICITY */
+ nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */
+ nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
+ nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
+ nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
+ nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
+ nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
+ nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
+ nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
+ nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IPTABLES */
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IP6TABLES */
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_ARPTABLES */
#endif
0;
}
@@ -851,6 +1127,20 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
u32 stp_enabled = br->stp_enabled;
u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
u8 vlan_enabled = br_vlan_enabled(br);
+ u64 clockval;
+
+ clockval = br_timer_value(&br->hello_timer);
+ if (nla_put_u64(skb, IFLA_BR_HELLO_TIMER, clockval))
+ return -EMSGSIZE;
+ clockval = br_timer_value(&br->tcn_timer);
+ if (nla_put_u64(skb, IFLA_BR_TCN_TIMER, clockval))
+ return -EMSGSIZE;
+ clockval = br_timer_value(&br->topology_change_timer);
+ if (nla_put_u64(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval))
+ return -EMSGSIZE;
+ clockval = br_timer_value(&br->gc_timer);
+ if (nla_put_u64(skb, IFLA_BR_GC_TIMER, clockval))
+ return -EMSGSIZE;
if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
@@ -858,11 +1148,66 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
- nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled))
+ nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
+ nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
+ nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
+ &br->bridge_id) ||
+ nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
+ &br->designated_root) ||
+ nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
+ nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
+ nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
+ nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
+ br->topology_change_detected) ||
+ nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
return -EMSGSIZE;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
- if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto))
+ if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
+ nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid))
+ return -EMSGSIZE;
+#endif
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
+ nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
+ nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
+ br->multicast_query_use_ifaddr) ||
+ nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
+ nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
+ br->hash_elasticity) ||
+ nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
+ nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
+ br->multicast_last_member_count) ||
+ nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
+ br->multicast_startup_query_count))
+ return -EMSGSIZE;
+
+ clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
+ if (nla_put_u64(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval))
+ return -EMSGSIZE;
+ clockval = jiffies_to_clock_t(br->multicast_membership_interval);
+ if (nla_put_u64(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval))
+ return -EMSGSIZE;
+ clockval = jiffies_to_clock_t(br->multicast_querier_interval);
+ if (nla_put_u64(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval))
+ return -EMSGSIZE;
+ clockval = jiffies_to_clock_t(br->multicast_query_interval);
+ if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval))
+ return -EMSGSIZE;
+ clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
+ if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval))
+ return -EMSGSIZE;
+ clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
+ if (nla_put_u64(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval))
+ return -EMSGSIZE;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
+ br->nf_call_iptables ? 1 : 0) ||
+ nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
+ br->nf_call_ip6tables ? 1 : 0) ||
+ nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
+ br->nf_call_arptables ? 1 : 0))
return -EMSGSIZE;
#endif
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 4ed8308db66e..216018c76018 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -229,7 +229,7 @@ struct net_bridge_port
struct netpoll *np;
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
- struct net_bridge_vlan_group *vlgrp;
+ struct net_bridge_vlan_group __rcu *vlgrp;
#endif
};
@@ -337,7 +337,7 @@ struct net_bridge
struct kobject *ifobj;
u32 auto_cnt;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
- struct net_bridge_vlan_group *vlgrp;
+ struct net_bridge_vlan_group __rcu *vlgrp;
u8 vlan_enabled;
__be16 vlan_proto;
u16 default_pvid;
@@ -400,7 +400,7 @@ static inline bool br_vlan_is_brentry(const struct net_bridge_vlan *v)
return v->flags & BRIDGE_VLAN_INFO_BRENTRY;
}
-/* check if we should use the vlan entry is usable */
+/* check if we should use the vlan entry, returns false if it's only context */
static inline bool br_vlan_should_use(const struct net_bridge_vlan *v)
{
if (br_vlan_is_master(v)) {
@@ -690,6 +690,7 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto);
int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
int br_vlan_init(struct net_bridge *br);
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
+int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid);
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
void nbp_vlan_flush(struct net_bridge_port *port);
@@ -699,13 +700,25 @@ int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
static inline struct net_bridge_vlan_group *br_vlan_group(
const struct net_bridge *br)
{
- return br->vlgrp;
+ return rtnl_dereference(br->vlgrp);
}
static inline struct net_bridge_vlan_group *nbp_vlan_group(
const struct net_bridge_port *p)
{
- return p->vlgrp;
+ return rtnl_dereference(p->vlgrp);
+}
+
+static inline struct net_bridge_vlan_group *br_vlan_group_rcu(
+ const struct net_bridge *br)
+{
+ return rcu_dereference(br->vlgrp);
+}
+
+static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu(
+ const struct net_bridge_port *p)
+{
+ return rcu_dereference(p->vlgrp);
}
/* Since bridge now depends on 8021Q module, but the time bridge sees the
@@ -852,6 +865,19 @@ static inline struct net_bridge_vlan_group *nbp_vlan_group(
{
return NULL;
}
+
+static inline struct net_bridge_vlan_group *br_vlan_group_rcu(
+ const struct net_bridge *br)
+{
+ return NULL;
+}
+
+static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu(
+ const struct net_bridge_port *p)
+{
+ return NULL;
+}
+
#endif
struct nf_br_ops {
@@ -881,6 +907,7 @@ void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
int br_set_forward_delay(struct net_bridge *br, unsigned long x);
int br_set_hello_time(struct net_bridge *br, unsigned long x);
int br_set_max_age(struct net_bridge *br, unsigned long x);
+int br_set_ageing_time(struct net_bridge *br, u32 ageing_time);
/* br_stp_if.c */
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 3a982c02599a..80c34d70218c 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -41,13 +41,14 @@ void br_set_state(struct net_bridge_port *p, unsigned int state)
{
struct switchdev_attr attr = {
.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+ .flags = SWITCHDEV_F_DEFER,
.u.stp_state = state,
};
int err;
p->state = state;
err = switchdev_port_attr_set(p->dev, &attr);
- if (err && err != -EOPNOTSUPP)
+ if (err)
br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
(unsigned int) p->port_no, p->dev->name);
}
@@ -566,6 +567,29 @@ int br_set_max_age(struct net_bridge *br, unsigned long val)
}
+int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
+{
+ struct switchdev_attr attr = {
+ .id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
+ .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
+ .u.ageing_time = ageing_time,
+ };
+ unsigned long t = clock_t_to_jiffies(ageing_time);
+ int err;
+
+ if (t < BR_MIN_AGEING_TIME || t > BR_MAX_AGEING_TIME)
+ return -ERANGE;
+
+ err = switchdev_port_attr_set(br->dev, &attr);
+ if (err)
+ return err;
+
+ br->ageing_time = t;
+ mod_timer(&br->gc_timer, jiffies);
+
+ return 0;
+}
+
void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
{
br->bridge_forward_delay = t;
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 4c97fc50fb70..8365bd53c421 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -102,8 +102,15 @@ static ssize_t ageing_time_show(struct device *d,
static int set_ageing_time(struct net_bridge *br, unsigned long val)
{
- br->ageing_time = clock_t_to_jiffies(val);
- return 0;
+ int ret;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = br_set_ageing_time(br, val);
+ rtnl_unlock();
+
+ return ret;
}
static ssize_t ageing_time_store(struct device *d,
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 1a79e199ca3b..5f0d0cc4744f 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -54,9 +54,9 @@ static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
struct net_bridge_vlan_group *vg;
if (br_vlan_is_master(v))
- vg = v->br->vlgrp;
+ vg = br_vlan_group(v->br);
else
- vg = v->port->vlgrp;
+ vg = nbp_vlan_group(v->port);
if (flags & BRIDGE_VLAN_INFO_PVID)
__vlan_add_pvid(vg, v->vid);
@@ -72,38 +72,35 @@ static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
u16 vid, u16 flags)
{
- const struct net_device_ops *ops = dev->netdev_ops;
+ struct switchdev_obj_port_vlan v = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .flags = flags,
+ .vid_begin = vid,
+ .vid_end = vid,
+ };
int err;
- /* If driver uses VLAN ndo ops, use 8021q to install vid
- * on device, otherwise try switchdev ops to install vid.
+ /* Try switchdev op first. In case it is not supported, fallback to
+ * 8021q add.
*/
-
- if (ops->ndo_vlan_rx_add_vid) {
- err = vlan_vid_add(dev, br->vlan_proto, vid);
- } else {
- struct switchdev_obj_port_vlan v = {
- .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
- .flags = flags,
- .vid_begin = vid,
- .vid_end = vid,
- };
-
- err = switchdev_port_obj_add(dev, &v.obj);
- if (err == -EOPNOTSUPP)
- err = 0;
- }
-
+ err = switchdev_port_obj_add(dev, &v.obj);
+ if (err == -EOPNOTSUPP)
+ return vlan_vid_add(dev, br->vlan_proto, vid);
return err;
}
static void __vlan_add_list(struct net_bridge_vlan *v)
{
+ struct net_bridge_vlan_group *vg;
struct list_head *headp, *hpos;
struct net_bridge_vlan *vent;
- headp = br_vlan_is_master(v) ? &v->br->vlgrp->vlan_list :
- &v->port->vlgrp->vlan_list;
+ if (br_vlan_is_master(v))
+ vg = br_vlan_group(v->br);
+ else
+ vg = nbp_vlan_group(v->port);
+
+ headp = &vg->vlan_list;
list_for_each_prev(hpos, headp) {
vent = list_entry(hpos, struct net_bridge_vlan, vlist);
if (v->vid < vent->vid)
@@ -111,39 +108,72 @@ static void __vlan_add_list(struct net_bridge_vlan *v)
else
break;
}
- list_add(&v->vlist, hpos);
+ list_add_rcu(&v->vlist, hpos);
}
static void __vlan_del_list(struct net_bridge_vlan *v)
{
- list_del(&v->vlist);
+ list_del_rcu(&v->vlist);
}
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
u16 vid)
{
- const struct net_device_ops *ops = dev->netdev_ops;
- int err = 0;
+ struct switchdev_obj_port_vlan v = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid_begin = vid,
+ .vid_end = vid,
+ };
+ int err;
- /* If driver uses VLAN ndo ops, use 8021q to delete vid
- * on device, otherwise try switchdev ops to delete vid.
+ /* Try switchdev op first. In case it is not supported, fallback to
+ * 8021q del.
*/
-
- if (ops->ndo_vlan_rx_kill_vid) {
+ err = switchdev_port_obj_del(dev, &v.obj);
+ if (err == -EOPNOTSUPP) {
vlan_vid_del(dev, br->vlan_proto, vid);
- } else {
- struct switchdev_obj_port_vlan v = {
- .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
- .vid_begin = vid,
- .vid_end = vid,
- };
+ return 0;
+ }
+ return err;
+}
+
+/* Returns a master vlan, if it didn't exist it gets created. In all cases a
+ * reference is taken to the master vlan before returning.
+ */
+static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
+{
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_vlan *masterv;
- err = switchdev_port_obj_del(dev, &v.obj);
- if (err == -EOPNOTSUPP)
- err = 0;
+ vg = br_vlan_group(br);
+ masterv = br_vlan_find(vg, vid);
+ if (!masterv) {
+ /* missing global ctx, create it now */
+ if (br_vlan_add(br, vid, 0))
+ return NULL;
+ masterv = br_vlan_find(vg, vid);
+ if (WARN_ON(!masterv))
+ return NULL;
}
+ atomic_inc(&masterv->refcnt);
- return err;
+ return masterv;
+}
+
+static void br_vlan_put_master(struct net_bridge_vlan *masterv)
+{
+ struct net_bridge_vlan_group *vg;
+
+ if (!br_vlan_is_master(masterv))
+ return;
+
+ vg = br_vlan_group(masterv->br);
+ if (atomic_dec_and_test(&masterv->refcnt)) {
+ rhashtable_remove_fast(&vg->vlan_hash,
+ &masterv->vnode, br_vlan_rht_params);
+ __vlan_del_list(masterv);
+ kfree_rcu(masterv, rcu);
+ }
}
/* This is the shared VLAN add function which works for both ports and bridge
@@ -161,7 +191,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
struct net_bridge_vlan *masterv = NULL;
struct net_bridge_port *p = NULL;
- struct rhashtable *tbl;
+ struct net_bridge_vlan_group *vg;
struct net_device *dev;
struct net_bridge *br;
int err;
@@ -169,17 +199,15 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
if (br_vlan_is_master(v)) {
br = v->br;
dev = br->dev;
- tbl = &br->vlgrp->vlan_hash;
+ vg = br_vlan_group(br);
} else {
p = v->port;
br = p->br;
dev = p->dev;
- tbl = &p->vlgrp->vlan_hash;
+ vg = nbp_vlan_group(p);
}
if (p) {
- u16 master_flags = flags;
-
/* Add VLAN to the device filter if it is supported.
* This ensures tagged traffic enters the bridge when
* promiscuous mode is disabled by br_manage_promisc().
@@ -190,57 +218,49 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
/* need to work on the master vlan too */
if (flags & BRIDGE_VLAN_INFO_MASTER) {
- master_flags |= BRIDGE_VLAN_INFO_BRENTRY;
- err = br_vlan_add(br, v->vid, master_flags);
+ err = br_vlan_add(br, v->vid, flags |
+ BRIDGE_VLAN_INFO_BRENTRY);
if (err)
goto out_filt;
}
- masterv = br_vlan_find(br->vlgrp, v->vid);
- if (!masterv) {
- /* missing global ctx, create it now */
- err = br_vlan_add(br, v->vid, 0);
- if (err)
- goto out_filt;
- masterv = br_vlan_find(br->vlgrp, v->vid);
- WARN_ON(!masterv);
- }
- atomic_inc(&masterv->refcnt);
+ masterv = br_vlan_get_master(br, v->vid);
+ if (!masterv)
+ goto out_filt;
v->brvlan = masterv;
}
- /* Add the dev mac only if it's a usable vlan */
+ /* Add the dev mac and count the vlan only if it's usable */
if (br_vlan_should_use(v)) {
err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
if (err) {
br_err(br, "failed insert local address into bridge forwarding table\n");
goto out_filt;
}
+ vg->num_vlans++;
}
- err = rhashtable_lookup_insert_fast(tbl, &v->vnode, br_vlan_rht_params);
+ err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
+ br_vlan_rht_params);
if (err)
goto out_fdb_insert;
__vlan_add_list(v);
__vlan_add_flags(v, flags);
- if (br_vlan_is_master(v)) {
- if (br_vlan_is_brentry(v))
- br->vlgrp->num_vlans++;
- } else {
- p->vlgrp->num_vlans++;
- }
out:
return err;
out_fdb_insert:
- br_fdb_find_delete_local(br, p, br->dev->dev_addr, v->vid);
+ if (br_vlan_should_use(v)) {
+ br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
+ vg->num_vlans--;
+ }
out_filt:
if (p) {
__vlan_vid_del(dev, br, v->vid);
if (masterv) {
- atomic_dec(&masterv->refcnt);
+ br_vlan_put_master(masterv);
v->brvlan = NULL;
}
}
@@ -253,16 +273,13 @@ static int __vlan_del(struct net_bridge_vlan *v)
struct net_bridge_vlan *masterv = v;
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
- struct net_bridge *br;
int err = 0;
if (br_vlan_is_master(v)) {
- br = v->br;
- vg = v->br->vlgrp;
+ vg = br_vlan_group(v->br);
} else {
p = v->port;
- br = p->br;
- vg = v->port->vlgrp;
+ vg = nbp_vlan_group(v->port);
masterv = v->brvlan;
}
@@ -273,13 +290,9 @@ static int __vlan_del(struct net_bridge_vlan *v)
goto out;
}
- if (br_vlan_is_master(v)) {
- if (br_vlan_is_brentry(v)) {
- v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
- br->vlgrp->num_vlans--;
- }
- } else {
- p->vlgrp->num_vlans--;
+ if (br_vlan_should_use(v)) {
+ v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
+ vg->num_vlans--;
}
if (masterv != v) {
@@ -289,25 +302,25 @@ static int __vlan_del(struct net_bridge_vlan *v)
kfree_rcu(v, rcu);
}
- if (atomic_dec_and_test(&masterv->refcnt)) {
- rhashtable_remove_fast(&masterv->br->vlgrp->vlan_hash,
- &masterv->vnode, br_vlan_rht_params);
- __vlan_del_list(masterv);
- kfree_rcu(masterv, rcu);
- }
+ br_vlan_put_master(masterv);
out:
return err;
}
-static void __vlan_flush(struct net_bridge_vlan_group *vlgrp)
+static void __vlan_group_free(struct net_bridge_vlan_group *vg)
+{
+ WARN_ON(!list_empty(&vg->vlan_list));
+ rhashtable_destroy(&vg->vlan_hash);
+ kfree(vg);
+}
+
+static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
struct net_bridge_vlan *vlan, *tmp;
- __vlan_delete_pvid(vlgrp, vlgrp->pvid);
- list_for_each_entry_safe(vlan, tmp, &vlgrp->vlan_list, vlist)
+ __vlan_delete_pvid(vg, vg->pvid);
+ list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
__vlan_del(vlan);
- rhashtable_destroy(&vlgrp->vlan_hash);
- kfree(vlgrp);
}
struct sk_buff *br_handle_vlan(struct net_bridge *br,
@@ -469,7 +482,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
if (!br->vlan_enabled)
return true;
- vg = p->vlgrp;
+ vg = nbp_vlan_group(p);
if (!vg || !vg->num_vlans)
return false;
@@ -495,12 +508,14 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
*/
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
+ struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *vlan;
int ret;
ASSERT_RTNL();
- vlan = br_vlan_find(br->vlgrp, vid);
+ vg = br_vlan_group(br);
+ vlan = br_vlan_find(vg, vid);
if (vlan) {
if (!br_vlan_is_brentry(vlan)) {
/* Trying to change flags of non-existent bridge vlan */
@@ -515,7 +530,7 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
}
atomic_inc(&vlan->refcnt);
vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
- br->vlgrp->num_vlans++;
+ vg->num_vlans++;
}
__vlan_add_flags(vlan, flags);
return 0;
@@ -543,24 +558,33 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
*/
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
+ struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
ASSERT_RTNL();
- v = br_vlan_find(br->vlgrp, vid);
+ vg = br_vlan_group(br);
+ v = br_vlan_find(vg, vid);
if (!v || !br_vlan_is_brentry(v))
return -ENOENT;
br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
+ br_fdb_delete_by_port(br, NULL, vid, 0);
return __vlan_del(v);
}
void br_vlan_flush(struct net_bridge *br)
{
+ struct net_bridge_vlan_group *vg;
+
ASSERT_RTNL();
- __vlan_flush(br_vlan_group(br));
+ vg = br_vlan_group(br);
+ __vlan_flush(vg);
+ RCU_INIT_POINTER(br->vlgrp, NULL);
+ synchronize_rcu();
+ __vlan_group_free(vg);
}
struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
@@ -627,6 +651,7 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
int err = 0;
struct net_bridge_port *p;
struct net_bridge_vlan *vlan;
+ struct net_bridge_vlan_group *vg;
__be16 oldproto;
if (br->vlan_proto == proto)
@@ -634,7 +659,8 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
/* Add VLANs for the new proto to the device filter. */
list_for_each_entry(p, &br->port_list, list) {
- list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist) {
+ vg = nbp_vlan_group(p);
+ list_for_each_entry(vlan, &vg->vlan_list, vlist) {
err = vlan_vid_add(p->dev, proto, vlan->vid);
if (err)
goto err_filt;
@@ -648,19 +674,23 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
br_recalculate_fwd_mask(br);
/* Delete VLANs for the old proto from the device filter. */
- list_for_each_entry(p, &br->port_list, list)
- list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
+ list_for_each_entry(p, &br->port_list, list) {
+ vg = nbp_vlan_group(p);
+ list_for_each_entry(vlan, &vg->vlan_list, vlist)
vlan_vid_del(p->dev, oldproto, vlan->vid);
+ }
return 0;
err_filt:
- list_for_each_entry_continue_reverse(vlan, &p->vlgrp->vlan_list, vlist)
+ list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
vlan_vid_del(p->dev, proto, vlan->vid);
- list_for_each_entry_continue_reverse(p, &br->port_list, list)
- list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
+ list_for_each_entry_continue_reverse(p, &br->port_list, list) {
+ vg = nbp_vlan_group(p);
+ list_for_each_entry(vlan, &vg->vlan_list, vlist)
vlan_vid_del(p->dev, proto, vlan->vid);
+ }
return err;
}
@@ -704,25 +734,31 @@ static void br_vlan_disable_default_pvid(struct net_bridge *br)
/* Disable default_pvid on all ports where it is still
* configured.
*/
- if (vlan_default_pvid(br->vlgrp, pvid))
+ if (vlan_default_pvid(br_vlan_group(br), pvid))
br_vlan_delete(br, pvid);
list_for_each_entry(p, &br->port_list, list) {
- if (vlan_default_pvid(p->vlgrp, pvid))
+ if (vlan_default_pvid(nbp_vlan_group(p), pvid))
nbp_vlan_delete(p, pvid);
}
br->default_pvid = 0;
}
-static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
+int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
const struct net_bridge_vlan *pvent;
+ struct net_bridge_vlan_group *vg;
struct net_bridge_port *p;
u16 old_pvid;
int err = 0;
unsigned long *changed;
+ if (!pvid) {
+ br_vlan_disable_default_pvid(br);
+ return 0;
+ }
+
changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
GFP_KERNEL);
if (!changed)
@@ -733,8 +769,9 @@ static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
/* Update default_pvid config only if we do not conflict with
* user configuration.
*/
- pvent = br_vlan_find(br->vlgrp, pvid);
- if ((!old_pvid || vlan_default_pvid(br->vlgrp, old_pvid)) &&
+ vg = br_vlan_group(br);
+ pvent = br_vlan_find(vg, pvid);
+ if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
(!pvent || !br_vlan_should_use(pvent))) {
err = br_vlan_add(br, pvid,
BRIDGE_VLAN_INFO_PVID |
@@ -750,9 +787,10 @@ static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
/* Update default_pvid config only if we do not conflict with
* user configuration.
*/
+ vg = nbp_vlan_group(p);
if ((old_pvid &&
- !vlan_default_pvid(p->vlgrp, old_pvid)) ||
- br_vlan_find(p->vlgrp, pvid))
+ !vlan_default_pvid(vg, old_pvid)) ||
+ br_vlan_find(vg, pvid))
continue;
err = nbp_vlan_add(p, pvid,
@@ -813,12 +851,7 @@ int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
err = -EPERM;
goto unlock;
}
-
- if (!pvid)
- br_vlan_disable_default_pvid(br);
- else
- err = __br_vlan_set_default_pvid(br, pvid);
-
+ err = __br_vlan_set_default_pvid(br, pvid);
unlock:
rtnl_unlock();
return err;
@@ -826,17 +859,19 @@ unlock:
int br_vlan_init(struct net_bridge *br)
{
+ struct net_bridge_vlan_group *vg;
int ret = -ENOMEM;
- br->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
- if (!br->vlgrp)
+ vg = kzalloc(sizeof(*vg), GFP_KERNEL);
+ if (!vg)
goto out;
- ret = rhashtable_init(&br->vlgrp->vlan_hash, &br_vlan_rht_params);
+ ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
if (ret)
goto err_rhtbl;
- INIT_LIST_HEAD(&br->vlgrp->vlan_list);
+ INIT_LIST_HEAD(&vg->vlan_list);
br->vlan_proto = htons(ETH_P_8021Q);
br->default_pvid = 1;
+ rcu_assign_pointer(br->vlgrp, vg);
ret = br_vlan_add(br, 1,
BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
BRIDGE_VLAN_INFO_BRENTRY);
@@ -847,9 +882,9 @@ out:
return ret;
err_vlan_add:
- rhashtable_destroy(&br->vlgrp->vlan_hash);
+ rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
- kfree(br->vlgrp);
+ kfree(vg);
goto out;
}
@@ -867,9 +902,7 @@ int nbp_vlan_init(struct net_bridge_port *p)
if (ret)
goto err_rhtbl;
INIT_LIST_HEAD(&vg->vlan_list);
- /* Make sure everything's committed before publishing vg */
- smp_wmb();
- p->vlgrp = vg;
+ rcu_assign_pointer(p->vlgrp, vg);
if (p->br->default_pvid) {
ret = nbp_vlan_add(p, p->br->default_pvid,
BRIDGE_VLAN_INFO_PVID |
@@ -898,7 +931,7 @@ int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
ASSERT_RTNL();
- vlan = br_vlan_find(port->vlgrp, vid);
+ vlan = br_vlan_find(nbp_vlan_group(port), vid);
if (vlan) {
__vlan_add_flags(vlan, flags);
return 0;
@@ -926,7 +959,7 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
ASSERT_RTNL();
- v = br_vlan_find(port->vlgrp, vid);
+ v = br_vlan_find(nbp_vlan_group(port), vid);
if (!v)
return -ENOENT;
br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
@@ -937,12 +970,13 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
void nbp_vlan_flush(struct net_bridge_port *port)
{
- struct net_bridge_vlan *vlan;
+ struct net_bridge_vlan_group *vg;
ASSERT_RTNL();
- list_for_each_entry(vlan, &port->vlgrp->vlan_list, vlist)
- vlan_vid_del(port->dev, port->br->vlan_proto, vlan->vid);
-
- __vlan_flush(nbp_vlan_group(port));
+ vg = nbp_vlan_group(port);
+ __vlan_flush(vg);
+ RCU_INIT_POINTER(port->vlgrp, NULL);
+ synchronize_rcu();
+ __vlan_group_free(vg);
}