Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c        111
-rw-r--r--  net/8021q/vlan_dev.c          14
-rw-r--r--  net/appletalk/aarp.c           5
-rw-r--r--  net/bluetooth/bnep/bnep.h      1
-rw-r--r--  net/bluetooth/bnep/core.c     12
-rw-r--r--  net/bluetooth/bnep/netdev.c   33
-rw-r--r--  net/can/af_can.c              15
-rw-r--r--  net/can/bcm.c                 12
-rw-r--r--  net/can/raw.c                 15
-rw-r--r--  net/core/dev.c                82
-rw-r--r--  net/dsa/slave.c               51
-rw-r--r--  net/ipv4/tcp.c                 2
-rw-r--r--  net/ipv6/af_inet6.c          107
-rw-r--r--  net/ipv6/tcp_ipv6.c           45
-rw-r--r--  net/phonet/pep-gprs.c         12
-rw-r--r--  net/sched/sch_teql.c          20
-rw-r--r--  net/wimax/Kconfig             14
-rw-r--r--  net/wimax/id-table.c           8
-rw-r--r--  net/wimax/op-rfkill.c          2
-rw-r--r--  net/wireless/wext.c            4
20 files changed, 432 insertions, 133 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dd86a1dc4cd0..6c1323940263 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,46 +3,35 @@
#include <linux/if_vlan.h>
#include "vlan.h"
-struct vlan_hwaccel_cb {
- struct net_device *dev;
-};
-
-static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb)
-{
- return (struct vlan_hwaccel_cb *)skb->cb;
-}
-
/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling)
{
- struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-
- if (skb_bond_should_drop(skb)) {
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
- }
+ if (skb_bond_should_drop(skb))
+ goto drop;
skb->vlan_tci = vlan_tci;
- cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+ skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+ if (!skb->dev)
+ goto drop;
return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+
+drop:
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);
int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
- struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
- struct net_device *dev = cb->dev;
+ struct net_device *dev = skb->dev;
struct net_device_stats *stats;
+ skb->dev = vlan_dev_info(dev)->real_dev;
netif_nit_deliver(skb);
- if (dev == NULL) {
- kfree_skb(skb);
- return -1;
- }
-
skb->dev = dev;
skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
skb->vlan_tci = 0;
@@ -80,3 +69,79 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
+
+static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci, struct sk_buff *skb)
+{
+ struct sk_buff *p;
+
+ if (skb_bond_should_drop(skb))
+ goto drop;
+
+ skb->vlan_tci = vlan_tci;
+ skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+ if (!skb->dev)
+ goto drop;
+
+ for (p = napi->gro_list; p; p = p->next) {
+ NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev;
+ NAPI_GRO_CB(p)->flush = 0;
+ }
+
+ return dev_gro_receive(napi, skb);
+
+drop:
+ return 2;
+}
+
+int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci, struct sk_buff *skb)
+{
+ int err = NET_RX_SUCCESS;
+
+ switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+ case -1:
+ return netif_receive_skb(skb);
+
+ case 2:
+ err = NET_RX_DROP;
+ /* fall through */
+
+ case 1:
+ kfree_skb(skb);
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(vlan_gro_receive);
+
+int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci, struct napi_gro_fraginfo *info)
+{
+ struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+ int err = NET_RX_DROP;
+
+ if (!skb)
+ goto out;
+
+ err = NET_RX_SUCCESS;
+
+ switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+ case -1:
+ return netif_receive_skb(skb);
+
+ case 2:
+ err = NET_RX_DROP;
+ /* fall through */
+
+ case 1:
+ napi_reuse_skb(napi, skb);
+ break;
+ }
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(vlan_gro_frags);
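
A sketch of how a NIC driver's NAPI receive path might use the helpers exported above (illustrative only, not part of this commit; the mydrv_* names, the adapter layout and the vlan_tagged flag are assumptions):

/* Hypothetical driver-private data; only the members used below. */
struct mydrv_adapter {
	struct napi_struct napi;
	struct net_device *netdev;
	struct vlan_group *vlgrp;	/* filled in by the vlan rx_register hook */
};

/* Hand a completed rx buffer to the stack through GRO. */
static void mydrv_receive_skb(struct mydrv_adapter *adapter,
			      struct sk_buff *skb,
			      u16 vlan_tci, bool vlan_tagged)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (vlan_tagged && adapter->vlgrp)
		/* GRO-aware counterpart of vlan_hwaccel_receive_skb() */
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 vlan_tci, skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}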
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 89a3bbdfca3f..4a19acd3a32b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -546,6 +546,18 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
+static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int err = 0;
+
+ if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
+ err = ops->ndo_neigh_setup(dev, pa);
+
+ return err;
+}
+
static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
@@ -713,6 +725,7 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_set_multicast_list = vlan_dev_set_rx_mode,
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
+ .ndo_neigh_setup = vlan_dev_neigh_setup,
};
static const struct net_device_ops vlan_netdev_accel_ops = {
@@ -728,6 +741,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
.ndo_set_multicast_list = vlan_dev_set_rx_mode,
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
+ .ndo_neigh_setup = vlan_dev_neigh_setup,
};
void vlan_setup(struct net_device *dev)
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index b03ff58e9308..89f99d3beb60 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -443,13 +443,14 @@ static void aarp_send_probe_phase1(struct atalk_iface *iface)
{
struct ifreq atreq;
struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr;
+ const struct net_device_ops *ops = iface->dev->netdev_ops;
sa->sat_addr.s_node = iface->address.s_node;
sa->sat_addr.s_net = ntohs(iface->address.s_net);
/* We pass the Net:Node to the drivers/cards by a Device ioctl. */
- if (!(iface->dev->do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
- (void)iface->dev->do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
+ if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
+ ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
if (iface->address.s_net != htons(sa->sat_addr.s_net) ||
iface->address.s_node != sa->sat_addr.s_node)
iface->status |= ATIF_PROBE_FAIL;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index d20f8a40f36e..0d9e506f5d5a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -165,7 +165,6 @@ struct bnep_session {
struct socket *sock;
struct net_device *dev;
- struct net_device_stats stats;
};
void bnep_net_setup(struct net_device *dev);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 70fea8bdb4e5..52a6ce0d772b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -306,7 +306,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
struct sk_buff *nskb;
u8 type;
- s->stats.rx_bytes += skb->len;
+ dev->stats.rx_bytes += skb->len;
type = *(u8 *) skb->data; skb_pull(skb, 1);
@@ -343,7 +343,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
* may not be modified and because of the alignment requirements. */
nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
if (!nskb) {
- s->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
kfree_skb(skb);
return -ENOMEM;
}
@@ -378,14 +378,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len);
kfree_skb(skb);
- s->stats.rx_packets++;
+ dev->stats.rx_packets++;
nskb->ip_summed = CHECKSUM_NONE;
nskb->protocol = eth_type_trans(nskb, dev);
netif_rx_ni(nskb);
return 0;
badframe:
- s->stats.rx_errors++;
+ dev->stats.rx_errors++;
kfree_skb(skb);
return 0;
}
@@ -448,8 +448,8 @@ send:
kfree_skb(skb);
if (len > 0) {
- s->stats.tx_bytes += len;
- s->stats.tx_packets++;
+ s->dev->stats.tx_bytes += len;
+ s->dev->stats.tx_packets++;
return 0;
}
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index f897da6e0444..d7a0e9722def 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -55,12 +55,6 @@ static int bnep_net_close(struct net_device *dev)
return 0;
}
-static struct net_device_stats *bnep_net_get_stats(struct net_device *dev)
-{
- struct bnep_session *s = netdev_priv(dev);
- return &s->stats;
-}
-
static void bnep_net_set_mc_list(struct net_device *dev)
{
#ifdef CONFIG_BT_BNEP_MC_FILTER
@@ -128,11 +122,6 @@ static void bnep_net_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- return -EINVAL;
-}
-
#ifdef CONFIG_BT_BNEP_MC_FILTER
static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
{
@@ -217,6 +206,18 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
+static const struct net_device_ops bnep_netdev_ops = {
+ .ndo_open = bnep_net_open,
+ .ndo_stop = bnep_net_close,
+ .ndo_start_xmit = bnep_net_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = bnep_net_set_mc_list,
+ .ndo_set_mac_address = bnep_net_set_mac_addr,
+ .ndo_tx_timeout = bnep_net_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+
+};
+
void bnep_net_setup(struct net_device *dev)
{
@@ -224,15 +225,7 @@ void bnep_net_setup(struct net_device *dev)
dev->addr_len = ETH_ALEN;
ether_setup(dev);
-
- dev->open = bnep_net_open;
- dev->stop = bnep_net_close;
- dev->hard_start_xmit = bnep_net_xmit;
- dev->get_stats = bnep_net_get_stats;
- dev->do_ioctl = bnep_net_ioctl;
- dev->set_mac_address = bnep_net_set_mac_addr;
- dev->set_multicast_list = bnep_net_set_mc_list;
+ dev->netdev_ops = &bnep_netdev_ops;
dev->watchdog_timeo = HZ * 2;
- dev->tx_timeout = bnep_net_timeout;
}
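
The same mechanical conversion recurs in the dsa, phonet and teql hunks below: every function pointer that used to be assigned directly on the net_device moves into one const net_device_ops table, and the setup routine only installs dev->netdev_ops. A minimal sketch of the pattern with hypothetical foo_* handlers:

/* Hypothetical driver, shown only to illustrate the net_device_ops
 * conversion pattern used throughout this commit. */
static int foo_open(struct net_device *dev) { return 0; }
static int foo_stop(struct net_device *dev) { return 0; }
static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	= foo_open,
	.ndo_stop	= foo_stop,
	.ndo_start_xmit	= foo_start_xmit,
	.ndo_change_mtu	= eth_change_mtu,
};

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* replaces dev->open = foo_open; dev->hard_start_xmit = ...; etc. */
	dev->netdev_ops = &foo_netdev_ops;
}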
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3dadb338addd..fa417ca6cbe6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -414,6 +414,12 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
* filter for error frames (CAN_ERR_FLAG bit set in mask).
*
+ * The provided pointer to the sk_buff is guaranteed to be valid as long as
+ * the callback function is running. The callback function must *not* free
+ * the given sk_buff while processing its task. When the given sk_buff is
+ * needed after the end of the callback function, it must be cloned inside
+ * the callback function with skb_clone().
+ *
* Return:
* 0 on success
* -ENOMEM on missing cache mem to create subscription entry
@@ -569,13 +575,8 @@ EXPORT_SYMBOL(can_rx_unregister);
static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
- struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
-
- if (clone) {
- clone->sk = skb->sk;
- r->func(clone, r->data);
- r->matches++;
- }
+ r->func(skb, r->data);
+ r->matches++;
}
static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
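
The kernel-doc comment added above changes the ownership rules for receive callbacks: deliver() no longer hands each receiver its own clone, so a callback must not free the skb and has to clone it itself if it wants to keep it (exactly what raw_rcv() now does further down). A minimal sketch of a conforming callback; passing a sock pointer through the opaque data argument is purely an assumption of this sketch:

/* Callback as registered with can_rx_register(). */
static void my_can_rcv(struct sk_buff *skb, void *data)
{
	struct sock *sk = data;

	/* The skb is only valid for the duration of this call and is
	 * shared with other receivers: never kfree_skb() it here. */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);	/* the clone is ours to dispose of */
}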
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6248ae2502c7..1649c8ab2c2f 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -633,7 +633,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
hrtimer_cancel(&op->timer);
if (op->can_id != rxframe->can_id)
- goto rx_freeskb;
+ return;
/* save rx timestamp */
op->rx_stamp = skb->tstamp;
@@ -645,19 +645,19 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
if (op->flags & RX_RTR_FRAME) {
/* send reply for RTR-request (placed in op->frames[0]) */
bcm_can_tx(op);
- goto rx_freeskb;
+ return;
}
if (op->flags & RX_FILTER_ID) {
/* the easiest case */
bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
- goto rx_freeskb_starttimer;
+ goto rx_starttimer;
}
if (op->nframes == 1) {
/* simple compare with index 0 */
bcm_rx_cmp_to_index(op, 0, rxframe);
- goto rx_freeskb_starttimer;
+ goto rx_starttimer;
}
if (op->nframes > 1) {
@@ -678,10 +678,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
}
}
-rx_freeskb_starttimer:
+rx_starttimer:
bcm_rx_starttimer(op);
-rx_freeskb:
- kfree_skb(skb);
}
/*
diff --git a/net/can/raw.c b/net/can/raw.c
index 27aab63df467..0703cba4bf9f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -99,13 +99,14 @@ static void raw_rcv(struct sk_buff *skb, void *data)
struct raw_sock *ro = raw_sk(sk);
struct sockaddr_can *addr;
- if (!ro->recv_own_msgs) {
- /* check the received tx sock reference */
- if (skb->sk == sk) {
- kfree_skb(skb);
- return;
- }
- }
+ /* check the received tx sock reference */
+ if (!ro->recv_own_msgs && skb->sk == sk)
+ return;
+
+ /* clone the given skb to be able to enqueue it into the rcv queue */
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb)
+ return;
/*
* Put the datagram to the queue so that raw_recvmsg() can
diff --git a/net/core/dev.c b/net/core/dev.c
index 382df6c09eec..bab8bcedd62e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2387,7 +2387,7 @@ void napi_gro_flush(struct napi_struct *napi)
}
EXPORT_SYMBOL(napi_gro_flush);
-static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff **pp = NULL;
struct packet_type *ptype;
@@ -2417,11 +2417,14 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
for (p = napi->gro_list; p; p = p->next) {
count++;
- NAPI_GRO_CB(p)->same_flow =
- p->mac_len == mac_len &&
- !memcmp(skb_mac_header(p), skb_mac_header(skb),
- mac_len);
- NAPI_GRO_CB(p)->flush = 0;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ if (p->mac_len != mac_len ||
+ memcmp(skb_mac_header(p), skb_mac_header(skb),
+ mac_len))
+ NAPI_GRO_CB(p)->same_flow = 0;
}
pp = ptype->gro_receive(&napi->gro_list, skb);
@@ -2463,6 +2466,19 @@ ok:
normal:
return -1;
}
+EXPORT_SYMBOL(dev_gro_receive);
+
+static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ struct sk_buff *p;
+
+ for (p = napi->gro_list; p; p = p->next) {
+ NAPI_GRO_CB(p)->same_flow = 1;
+ NAPI_GRO_CB(p)->flush = 0;
+ }
+
+ return dev_gro_receive(napi, skb);
+}
int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
@@ -2479,11 +2495,26 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
}
EXPORT_SYMBOL(napi_gro_receive);
-int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+{
+ skb_shinfo(skb)->nr_frags = 0;
+
+ skb->len -= skb->data_len;
+ skb->truesize -= skb->data_len;
+ skb->data_len = 0;
+
+ __skb_pull(skb, skb_headlen(skb));
+ skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+
+ napi->skb = skb;
+}
+EXPORT_SYMBOL(napi_reuse_skb);
+
+struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
+ struct napi_gro_fraginfo *info)
{
struct net_device *dev = napi->dev;
struct sk_buff *skb = napi->skb;
- int err = NET_RX_DROP;
napi->skb = NULL;
@@ -2503,16 +2534,31 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
skb->len += info->len;
skb->truesize += info->len;
- if (!pskb_may_pull(skb, ETH_HLEN))
- goto reuse;
-
- err = NET_RX_SUCCESS;
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ napi_reuse_skb(napi, skb);
+ goto out;
+ }
skb->protocol = eth_type_trans(skb, dev);
skb->ip_summed = info->ip_summed;
skb->csum = info->csum;
+out:
+ return skb;
+}
+EXPORT_SYMBOL(napi_fraginfo_skb);
+
+int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+{
+ struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+ int err = NET_RX_DROP;
+
+ if (!skb)
+ goto out;
+
+ err = NET_RX_SUCCESS;
+
switch (__napi_gro_receive(napi, skb)) {
case -1:
return netif_receive_skb(skb);
@@ -2521,17 +2567,7 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
goto out;
}
-reuse:
- skb_shinfo(skb)->nr_frags = 0;
-
- skb->len -= skb->data_len;
- skb->truesize -= skb->data_len;
- skb->data_len = 0;
-
- __skb_pull(skb, skb_headlen(skb));
- skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
-
- napi->skb = skb;
+ napi_reuse_skb(napi, skb);
out:
return err;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3a410d20da0..a68fd79e9eca 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -286,6 +286,42 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_sset_count = dsa_slave_get_sset_count,
};
+#ifdef CONFIG_NET_DSA_TAG_DSA
+static const struct net_device_ops dsa_netdev_ops = {
+ .ndo_open = dsa_slave_open,
+ .ndo_stop = dsa_slave_close,
+ .ndo_start_xmit = dsa_xmit,
+ .ndo_change_rx_flags = dsa_slave_change_rx_flags,
+ .ndo_set_rx_mode = dsa_slave_set_rx_mode,
+ .ndo_set_multicast_list = dsa_slave_set_rx_mode,
+ .ndo_set_mac_address = dsa_slave_set_mac_address,
+ .ndo_do_ioctl = dsa_slave_ioctl,
+};
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+static const struct net_device_ops edsa_netdev_ops = {
+ .ndo_open = dsa_slave_open,
+ .ndo_stop = dsa_slave_close,
+ .ndo_start_xmit = edsa_xmit,
+ .ndo_change_rx_flags = dsa_slave_change_rx_flags,
+ .ndo_set_rx_mode = dsa_slave_set_rx_mode,
+ .ndo_set_multicast_list = dsa_slave_set_rx_mode,
+ .ndo_set_mac_address = dsa_slave_set_mac_address,
+ .ndo_do_ioctl = dsa_slave_ioctl,
+};
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+static const struct net_device_ops trailer_netdev_ops = {
+ .ndo_open = dsa_slave_open,
+ .ndo_stop = dsa_slave_close,
+ .ndo_start_xmit = trailer_xmit,
+ .ndo_change_rx_flags = dsa_slave_change_rx_flags,
+ .ndo_set_rx_mode = dsa_slave_set_rx_mode,
+ .ndo_set_multicast_list = dsa_slave_set_rx_mode,
+ .ndo_set_mac_address = dsa_slave_set_mac_address,
+ .ndo_do_ioctl = dsa_slave_ioctl,
+};
+#endif
/* slave device setup *******************************************************/
struct net_device *
@@ -306,32 +342,27 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN);
slave_dev->tx_queue_len = 0;
+
switch (ds->tag_protocol) {
#ifdef CONFIG_NET_DSA_TAG_DSA
case htons(ETH_P_DSA):
- slave_dev->hard_start_xmit = dsa_xmit;
+ slave_dev->netdev_ops = &dsa_netdev_ops;
break;
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
case htons(ETH_P_EDSA):
- slave_dev->hard_start_xmit = edsa_xmit;
+ slave_dev->netdev_ops = &edsa_netdev_ops;
break;
#endif
#ifdef CONFIG_NET_DSA_TAG_TRAILER
case htons(ETH_P_TRAILER):
- slave_dev->hard_start_xmit = trailer_xmit;
+ slave_dev->netdev_ops = &trailer_netdev_ops;
break;
#endif
default:
BUG();
}
- slave_dev->open = dsa_slave_open;
- slave_dev->stop = dsa_slave_close;
- slave_dev->change_rx_flags = dsa_slave_change_rx_flags;
- slave_dev->set_rx_mode = dsa_slave_set_rx_mode;
- slave_dev->set_multicast_list = dsa_slave_set_rx_mode;
- slave_dev->set_mac_address = dsa_slave_set_mac_address;
- slave_dev->do_ioctl = dsa_slave_ioctl;
+
SET_NETDEV_DEV(slave_dev, parent);
slave_dev->vlan_features = master->vlan_features;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 35bcddf8a932..bd6ff907d9e4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2542,6 +2542,7 @@ out:
return pp;
}
+EXPORT_SYMBOL(tcp_gro_receive);
int tcp_gro_complete(struct sk_buff *skb)
{
@@ -2558,6 +2559,7 @@ int tcp_gro_complete(struct sk_buff *skb)
return 0;
}
+EXPORT_SYMBOL(tcp_gro_complete);
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 437b750b98fd..94f74f5b0cbf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -672,8 +672,7 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
-static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
- int proto)
+static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
struct inet6_protocol *ops = NULL;
@@ -704,7 +703,7 @@ static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
__skb_pull(skb, len);
}
- return ops;
+ return proto;
}
static int ipv6_gso_send_check(struct sk_buff *skb)
@@ -721,7 +720,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
err = -EPROTONOSUPPORT;
rcu_read_lock();
- ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ ops = rcu_dereference(inet6_protos[
+ ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
if (likely(ops && ops->gso_send_check)) {
skb_reset_transport_header(skb);
err = ops->gso_send_check(skb);
@@ -757,7 +758,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
segs = ERR_PTR(-EPROTONOSUPPORT);
rcu_read_lock();
- ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ ops = rcu_dereference(inet6_protos[
+ ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
if (likely(ops && ops->gso_segment)) {
skb_reset_transport_header(skb);
segs = ops->gso_segment(skb, features);
@@ -777,11 +780,105 @@ out:
return segs;
}
+struct ipv6_gro_cb {
+ struct napi_gro_cb napi;
+ int proto;
+};
+
+#define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb)
+
+static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ struct inet6_protocol *ops;
+ struct sk_buff **pp = NULL;
+ struct sk_buff *p;
+ struct ipv6hdr *iph;
+ unsigned int nlen;
+ int flush = 1;
+ int proto;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+ goto out;
+
+ iph = ipv6_hdr(skb);
+ __skb_pull(skb, sizeof(*iph));
+
+ flush += ntohs(iph->payload_len) != skb->len;
+
+ rcu_read_lock();
+ proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
+ IPV6_GRO_CB(skb)->proto = proto;
+ ops = rcu_dereference(inet6_protos[proto]);
+ if (!ops || !ops->gro_receive)
+ goto out_unlock;
+
+ flush--;
+ skb_reset_transport_header(skb);
+ nlen = skb_network_header_len(skb);
+
+ for (p = *head; p; p = p->next) {
+ struct ipv6hdr *iph2;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ iph2 = ipv6_hdr(p);
+
+ /* All fields must match except length. */
+ if (nlen != skb_network_header_len(p) ||
+ memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) ||
+ memcmp(&iph->nexthdr, &iph2->nexthdr,
+ nlen - offsetof(struct ipv6hdr, nexthdr))) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ NAPI_GRO_CB(p)->flush |= flush;
+ }
+
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ pp = ops->gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+static int ipv6_gro_complete(struct sk_buff *skb)
+{
+ struct inet6_protocol *ops;
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ int err = -ENOSYS;
+
+ iph->payload_len = htons(skb->len - skb_network_offset(skb) -
+ sizeof(*iph));
+
+ rcu_read_lock();
+ ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]);
+ if (WARN_ON(!ops || !ops->gro_complete))
+ goto out_unlock;
+
+ err = ops->gro_complete(skb);
+
+out_unlock:
+ rcu_read_unlock();
+
+ return err;
+}
+
static struct packet_type ipv6_packet_type = {
.type = __constant_htons(ETH_P_IPV6),
.func = ipv6_rcv,
.gso_send_check = ipv6_gso_send_check,
.gso_segment = ipv6_gso_segment,
+ .gro_receive = ipv6_gro_receive,
+ .gro_complete = ipv6_gro_complete,
};
static int __init ipv6_packet_init(void)
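
ipv6_gro_cb above layers protocol-private state on top of the generic GRO control block in skb->cb; the generic struct has to stay the first member so that NAPI_GRO_CB() and the protocol's own accessor alias the same bytes. The same trick for a hypothetical protocol 'foo' (illustrative only):

/* Private GRO state for a made-up protocol. */
struct foo_gro_cb {
	struct napi_gro_cb napi;	/* must come first, see NAPI_GRO_CB() */
	int proto;			/* hypothetical private field */
};

#define FOO_GRO_CB(skb) ((struct foo_gro_cb *)(skb)->cb)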
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8b8337a8310..1297306d729c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,7 +101,7 @@ static void tcp_v6_hash(struct sock *sk)
}
}
-static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
+static __inline__ __sum16 tcp_v6_check(int len,
struct in6_addr *saddr,
struct in6_addr *daddr,
__wsum base)
@@ -501,7 +501,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
if (skb) {
struct tcphdr *th = tcp_hdr(skb);
- th->check = tcp_v6_check(th, skb->len,
+ th->check = tcp_v6_check(skb->len,
&treq->loc_addr, &treq->rmt_addr,
csum_partial(th, skb->len, skb->csum));
@@ -942,6 +942,41 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
return 0;
}
+struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr,
+ skb->csum)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ }
+
+ /* fall through */
+ case CHECKSUM_NONE:
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+ }
+
+ return tcp_gro_receive(head, skb);
+}
+EXPORT_SYMBOL(tcp6_gro_receive);
+
+int tcp6_gro_complete(struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+
+ return tcp_gro_complete(skb);
+}
+EXPORT_SYMBOL(tcp6_gro_complete);
+
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
u32 ts, struct tcp_md5sig_key *key, int rst)
{
@@ -1429,14 +1464,14 @@ out:
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_COMPLETE) {
- if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
+ if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, skb->csum)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return 0;
}
}
- skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
+ skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0));
@@ -2062,6 +2097,8 @@ static struct inet6_protocol tcpv6_protocol = {
.err_handler = tcp_v6_err,
.gso_send_check = tcp_v6_gso_send_check,
.gso_segment = tcp_tso_segment,
+ .gro_receive = tcp6_gro_receive,
+ .gro_complete = tcp6_gro_complete,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index b0ceac2d6cd1..6a91a32a80c1 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -227,6 +227,13 @@ static int gprs_set_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static const struct net_device_ops gprs_netdev_ops = {
+ .ndo_open = gprs_open,
+ .ndo_stop = gprs_close,
+ .ndo_start_xmit = gprs_xmit,
+ .ndo_change_mtu = gprs_set_mtu,
+};
+
static void gprs_setup(struct net_device *dev)
{
dev->features = NETIF_F_FRAGLIST;
@@ -237,11 +244,8 @@ static void gprs_setup(struct net_device *dev)
dev->addr_len = 0;
dev->tx_queue_len = 10;
+ dev->netdev_ops = &gprs_netdev_ops;
dev->destructor = free_netdev;
- dev->open = gprs_open;
- dev->stop = gprs_close;
- dev->hard_start_xmit = gprs_xmit; /* mandatory */
- dev->change_mtu = gprs_set_mtu;
}
/*
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index cfc8e7caba62..ec697cebb63b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -289,9 +289,9 @@ restart:
do {
struct net_device *slave = qdisc_dev(q);
- struct netdev_queue *slave_txq;
+ struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
+ const struct net_device_ops *slave_ops = slave->netdev_ops;
- slave_txq = netdev_get_tx_queue(slave, 0);
if (slave_txq->qdisc_sleeping != q)
continue;
if (__netif_subqueue_stopped(slave, subq) ||
@@ -305,7 +305,7 @@ restart:
if (__netif_tx_trylock(slave_txq)) {
if (!netif_tx_queue_stopped(slave_txq) &&
!netif_tx_queue_frozen(slave_txq) &&
- slave->hard_start_xmit(skb, slave) == 0) {
+ slave_ops->ndo_start_xmit(skb, slave) == 0) {
__netif_tx_unlock(slave_txq);
master->slaves = NEXT_SLAVE(q);
netif_wake_queue(dev);
@@ -420,6 +420,14 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static const struct net_device_ops teql_netdev_ops = {
+ .ndo_open = teql_master_open,
+ .ndo_stop = teql_master_close,
+ .ndo_start_xmit = teql_master_xmit,
+ .ndo_get_stats = teql_master_stats,
+ .ndo_change_mtu = teql_master_mtu,
+};
+
static __init void teql_master_setup(struct net_device *dev)
{
struct teql_master *master = netdev_priv(dev);
@@ -436,11 +444,7 @@ static __init void teql_master_setup(struct net_device *dev)
ops->destroy = teql_destroy;
ops->owner = THIS_MODULE;
- dev->open = teql_master_open;
- dev->hard_start_xmit = teql_master_xmit;
- dev->stop = teql_master_close;
- dev->get_stats = teql_master_stats;
- dev->change_mtu = teql_master_mtu;
+ dev->netdev_ops = &teql_netdev_ops;
dev->type = ARPHRD_VOID;
dev->mtu = 1500;
dev->tx_queue_len = 100;
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig
index 0bdbb6928205..18495cdcd10d 100644
--- a/net/wimax/Kconfig
+++ b/net/wimax/Kconfig
@@ -1,9 +1,23 @@
#
# WiMAX LAN device configuration
#
+# Note the ugly 'depends on' on WIMAX: it prevents RFKILL from being a
+# module if WIMAX is to be linked in. The WiMAX code is done in such a
+# way that it doesn't require an explicit dependency on RFKILL in
+# case an embedded system wants to rip it out.
+#
+# As well, enablement of the RFKILL code means we need the INPUT layer
+# support to inject events coming from hw rfkill switches. That
+# dependency could be killed if input.h provided appropriate means to
+# work when input is disabled.
+
+comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled"
+ depends on INPUT = n && RFKILL != n
menuconfig WIMAX
tristate "WiMAX Wireless Broadband support"
+ depends on (y && RFKILL != m) || m
+ depends on (INPUT && RFKILL != n) || RFKILL = n
help
Select to configure support for devices that provide
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
index d3b88558682c..5e685f7eda90 100644
--- a/net/wimax/id-table.c
+++ b/net/wimax/id-table.c
@@ -123,15 +123,17 @@ void wimax_id_table_rm(struct wimax_dev *wimax_dev)
/*
* Release the gennetlink family id / mapping table
*
- * On debug, verify that the table is empty upon removal.
+ * On debug, verify that the table is empty upon removal. We want the
+ * code always compiled, to ensure it doesn't bit rot. It will be
+ * compiled out if CONFIG_BUG is disabled.
*/
void wimax_id_table_release(void)
{
+ struct wimax_dev *wimax_dev;
+
#ifndef CONFIG_BUG
return;
#endif
- struct wimax_dev *wimax_dev;
-
spin_lock(&wimax_id_table_lock);
list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n",
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 8745bac173f1..2b75aee04217 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -71,7 +71,7 @@
#define D_SUBMODULE op_rfkill
#include "debug-levels.h"
-#ifdef CONFIG_RFKILL
+#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
/**
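
The one-line change above widens the guard so this code also builds when RFKILL=m: CONFIG_RFKILL is only defined for built-in builds, CONFIG_RFKILL_MODULE for modular ones. The general shape of the pattern, with a hypothetical CONFIG_FOO:

/* Compile the real code whenever the foo subsystem is available, whether
 * built-in (CONFIG_FOO) or modular (CONFIG_FOO_MODULE); otherwise provide
 * a stub (hypothetical option and function names). */
#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
int foo_dependent_feature(void)
{
	return 0;	/* real implementation goes here */
}
#else
static inline int foo_dependent_feature(void)
{
	return -EOPNOTSUPP;	/* feature compiled out */
}
#endif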
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index e49a2d1ef1e4..cb6a5bb85d80 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1055,8 +1055,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
return private(dev, iwr, cmd, info, handler);
}
/* Old driver API : call driver ioctl handler */
- if (dev->do_ioctl)
- return dev->do_ioctl(dev, ifr, cmd);
+ if (dev->netdev_ops->ndo_do_ioctl)
+ return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
return -EOPNOTSUPP;
}