#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"

bool vlan_do_receive(struct sk_buff **skbp)
{
        struct sk_buff *skb = *skbp;
        u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
        struct net_device *vlan_dev;
        struct vlan_pcpu_stats *rx_stats;

        vlan_dev = vlan_find_dev(skb->dev, vlan_id);
        if (!vlan_dev) {
                if (vlan_id)
                        skb->pkt_type = PACKET_OTHERHOST;
                return false;
        }

        skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return false;

        skb->dev = vlan_dev;
        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        skb->vlan_tci = 0;

        rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
        rx_stats->rx_bytes += skb->len;

        switch (skb->pkt_type) {
        case PACKET_BROADCAST:
                break;
        case PACKET_MULTICAST:
                rx_stats->rx_multicast++;
                break;
        case PACKET_OTHERHOST:
                /* Our lower layer thinks this is not local, let's make sure.
                 * This allows the VLAN to have a different MAC than the
                 * underlying device, and still route correctly.
                 */
                if (!compare_ether_addr(eth_hdr(skb)->h_dest,
                                        vlan_dev->dev_addr))
                        skb->pkt_type = PACKET_HOST;
                break;
        }
        u64_stats_update_end(&rx_stats->syncp);
        return true;
}

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
        return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
        return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
                      u16 vlan_tci, int polling)
{
        __vlan_hwaccel_put_tag(skb, vlan_tci);
        return polling ? netif_receive_skb(skb) : netif_rx(skb);
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);

gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
                              unsigned int vlan_tci, struct sk_buff *skb)
{
        __vlan_hwaccel_put_tag(skb, vlan_tci);
        return napi_gro_receive(napi, skb);
}
EXPORT_SYMBOL(vlan_gro_receive);

gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
                            unsigned int vlan_tci)
{
        __vlan_hwaccel_put_tag(napi->skb, vlan_tci);
        return napi_gro_frags(napi);
}
EXPORT_SYMBOL(vlan_gro_frags);
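
/*
 * Hypothetical usage sketch, illustration only (compiled out): a NAPI
 * driver whose hardware has already stripped the 802.1Q tag would hand
 * the frame to the stack through one of the helpers above, roughly as
 * below. The example_* name is invented for this sketch and is not part
 * of the VLAN API.
 */
#if 0
static gro_result_t example_driver_rx(struct napi_struct *napi,
                                      struct vlan_group *grp,
                                      struct sk_buff *skb, u16 vlan_tci)
{
        /* Attach the hardware-stripped tag and let GRO deliver the skb;
         * vlan_do_receive() later steers it to the matching VLAN device. */
        return vlan_gro_receive(napi, grp, vlan_tci, skb);
}
#endif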

static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
        if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
                if (skb_cow(skb, skb_headroom(skb)) < 0)
                        skb = NULL;
                if (skb) {
                        /* Lifted from Gleb's VLAN code... */
                        memmove(skb->data - ETH_HLEN,
                                skb->data - VLAN_ETH_HLEN, 12);
                        skb->mac_header += VLAN_HLEN;
                }
        }
        return skb;
}

static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
{
        __be16 proto;
        unsigned char *rawp;

        /*
         * Was a VLAN packet, grab the encapsulated protocol, which the layer
         * three protocols care about.
         */
        proto = vhdr->h_vlan_encapsulated_proto;
        if (ntohs(proto) >= 1536) {
                skb->protocol = proto;
                return;
        }

        rawp = skb->data;
        if (*(unsigned short *) rawp == 0xFFFF)
                /*
                 * This is a magic hack to spot IPX packets. Older Novell
                 * breaks the protocol design and runs IPX over 802.3 without
                 * an 802.2 LLC layer. We look for FFFF which isn't a used
                 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
                 * but does for the rest.
                 */
                skb->protocol = htons(ETH_P_802_3);
        else
                /*
                 * Real 802.2 LLC
                 */
                skb->protocol = htons(ETH_P_802_2);
}

struct sk_buff *vlan_untag(struct sk_buff *skb)
{
        struct vlan_hdr *vhdr;
        u16 vlan_tci;

        if (unlikely(vlan_tx_tag_present(skb))) {
                /* vlan_tci is already set-up so leave this for another time */
                return skb;
        }

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                goto err_free;

        if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
                goto err_free;

        vhdr = (struct vlan_hdr *) skb->data;
        vlan_tci = ntohs(vhdr->h_vlan_TCI);
        __vlan_hwaccel_put_tag(skb, vlan_tci);

        skb_pull_rcsum(skb, VLAN_HLEN);
        vlan_set_encap_proto(skb, vhdr);

        skb = vlan_check_reorder_header(skb);
        if (unlikely(!skb))
                goto err_free;

        return skb;

err_free:
        kfree_skb(skb);
        return NULL;
}
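
/*
 * Hypothetical usage sketch, illustration only (compiled out): the core
 * receive path is expected to call vlan_untag() for a frame that still
 * carries an in-band 802.1Q header, roughly as below. The example_* name
 * is invented for this sketch.
 */
#if 0
static struct sk_buff *example_core_rx_untag(struct sk_buff *skb)
{
        if (skb->protocol == cpu_to_be16(ETH_P_8021Q) &&
            !vlan_tx_tag_present(skb)) {
                /* Move the tag out of the payload into skb->vlan_tci;
                 * vlan_untag() may free the skb and return NULL. */
                skb = vlan_untag(skb);
                if (unlikely(!skb))
                        return NULL;
        }
        return skb;
}
#endif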