author     Linus Torvalds <torvalds@linux-foundation.org>    2012-01-06 17:22:09 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-01-06 17:22:09 -0800
commit     9753dfe19a85e7e45a34a56f4cb2048bb4f50e27 (patch)
tree       c017a1b4a70b8447c71b01d8b320e071546b5c9d /net/caif
parent     edf7c8148ec40c0fd27c0ef3f688defcc65e3913 (diff)
parent     9f42f126154786e6e76df513004800c8c633f020 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1958 commits)
net: pack skb_shared_info more efficiently
net_sched: red: split red_parms into parms and vars
net_sched: sfq: extend limits
cnic: Improve error recovery on bnx2x devices
cnic: Re-init dev->stats_addr after chip reset
net_sched: Bug in netem reordering
bna: fix sparse warnings/errors
bna: make ethtool_ops and strings const
xgmac: cleanups
net: make ethtool_ops const
vmxnet3" make ethtool ops const
xen-netback: make ops structs const
virtio_net: Pass gfp flags when allocating rx buffers.
ixgbe: FCoE: Add support for ndo_get_fcoe_hbainfo() call
netdev: FCoE: Add new ndo_get_fcoe_hbainfo() call
igb: reset PHY after recovering from PHY power down
igb: add basic runtime PM support
igb: Add support for byte queue limits.
e1000: cleanup CE4100 MDIO registers access
e1000: unmap ce4100_gbe_mdio_base_virt in e1000_remove
...
Diffstat (limited to 'net/caif')
 -rw-r--r--  net/caif/Kconfig        |  11
 -rw-r--r--  net/caif/Makefile       |   1
 -rw-r--r--  net/caif/caif_dev.c     | 273
 -rw-r--r--  net/caif/caif_usb.c     | 208
 -rw-r--r--  net/caif/cfcnfg.c       |  47
 -rw-r--r--  net/caif/cfpkt_skbuff.c |  15
 -rw-r--r--  net/caif/cfrfml.c       |   2
 -rw-r--r--  net/caif/cfserl.c       |   3
 8 files changed, 454 insertions(+), 106 deletions(-)
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index 529750da9624..936361e5a2b6 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -40,3 +40,14 @@ config CAIF_NETDEV
 	If you select to build it as a built-in then the main CAIF device must
 	also be a built-in.
 	If unsure say Y.
+
+config CAIF_USB
+	tristate "CAIF USB support"
+	depends on CAIF
+	default n
+	---help---
+	Say Y if you are using CAIF over USB CDC NCM.
+	This can be either built-in or a loadable module,
+	If you select to build it as a built-in then the main CAIF device must
+	also be a built-in.
+	If unsure say N.
diff --git a/net/caif/Makefile b/net/caif/Makefile
index ebcd4e7e6f47..cc2b51154d03 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -10,5 +10,6 @@ caif-y := caif_dev.o \
 obj-$(CONFIG_CAIF) += caif.o
 obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
 obj-$(CONFIG_CAIF) += caif_socket.o
+obj-$(CONFIG_CAIF_USB) += caif_usb.o
 
 export-y := caif.o
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index f1fa1f6e658d..b0ce14fbf6ef 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -17,6 +17,7 @@
 #include <linux/netdevice.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <net/netns/generic.h>
 #include <net/net_namespace.h>
 #include <net/pkt_sched.h>
@@ -24,6 +25,7 @@
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
+#include <net/caif/cfserl.h>
 
 MODULE_LICENSE("GPL");
 
@@ -33,6 +35,10 @@ struct caif_device_entry {
 	struct list_head list;
 	struct net_device *netdev;
 	int __percpu *pcpu_refcnt;
+	spinlock_t flow_lock;
+	struct sk_buff *xoff_skb;
+	void (*xoff_skb_dtor)(struct sk_buff *skb);
+	bool xoff;
 };
 
 struct caif_device_entry_list {
@@ -47,13 +53,14 @@ struct caif_net {
 };
 
 static int caif_net_id;
+static int q_high = 50; /* Percent */
 
 struct cfcnfg *get_cfcnfg(struct net *net)
 {
 	struct caif_net *caifn;
-	BUG_ON(!net);
 	caifn = net_generic(net, caif_net_id);
-	BUG_ON(!caifn);
+	if (!caifn)
+		return NULL;
 	return caifn->cfg;
 }
 EXPORT_SYMBOL(get_cfcnfg);
@@ -61,9 +68,9 @@ EXPORT_SYMBOL(get_cfcnfg);
 static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
 	struct caif_net *caifn;
-	BUG_ON(!net);
 	caifn = net_generic(net, caif_net_id);
-	BUG_ON(!caifn);
+	if (!caifn)
+		return NULL;
 	return &caifn->caifdevs;
 }
 
@@ -92,7 +99,8 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 	struct caif_device_entry *caifd;
 
 	caifdevs = caif_device_list(dev_net(dev));
-	BUG_ON(!caifdevs);
+	if (!caifdevs)
+		return NULL;
 
 	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
 	if (!caifd)
@@ -112,7 +120,9 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
 	struct caif_device_entry_list *caifdevs =
 	    caif_device_list(dev_net(dev));
 	struct caif_device_entry *caifd;
-	BUG_ON(!caifdevs);
+	if (!caifdevs)
+		return NULL;
+
 	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
 		if (caifd->netdev == dev)
 			return caifd;
@@ -120,15 +130,106 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
 	return NULL;
 }
 
+void caif_flow_cb(struct sk_buff *skb)
+{
+	struct caif_device_entry *caifd;
+	void (*dtor)(struct sk_buff *skb) = NULL;
+	bool send_xoff;
+
+	WARN_ON(skb->dev == NULL);
+
+	rcu_read_lock();
+	caifd = caif_get(skb->dev);
+	caifd_hold(caifd);
+	rcu_read_unlock();
+
+	spin_lock_bh(&caifd->flow_lock);
+	send_xoff = caifd->xoff;
+	caifd->xoff = 0;
+	if (!WARN_ON(caifd->xoff_skb_dtor == NULL)) {
+		WARN_ON(caifd->xoff_skb != skb);
+		dtor = caifd->xoff_skb_dtor;
+		caifd->xoff_skb = NULL;
+		caifd->xoff_skb_dtor = NULL;
+	}
+	spin_unlock_bh(&caifd->flow_lock);
+
+	if (dtor)
+		dtor(skb);
+
+	if (send_xoff)
+		caifd->layer.up->
+			ctrlcmd(caifd->layer.up,
+				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
+				caifd->layer.id);
+	caifd_put(caifd);
+}
+
 static int transmit(struct cflayer *layer, struct cfpkt *pkt)
 {
-	int err;
+	int err, high = 0, qlen = 0;
+	struct caif_dev_common *caifdev;
 	struct caif_device_entry *caifd =
 	    container_of(layer, struct caif_device_entry, layer);
 	struct sk_buff *skb;
+	struct netdev_queue *txq;
+
+	rcu_read_lock_bh();
 
 	skb = cfpkt_tonative(pkt);
 	skb->dev = caifd->netdev;
+	skb_reset_network_header(skb);
+	skb->protocol = htons(ETH_P_CAIF);
+	caifdev = netdev_priv(caifd->netdev);
+
+	/* Check if we need to handle xoff */
+	if (likely(caifd->netdev->tx_queue_len == 0))
+		goto noxoff;
+
+	if (unlikely(caifd->xoff))
+		goto noxoff;
+
+	if (likely(!netif_queue_stopped(caifd->netdev))) {
+		/* If we run with a TX queue, check if the queue is too long*/
+		txq = netdev_get_tx_queue(skb->dev, 0);
+		qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
+
+		if (likely(qlen == 0))
+			goto noxoff;
+
+		high = (caifd->netdev->tx_queue_len * q_high) / 100;
+		if (likely(qlen < high))
+			goto noxoff;
+	}
+
+	/* Hold lock while accessing xoff */
+	spin_lock_bh(&caifd->flow_lock);
+	if (caifd->xoff) {
+		spin_unlock_bh(&caifd->flow_lock);
+		goto noxoff;
+	}
+
+	/*
+	 * Handle flow off, we do this by temporary hi-jacking this
+	 * skb's destructor function, and replace it with our own
+	 * flow-on callback. The callback will set flow-on and call
+	 * the original destructor.
+	 */
+
+	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
+			netif_queue_stopped(caifd->netdev),
+			qlen, high);
+	caifd->xoff = 1;
+	caifd->xoff_skb = skb;
+	caifd->xoff_skb_dtor = skb->destructor;
+	skb->destructor = caif_flow_cb;
+	spin_unlock_bh(&caifd->flow_lock);
+
+	caifd->layer.up->ctrlcmd(caifd->layer.up,
+				_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+				caifd->layer.id);
+noxoff:
+	rcu_read_unlock_bh();
 
 	err = dev_queue_xmit(skb);
 	if (err > 0)
@@ -172,7 +273,10 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
 
 	/* Release reference to stack upwards */
 	caifd_put(caifd);
-	return 0;
+
+	if (err != 0)
+		err = NET_RX_DROP;
+	return err;
 }
 
 static struct packet_type caif_packet_type __read_mostly = {
@@ -203,6 +307,57 @@ static void dev_flowctrl(struct net_device *dev, int on)
 	caifd_put(caifd);
 }
 
+void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+			struct cflayer *link_support, int head_room,
+			struct cflayer **layer, int (**rcv_func)(
+				struct sk_buff *, struct net_device *,
+				struct packet_type *, struct net_device *))
+{
+	struct caif_device_entry *caifd;
+	enum cfcnfg_phy_preference pref;
+	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
+	struct caif_device_entry_list *caifdevs;
+
+	caifdevs = caif_device_list(dev_net(dev));
+	if (!cfg || !caifdevs)
+		return;
+	caifd = caif_device_alloc(dev);
+	if (!caifd)
+		return;
+	*layer = &caifd->layer;
+	spin_lock_init(&caifd->flow_lock);
+
+	switch (caifdev->link_select) {
+	case CAIF_LINK_HIGH_BANDW:
+		pref = CFPHYPREF_HIGH_BW;
+		break;
+	case CAIF_LINK_LOW_LATENCY:
+		pref = CFPHYPREF_LOW_LAT;
+		break;
+	default:
+		pref = CFPHYPREF_HIGH_BW;
+		break;
+	}
+	mutex_lock(&caifdevs->lock);
+	list_add_rcu(&caifd->list, &caifdevs->list);
+
+	strncpy(caifd->layer.name, dev->name,
+		sizeof(caifd->layer.name) - 1);
+	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+	caifd->layer.transmit = transmit;
+	cfcnfg_add_phy_layer(cfg,
+				dev,
+				&caifd->layer,
+				pref,
+				link_support,
+				caifdev->use_fcs,
+				head_room);
+	mutex_unlock(&caifdevs->lock);
+	if (rcv_func)
+		*rcv_func = receive;
+}
+EXPORT_SYMBOL(caif_enroll_dev);
+
 /* notify Caif of device events */
 static int caif_device_notify(struct notifier_block *me, unsigned long what,
 			      void *arg)
@@ -210,62 +365,40 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 	struct net_device *dev = arg;
 	struct caif_device_entry *caifd = NULL;
 	struct caif_dev_common *caifdev;
-	enum cfcnfg_phy_preference pref;
-	enum cfcnfg_phy_type phy_type;
 	struct cfcnfg *cfg;
+	struct cflayer *layer, *link_support;
+	int head_room = 0;
 	struct caif_device_entry_list *caifdevs;
 
-	if (dev->type != ARPHRD_CAIF)
-		return 0;
-
 	cfg = get_cfcnfg(dev_net(dev));
-	if (cfg == NULL)
+	caifdevs = caif_device_list(dev_net(dev));
+	if (!cfg || !caifdevs)
 		return 0;
 
-	caifdevs = caif_device_list(dev_net(dev));
+	caifd = caif_get(dev);
+	if (caifd == NULL && dev->type != ARPHRD_CAIF)
+		return 0;
 
 	switch (what) {
 	case NETDEV_REGISTER:
-		caifd = caif_device_alloc(dev);
-		if (!caifd)
-			return 0;
+		if (caifd != NULL)
+			break;
 
 		caifdev = netdev_priv(dev);
-		caifdev->flowctrl = dev_flowctrl;
-		caifd->layer.transmit = transmit;
-
-		if (caifdev->use_frag)
-			phy_type = CFPHYTYPE_FRAG;
-		else
-			phy_type = CFPHYTYPE_CAIF;
-
-		switch (caifdev->link_select) {
-		case CAIF_LINK_HIGH_BANDW:
-			pref = CFPHYPREF_HIGH_BW;
-			break;
-		case CAIF_LINK_LOW_LATENCY:
-			pref = CFPHYPREF_LOW_LAT;
-			break;
-		default:
-			pref = CFPHYPREF_HIGH_BW;
-			break;
+		link_support = NULL;
+		if (caifdev->use_frag) {
+			head_room = 1;
+			link_support = cfserl_create(dev->ifindex,
+							caifdev->use_stx);
+			if (!link_support) {
+				pr_warn("Out of memory\n");
+				break;
+			}
 		}
-		strncpy(caifd->layer.name, dev->name,
-			sizeof(caifd->layer.name) - 1);
-		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
-
-		mutex_lock(&caifdevs->lock);
-		list_add_rcu(&caifd->list, &caifdevs->list);
-
-		cfcnfg_add_phy_layer(cfg,
-				     phy_type,
-				     dev,
-				     &caifd->layer,
-				     pref,
-				     caifdev->use_fcs,
-				     caifdev->use_stx);
-		mutex_unlock(&caifdevs->lock);
+		caif_enroll_dev(dev, caifdev, link_support, head_room,
+				&layer, NULL);
+		caifdev->flowctrl = dev_flowctrl;
 		break;
 
 	case NETDEV_UP:
@@ -277,6 +410,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 			break;
 		}
 
+		caifd->xoff = 0;
 		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
 		rcu_read_unlock();
 
@@ -298,6 +432,24 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 		caifd->layer.up->ctrlcmd(caifd->layer.up,
 					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
 					 caifd->layer.id);
+
+		spin_lock_bh(&caifd->flow_lock);
+
+		/*
+		 * Replace our xoff-destructor with original destructor.
+		 * We trust that skb->destructor *always* is called before
+		 * the skb reference is invalid. The hijacked SKB destructor
+		 * takes the flow_lock so manipulating the skb->destructor here
+		 * should be safe.
+		 */
+		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
+			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
+
+		caifd->xoff = 0;
+		caifd->xoff_skb_dtor = NULL;
+		caifd->xoff_skb = NULL;
+
+		spin_unlock_bh(&caifd->flow_lock);
 		caifd_put(caifd);
 		break;
 
@@ -353,15 +505,15 @@ static struct notifier_block caif_device_notifier = {
 static int caif_init_net(struct net *net)
 {
 	struct caif_net *caifn = net_generic(net, caif_net_id);
-	BUG_ON(!caifn);
+	if (WARN_ON(!caifn))
+		return -EINVAL;
+
 	INIT_LIST_HEAD(&caifn->caifdevs.list);
 	mutex_init(&caifn->caifdevs.lock);
 
 	caifn->cfg = cfcnfg_create();
-	if (!caifn->cfg) {
-		pr_warn("can't create cfcnfg\n");
+	if (!caifn->cfg)
 		return -ENOMEM;
-	}
 	return 0;
 }
 
@@ -371,17 +523,14 @@ static void caif_exit_net(struct net *net)
 	struct caif_device_entry *caifd, *tmp;
 	struct caif_device_entry_list *caifdevs =
 	    caif_device_list(net);
-	struct cfcnfg *cfg;
+	struct cfcnfg *cfg = get_cfcnfg(net);
+
+	if (!cfg || !caifdevs)
+		return;
 
 	rtnl_lock();
 	mutex_lock(&caifdevs->lock);
 
-	cfg = get_cfcnfg(net);
-	if (cfg == NULL) {
-		mutex_unlock(&caifdevs->lock);
-		return;
-	}
-
 	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
 		int i = 0;
 		list_del_rcu(&caifd->list);
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
new file mode 100644
index 000000000000..5fc9eca8cd41
--- /dev/null
+++ b/net/caif/caif_usb.c
@@ -0,0 +1,208 @@
+/*
+ * CAIF USB handler
+ * Copyright (C) ST-Ericsson AB 2011
+ * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/usbnet.h>
+#include <net/netns/generic.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+
+MODULE_LICENSE("GPL");
+
+#define CFUSB_PAD_DESCR_SZ 1	/* Alignment descriptor length */
+#define CFUSB_ALIGNMENT 4	/* Number of bytes to align. */
+#define CFUSB_MAX_HEADLEN (CFUSB_PAD_DESCR_SZ + CFUSB_ALIGNMENT-1)
+#define STE_USB_VID 0x04cc	/* USB Product ID for ST-Ericsson */
+#define STE_USB_PID_CAIF 0x2306	/* Product id for CAIF Modems */
+
+struct cfusbl {
+	struct cflayer layer;
+	u8 tx_eth_hdr[ETH_HLEN];
+};
+
+static bool pack_added;
+
+static int cfusbl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+	u8 hpad;
+
+	/* Remove padding. */
+	cfpkt_extr_head(pkt, &hpad, 1);
+	cfpkt_extr_head(pkt, NULL, hpad);
+	return layr->up->receive(layr->up, pkt);
+}
+
+static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+	struct caif_payload_info *info;
+	u8 hpad;
+	u8 zeros[CFUSB_ALIGNMENT];
+	struct sk_buff *skb;
+	struct cfusbl *usbl = container_of(layr, struct cfusbl, layer);
+
+	skb = cfpkt_tonative(pkt);
+
+	skb_reset_network_header(skb);
+	skb->protocol = htons(ETH_P_IP);
+
+	info = cfpkt_info(pkt);
+	hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
+
+	if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
+		pr_warn("Headroom to small\n");
+		kfree_skb(skb);
+		return -EIO;
+	}
+	memset(zeros, 0, hpad);
+
+	cfpkt_add_head(pkt, zeros, hpad);
+	cfpkt_add_head(pkt, &hpad, 1);
+	cfpkt_add_head(pkt, usbl->tx_eth_hdr, sizeof(usbl->tx_eth_hdr));
+	return layr->dn->transmit(layr->dn, pkt);
+}
+
+static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+					int phyid)
+{
+	if (layr->up && layr->up->ctrlcmd)
+		layr->up->ctrlcmd(layr->up, ctrl, layr->id);
+}
+
+struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+					u8 braddr[ETH_ALEN])
+{
+	struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
+
+	if (!this) {
+		pr_warn("Out of memory\n");
+		return NULL;
+	}
+	caif_assert(offsetof(struct cfusbl, layer) == 0);
+
+	memset(this, 0, sizeof(struct cflayer));
+	this->layer.receive = cfusbl_receive;
+	this->layer.transmit = cfusbl_transmit;
+	this->layer.ctrlcmd = cfusbl_ctrlcmd;
+	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "usb%d", phyid);
+	this->layer.id = phyid;
+
+	/*
+	 * Construct TX ethernet header:
+	 *	0-5	destination address
+	 *	5-11	source address
+	 *	12-13	protocol type
+	 */
+	memcpy(&this->tx_eth_hdr[ETH_ALEN], braddr, ETH_ALEN);
+	memcpy(&this->tx_eth_hdr[ETH_ALEN], ethaddr, ETH_ALEN);
+	this->tx_eth_hdr[12] = cpu_to_be16(ETH_P_802_EX1) & 0xff;
+	this->tx_eth_hdr[13] = (cpu_to_be16(ETH_P_802_EX1) >> 8) & 0xff;
+	pr_debug("caif ethernet TX-header dst:%pM src:%pM type:%02x%02x\n",
+			this->tx_eth_hdr, this->tx_eth_hdr + ETH_ALEN,
+			this->tx_eth_hdr[12], this->tx_eth_hdr[13]);
+
+	return (struct cflayer *) this;
+}
+
+static struct packet_type caif_usb_type __read_mostly = {
+	.type = cpu_to_be16(ETH_P_802_EX1),
+};
+
+static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+			void *arg)
+{
+	struct net_device *dev = arg;
+	struct caif_dev_common common;
+	struct cflayer *layer, *link_support;
+	struct usbnet *usbnet = netdev_priv(dev);
+	struct usb_device *usbdev = usbnet->udev;
+	struct ethtool_drvinfo drvinfo;
+
+	/*
+	 * Quirks: High-jack ethtool to find if we have a NCM device,
+	 * and find it's VID/PID.
+	 */
+	if (dev->ethtool_ops == NULL || dev->ethtool_ops->get_drvinfo == NULL)
+		return 0;
+
+	dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+	if (strncmp(drvinfo.driver, "cdc_ncm", 7) != 0)
+		return 0;
+
+	pr_debug("USB CDC NCM device VID:0x%4x PID:0x%4x\n",
+		le16_to_cpu(usbdev->descriptor.idVendor),
+		le16_to_cpu(usbdev->descriptor.idProduct));
+
+	/* Check for VID/PID that supports CAIF */
+	if (!(le16_to_cpu(usbdev->descriptor.idVendor) == STE_USB_VID &&
+		le16_to_cpu(usbdev->descriptor.idProduct) == STE_USB_PID_CAIF))
+		return 0;
+
+	if (what == NETDEV_UNREGISTER)
+		module_put(THIS_MODULE);
+
+	if (what != NETDEV_REGISTER)
+		return 0;
+
+	__module_get(THIS_MODULE);
+
+	memset(&common, 0, sizeof(common));
+	common.use_frag = false;
+	common.use_fcs = false;
+	common.use_stx = false;
+	common.link_select = CAIF_LINK_HIGH_BANDW;
+	common.flowctrl = NULL;
+
+	link_support = cfusbl_create(dev->ifindex, dev->dev_addr,
+					dev->broadcast);
+
+	if (!link_support)
+		return -ENOMEM;
+
+	if (dev->num_tx_queues > 1)
+		pr_warn("USB device uses more than one tx queue\n");
+
+	caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+			&layer, &caif_usb_type.func);
+	if (!pack_added)
+		dev_add_pack(&caif_usb_type);
+	pack_added = true;
+
+	strncpy(layer->name, dev->name,
+			sizeof(layer->name) - 1);
+	layer->name[sizeof(layer->name) - 1] = 0;
+
+	return 0;
+}
+
+static struct notifier_block caif_device_notifier = {
+	.notifier_call = cfusbl_device_notify,
+	.priority = 0,
+};
+
+static int __init cfusbl_init(void)
+{
+	return register_netdevice_notifier(&caif_device_notifier);
+}
+
+static void __exit cfusbl_exit(void)
+{
+	unregister_netdevice_notifier(&caif_device_notifier);
+	dev_remove_pack(&caif_usb_type);
+}
+
+module_init(cfusbl_init);
+module_exit(cfusbl_exit);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 00523ecc4ced..598aafb4cb51 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -45,8 +45,8 @@ struct cfcnfg_phyinfo {
 	/* Interface index */
 	int ifindex;
 
-	/* Use Start of frame extension */
-	bool use_stx;
+	/* Protocol head room added for CAIF link layer */
+	int head_room;
 
 	/* Use Start of frame checksum */
 	bool use_fcs;
@@ -187,11 +187,11 @@ int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
 	if (channel_id != 0) {
 		struct cflayer *servl;
 		servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
+		cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
 		if (servl != NULL)
 			layer_set_up(servl, NULL);
 	} else
 		pr_debug("nothing to disconnect\n");
-	cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
 
 	/* Do RCU sync before initiating cleanup */
 	synchronize_rcu();
@@ -350,9 +350,7 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
 	*ifindex = phy->ifindex;
 	*proto_tail = 2;
-	*proto_head =
-
-	    protohead[param.linktype] + (phy->use_stx ? 1 : 0);
+	*proto_head = protohead[param.linktype] + phy->head_room;
 
 	rcu_read_unlock();
@@ -460,13 +458,13 @@ unlock:
 }
 
 void
-cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
 		     struct net_device *dev,
 		     struct cflayer *phy_layer, enum cfcnfg_phy_preference pref,
-		     bool fcs, bool stx)
+		     struct cflayer *link_support,
+		     bool fcs, int head_room)
 {
 	struct cflayer *frml;
-	struct cflayer *phy_driver = NULL;
 	struct cfcnfg_phyinfo *phyinfo = NULL;
 	int i;
 	u8 phyid;
@@ -482,26 +480,13 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
 			goto got_phyid;
 	}
 	pr_warn("Too many CAIF Link Layers (max 6)\n");
-	goto out_err;
+	goto out;
 
 got_phyid:
 	phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
 	if (!phyinfo)
 		goto out_err;
 
-	switch (phy_type) {
-	case CFPHYTYPE_FRAG:
-		phy_driver =
-		    cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
-		if (!phy_driver)
-			goto out_err;
-		break;
-	case CFPHYTYPE_CAIF:
-		phy_driver = NULL;
-		break;
-	default:
-		goto out_err;
-	}
 	phy_layer->id = phyid;
 	phyinfo->pref = pref;
 	phyinfo->id = phyid;
@@ -509,7 +494,7 @@ got_phyid:
 	phyinfo->dev_info.dev = dev;
 	phyinfo->phy_layer = phy_layer;
 	phyinfo->ifindex = dev->ifindex;
-	phyinfo->use_stx = stx;
+	phyinfo->head_room = head_room;
 	phyinfo->use_fcs = fcs;
 
 	frml = cffrml_create(phyid, fcs);
@@ -519,23 +504,23 @@ got_phyid:
 	phyinfo->frm_layer = frml;
 	layer_set_up(frml, cnfg->mux);
 
-	if (phy_driver != NULL) {
-		phy_driver->id = phyid;
-		layer_set_dn(frml, phy_driver);
-		layer_set_up(phy_driver, frml);
-		layer_set_dn(phy_driver, phy_layer);
-		layer_set_up(phy_layer, phy_driver);
+	if (link_support != NULL) {
+		link_support->id = phyid;
+		layer_set_dn(frml, link_support);
+		layer_set_up(link_support, frml);
+		layer_set_dn(link_support, phy_layer);
+		layer_set_up(phy_layer, link_support);
 	} else {
 		layer_set_dn(frml, phy_layer);
 		layer_set_up(phy_layer, frml);
 	}
 	list_add_rcu(&phyinfo->node, &cnfg->phys);
 
+out:
 	mutex_unlock(&cnfg->lock);
 	return;
 
 out_err:
-	kfree(phy_driver);
 	kfree(phyinfo);
 	mutex_unlock(&cnfg->lock);
 }
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index df08c47183d4..e335ba859b97 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -63,7 +63,6 @@ static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
 	return (struct cfpkt *) skb;
 }
 
-
 struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
 {
 	struct cfpkt *pkt = skb_to_pkt(nativepkt);
@@ -105,14 +104,12 @@ void cfpkt_destroy(struct cfpkt *pkt)
 	kfree_skb(skb);
 }
 
-
 inline bool cfpkt_more(struct cfpkt *pkt)
 {
 	struct sk_buff *skb = pkt_to_skb(pkt);
 	return skb->len > 0;
 }
 
-
 int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
 {
 	struct sk_buff *skb = pkt_to_skb(pkt);
@@ -144,9 +141,11 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
 	}
 	from = skb_pull(skb, len);
 	from -= len;
-	memcpy(data, from, len);
+	if (data)
+		memcpy(data, from, len);
 	return 0;
 }
+EXPORT_SYMBOL(cfpkt_extr_head);
 
 int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
 {
@@ -170,13 +169,11 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
 	return 0;
 }
 
-
 int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
 {
 	return cfpkt_add_body(pkt, NULL, len);
 }
 
-
 int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
 {
 	struct sk_buff *skb = pkt_to_skb(pkt);
@@ -255,21 +252,19 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
 	memcpy(to, data, len);
 	return 0;
 }
-
+EXPORT_SYMBOL(cfpkt_add_head);
 
 inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
 {
 	return cfpkt_add_body(pkt, data, len);
 }
 
-
 inline u16 cfpkt_getlen(struct cfpkt *pkt)
 {
 	struct sk_buff *skb = pkt_to_skb(pkt);
 	return skb->len;
 }
 
-
 inline u16 cfpkt_iterate(struct cfpkt *pkt,
 			 u16 (*iter_func)(u16, void *, u16),
 			 u16 data)
@@ -287,7 +282,6 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
 	return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
 }
 
-
 int cfpkt_setlen(struct cfpkt *pkt, u16 len)
 {
 	struct sk_buff *skb = pkt_to_skb(pkt);
@@ -399,3 +393,4 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
 {
 	return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
 }
+EXPORT_SYMBOL(cfpkt_info);
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 81660f809713..6dc75d4f8d94 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -190,7 +190,7 @@ out:
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
+	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);
 
 	/* Add info for MUX-layer to route the packet out. */
 	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 797c8d165993..8e68b97f13ee 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -31,7 +31,7 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 				int phyid);
 
-struct cflayer *cfserl_create(int type, int instance, bool use_stx)
+struct cflayer *cfserl_create(int instance, bool use_stx)
 {
 	struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
 	if (!this)
@@ -40,7 +40,6 @@ struct cflayer *cfserl_create(int type, int instance, bool use_stx)
 	this->layer.receive = cfserl_receive;
 	this->layer.transmit = cfserl_transmit;
 	this->layer.ctrlcmd = cfserl_ctrlcmd;
-	this->layer.type = type;
 	this->usestx = use_stx;
 	spin_lock_init(&this->sync);
 	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
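
The flow-control change in caif_dev.c above boils down to a destructor-hijack handshake: when the interface's qdisc backlog crosses the q_high watermark, transmit() signals flow-off upwards and substitutes its own destructor on the stalled skb; when the driver eventually frees that skb, the substituted destructor restores the original one and signals flow-on. The sketch below restates that pattern in condensed form; caif_xoff_if_needed() is an illustrative helper, not a function in the patch, and refcounting, the RCU/BH protection around transmit() and the NETDEV_DOWN cleanup are elided.

/*
 * Condensed sketch of the xoff/xon handshake introduced in caif_dev.c.
 * Not the verbatim kernel code: error handling and locking around
 * caif_get() are simplified for illustration.
 */
static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd = caif_get(skb->dev);
	void (*dtor)(struct sk_buff *skb);

	spin_lock_bh(&caifd->flow_lock);
	caifd->xoff = 0;			/* the stalled skb is being freed */
	dtor = caifd->xoff_skb_dtor;		/* recover the original destructor */
	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;
	spin_unlock_bh(&caifd->flow_lock);

	if (dtor)
		dtor(skb);

	/* tell the CAIF stack it may transmit again */
	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				 caifd->layer.id);
}

static void caif_xoff_if_needed(struct caif_device_entry *caifd,
				struct sk_buff *skb, int qlen, int high)
{
	if (caifd->xoff || qlen < high)		/* still below the q_high watermark */
		return;

	spin_lock_bh(&caifd->flow_lock);
	caifd->xoff = 1;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;	/* stash the original destructor */
	skb->destructor = caif_flow_cb;		/* hijack: runs when the skb is freed */
	spin_unlock_bh(&caifd->flow_lock);

	/* stop the CAIF stack from sending until caif_flow_cb() fires */
	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
}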