author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 10:03:46 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 10:03:46 -0700
commit     cb62ab71fe2b16e8203a0f0a2ef4eda23d761338 (patch)
tree       536ba39658e47d511a489c52f7aac60cd78967e5 /net/caif
parent     31ed8e6f93a27304c9e157dab0267772cd94eaad (diff)
parent     74863948f925d9f3bb4e3d3a783e49e9c662d839 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:

 1) Get rid of the error-prone NLA_PUT*() macros that used an embedded goto
    (a minimal sketch of the replacement nla_put_*() pattern follows this
    list).

 2) Kill off the token-ring and MCA networking drivers, from Paul Gortmaker.

 3) Reduce high-order allocations made by datagram AF_UNIX sockets, from
    Eric Dumazet.

 4) Add PTP hardware clock support to IGB and IXGBE, from Richard Cochran
    and Jacob Keller.

 5) Allow users to query timestamping capabilities of a card via ethtool,
    from Richard Cochran.

 6) Add loadbalance mode to the teaming driver, from Jiri Pirko.  Part of
    this is that we can now have BPF filters not attached to sockets, and
    the loadbalancing function is calculated using one.

 7) Francois Romieu went through the network drivers removing gratuitous
    uses of netdev->base_addr; perhaps some day we can remove it completely,
    but it is still used for ISA probing.

 8) Add a BPF JIT for sparc.  I know, who cares, right? :-)

 9) Move the networking sysctl registry away from using the compatibility-mode
    interfaces in the sysctl code.  From Eric W. Biederman.

10) Pavel Emelyanov added a way to save and restore TCP socket state via the
    TCP_REPAIR, TCP_REPAIR_QUEUE, and TCP_QUEUE_SEQ socket options, as well
    as a way to forcefully bind a socket to a port via the sk->sk_reuse
    value SK_FORCE_REUSE.  There is also a TCP_REPAIR_OPTIONS option which
    allows reinstating the TCP options enabled on the connection.

11) Several enhancements from Eric Dumazet that, in particular, can
    significantly improve splice performance on TCP sockets:

    a) Reset the offset of the per-socket sendmsg page when we know we're
       the only user of the page in linear_to_page().

    b) Add facilities such that skb->data can be backed by a page rather
       than SLAB kmalloc'd memory.  In particular, devices which were
       receiving into linear RX buffers can now end up providing paged
       data.  The big result is that code like splice and GRO no longer
       has to copy.

12) Allow a pure sender to more gracefully handle ACK backlogs in TCP.
    What can happen at high rates is that the sender hasn't grown his
    receive buffer limits at all (he's not receiving data so really doesn't
    need to), but the non-data ACKs consume receive buffer space.
    sk_add_backlog() is too aggressive in dropping frames in this case, so
    relax its requirements by using the receive buffer plus the send buffer
    limit as the backlog limit instead of just the former.  Also from Eric
    Dumazet.

13) Add ipv6 support to L2TP, from Benjamin LaHaise, James Chapman, and
    Chris Elston.

14) Implement TCP early retransmit (RFC 5827), from Yuchung Cheng.
    Basically, we can start fast retransmit before hitting the dupack
    threshold under certain conditions.

15) New CODEL active queue management packet scheduler, from Eric Dumazet,
    based upon initial work by Dave Taht.  Basically, the big feature is
    that packets are dropped (or ECN bits are set) based upon how long
    packets live in the queue, rather than on the queue length (which is
    what RED uses).
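A minimal sketch of the nla_put_*() conversion from item 1, using an
illustrative fill callback and attribute type that are not part of this
merge; the real conversion for CAIF is visible in the chnl_net.c hunk at the
bottom of this diff:

#include <linux/errno.h>
#include <net/netlink.h>

/* The removed NLA_PUT_U32() macro hid a "goto nla_put_failure" inside
 * itself; nla_put_u32() instead returns non-zero when the skb has no room,
 * and the caller takes the failure path explicitly.
 */
static int example_fill_info(struct sk_buff *skb, int attrtype, u32 val)
{
	if (nla_put_u32(skb, attrtype, val))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}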
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1341 commits)
  drivers/net/stmmac: seq_file fix memory leak
  ipv6/exthdrs: strict Pad1 and PadN check
  USB: qmi_wwan: Add ZTE (Vodafone) K3520-Z
  USB: qmi_wwan: Add ZTE (Vodafone) K3765-Z
  USB: qmi_wwan: Make forced int 4 whitelist generic
  net/ipv4: replace simple_strtoul with kstrtoul
  net/ipv4/ipconfig: neaten __setup placement
  net: qmi_wwan: Add Vodafone/Huawei K5005 support
  net: cdc_ether: Add ZTE WWAN matches before generic Ethernet
  ipv6: use skb coalescing in reassembly
  ipv4: use skb coalescing in defragmentation
  net: introduce skb_try_coalesce()
  net:ipv6:fixed space issues relating to operators.
  net:ipv6:fixed a trailing white space issue.
  ipv6: disable GSO on sockets hitting dst_allfrag
  tg3: use netdev_alloc_frag() API
  net: napi_frags_skb() is static
  ppp: avoid false drop_monitor false positives
  ipv6: bool/const conversions phase2
  ipx: Remove spurious NULL checking in ipx_ioctl().
  ...
Diffstat (limited to 'net/caif')
-rw-r--r--  net/caif/caif_socket.c    28
-rw-r--r--  net/caif/cfctrl.c          4
-rw-r--r--  net/caif/cfpkt_skbuff.c    7
-rw-r--r--  net/caif/cfsrvl.c          3
-rw-r--r--  net/caif/chnl_net.c       14
5 files changed, 40 insertions(+), 16 deletions(-)
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 5016fa57b623..fb8944355264 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -19,7 +19,7 @@
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/caif/caif_socket.h>
-#include <linux/atomic.h>
+#include <linux/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/caif/caif_layer.h>
@@ -130,11 +130,10 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
- if (net_ratelimit())
- pr_debug("sending flow OFF (queue len = %d %d)\n",
- atomic_read(&cf_sk->sk.sk_rmem_alloc),
- sk_rcvbuf_lowwater(cf_sk));
+ (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
+ net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
+ atomic_read(&cf_sk->sk.sk_rmem_alloc),
+ sk_rcvbuf_lowwater(cf_sk));
set_rx_flow_off(cf_sk);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
@@ -144,8 +143,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return err;
if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
set_rx_flow_off(cf_sk);
- if (net_ratelimit())
- pr_debug("sending flow OFF due to rmem_schedule\n");
+ net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
skb->dev = NULL;
@@ -505,6 +503,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
memset(skb->cb, 0, sizeof(struct caif_payload_info));
+ cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
if (cf_sk->layer.dn == NULL) {
kfree_skb(skb);
@@ -1062,6 +1061,18 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
/* Store the protocol */
sk->sk_protocol = (unsigned char) protocol;
+ /* Initialize default priority for well-known cases */
+ switch (protocol) {
+ case CAIFPROTO_AT:
+ sk->sk_priority = TC_PRIO_CONTROL;
+ break;
+ case CAIFPROTO_RFM:
+ sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
+ break;
+ default:
+ sk->sk_priority = TC_PRIO_BESTEFFORT;
+ }
+
/*
* Lock in order to try to stop someone from opening the socket
* too early.
@@ -1081,7 +1092,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
set_rx_flow_on(cf_sk);
/* Set default options on configuration */
- cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
cf_sk->conn_req.protocol = protocol;
release_sock(&cf_sk->sk);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 5cf52225692e..047cd0eec022 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -9,6 +9,7 @@
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfctrl.h>
@@ -189,6 +190,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
cfctrl->serv.dev_info.id = physlinkid;
cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
cfpkt_addbdy(pkt, physlinkid);
+ cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
dn->transmit(dn, pkt);
}
@@ -281,6 +283,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
* might arrive with the newly allocated channel ID.
*/
cfpkt_info(pkt)->dev_info->id = param->phyid;
+ cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
ret =
dn->transmit(dn, pkt);
if (ret < 0) {
@@ -314,6 +317,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
cfpkt_addbdy(pkt, channelid);
init_info(cfpkt_info(pkt), cfctrl);
+ cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
ret =
dn->transmit(dn, pkt);
#ifndef CAIF_NO_LOOP
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index e335ba859b97..863dedd91bb6 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -381,6 +381,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
memcpy(skb2->data, split, len2nd);
skb2->tail += len2nd;
skb2->len += len2nd;
+ skb2->priority = skb->priority;
return skb_to_pkt(skb2);
}
@@ -394,3 +395,9 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
}
EXPORT_SYMBOL(cfpkt_info);
+
+void cfpkt_set_prio(struct cfpkt *pkt, int prio)
+{
+ pkt_to_skb(pkt)->priority = prio;
+}
+EXPORT_SYMBOL(cfpkt_set_prio);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 4aa33d4496b6..dd485f6128e8 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
@@ -120,6 +121,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
info->channel_id = service->layer.id;
info->hdr_len = 1;
info->dev_info = &service->dev_info;
+ cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
return layr->dn->transmit(layr->dn, pkt);
}
case CAIF_MODEMCMD_FLOW_OFF_REQ:
@@ -140,6 +142,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
info->channel_id = service->layer.id;
info->hdr_len = 1;
info->dev_info = &service->dev_info;
+ cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
return layr->dn->transmit(layr->dn, pkt);
}
default:
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index d09340e1523f..69771c04ba8f 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -424,14 +424,14 @@ static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct chnl_net *priv;
u8 loop;
priv = netdev_priv(dev);
- NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
- priv->conn_req.sockaddr.u.dgm.connection_id);
- NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
- priv->conn_req.sockaddr.u.dgm.connection_id);
+ if (nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID,
+ priv->conn_req.sockaddr.u.dgm.connection_id) ||
+ nla_put_u32(skb, IFLA_CAIF_IPV6_CONNID,
+ priv->conn_req.sockaddr.u.dgm.connection_id))
+ goto nla_put_failure;
loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
- NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);
-
-
+ if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loop))
+ goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
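
Taken together, the caif_socket.c, cfctrl.c, cfpkt_skbuff.c and cfsrvl.c
hunks above route a CAIF socket's sk_priority into each outgoing packet
through the new cfpkt_set_prio() helper, while CAIF control traffic is
pinned to TC_PRIO_CONTROL.  A hedged userspace sketch of driving this from
an application, assuming the standard SO_PRIORITY socket option; the
function name and the protocol/priority choices below are illustrative only,
not part of this merge:

#include <sys/socket.h>
#include <linux/caif/caif_socket.h>
#include <linux/pkt_sched.h>

int open_prio_caif_socket(void)
{
	int prio = TC_PRIO_INTERACTIVE;	/* illustrative choice */
	int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_UTIL);

	if (fd < 0)
		return -1;

	/* SO_PRIORITY sets sk->sk_priority, overriding the per-protocol
	 * default installed in caif_create(); transmit_skb() then copies
	 * it into the outgoing packet via cfpkt_set_prio().
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)) < 0) {
		/* keep the protocol-based default priority on failure */
	}

	return fd;
}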