From 9446f3efc53512e5ad9e0966539021a2a41fe5a0 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 17 Jan 2012 10:32:01 +0100 Subject: mac80211: fix debugfs key->station symlink Since stations moved into a virtual interface subdirectory, this link has been broken. Fix it. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/debugfs_key.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 38e6101190d9..59edcd95a58d 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c @@ -225,9 +225,9 @@ KEY_OPS(key); key, &key_##name##_ops); void ieee80211_debugfs_key_add(struct ieee80211_key *key) - { +{ static int keycount; - char buf[50]; + char buf[100]; struct sta_info *sta; if (!key->local->debugfs.keys) @@ -244,7 +244,8 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key) sta = key->sta; if (sta) { - sprintf(buf, "../../stations/%pM", sta->sta.addr); + sprintf(buf, "../../netdev:%s/stations/%pM", + sta->sdata->name, sta->sta.addr); key->debugfs.stalink = debugfs_create_symlink("station", key->debugfs.dir, buf); } -- cgit v1.2.3 From 65e8b0ccb6cf176f8eddb1b05534be46580da9dd Mon Sep 17 00:00:00 2001 From: Javier Cardona Date: Tue, 17 Jan 2012 18:17:46 -0800 Subject: mac80211: Use the right headroom size for mesh mgmt frames Use local->tx_headroom instead of local->hw.extra_tx_headroom. local->tx_headroom is the max of hw.extra_tx_headroom required by the driver and the headroom required by mac80211 for status reporting. On drivers where hw.extra_tx_headroom is smaller than what mac80211 requires (e.g. ath5k), we would not reserve sufficient buffer space to report tx status. Also, don't reserve local->tx_headroom + local->hw.extra_tx_headroom. Reported-by: Simon Morgenthaler Reported-by: Kai Scharwies Signed-off-by: Javier Cardona Signed-off-by: John W.
Linville --- net/mac80211/mesh_hwmp.c | 8 ++++---- net/mac80211/mesh_plink.c | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 73abb7524b2c..54df1b2bafd2 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -119,12 +119,12 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) + sizeof(mgmt->u.action.u.mesh_action); - skb = dev_alloc_skb(local->hw.extra_tx_headroom + + skb = dev_alloc_skb(local->tx_headroom + hdr_len + 2 + 37); /* max HWMP IE */ if (!skb) return -1; - skb_reserve(skb, local->hw.extra_tx_headroom); + skb_reserve(skb, local->tx_headroom); mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); memset(mgmt, 0, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | @@ -250,12 +250,12 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, if (time_before(jiffies, ifmsh->next_perr)) return -EAGAIN; - skb = dev_alloc_skb(local->hw.extra_tx_headroom + + skb = dev_alloc_skb(local->tx_headroom + hdr_len + 2 + 15 /* PERR IE */); if (!skb) return -1; - skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom); + skb_reserve(skb, local->tx_headroom); mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); memset(mgmt, 0, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 41ef1b476442..a17251730b9e 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -172,7 +172,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) + sizeof(mgmt->u.action.u.self_prot); - skb = dev_alloc_skb(local->hw.extra_tx_headroom + + skb = dev_alloc_skb(local->tx_headroom + hdr_len + 2 + /* capability info */ 2 + /* AID */ @@ -186,7 +186,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, sdata->u.mesh.ie_len); if (!skb) return -1; - skb_reserve(skb, local->hw.extra_tx_headroom); + skb_reserve(skb, local->tx_headroom); mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); memset(mgmt, 0, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | -- cgit v1.2.3 From bc4934bc61d0a11fd62c5187ff83645628f8be8b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 18 Jan 2012 14:10:25 +0100 Subject: mac80211: fix work removal on deauth request When deauth is requested while an auth or assoc work item is in progress, we currently delete it without regard for any state it might need to clean up. Fix it by cleaning up for those items. In the case Pontus found, the problem manifested itself as such: authenticate with 00:23:69:aa:dd:7b (try 1) authenticated failed to insert Dummy STA entry for the AP (error -17) deauthenticating from 00:23:69:aa:dd:7b by local choice (reason=2) It could also happen differently if the driver uses the tx_sync callback. We can't just call the ->done() method of the work items because that will lock up due to the locking in cfg80211. This fix isn't very clean, but that seems acceptable since I have patches pending to remove this code completely. Cc: stable@vger.kernel.org Reported-by: Pontus Fuchs Tested-by: Pontus Fuchs Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/mlme.c | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index ecb4c84c1bb3..295be92f7c77 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2750,7 +2750,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - struct ieee80211_work *wk; u8 bssid[ETH_ALEN]; bool assoc_bss = false; @@ -2763,30 +2762,47 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, assoc_bss = true; } else { bool not_auth_yet = false; + struct ieee80211_work *tmp, *wk = NULL; mutex_unlock(&ifmgd->mtx); mutex_lock(&local->mtx); - list_for_each_entry(wk, &local->work_list, list) { - if (wk->sdata != sdata) + list_for_each_entry(tmp, &local->work_list, list) { + if (tmp->sdata != sdata) continue; - if (wk->type != IEEE80211_WORK_DIRECT_PROBE && - wk->type != IEEE80211_WORK_AUTH && - wk->type != IEEE80211_WORK_ASSOC && - wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT) + if (tmp->type != IEEE80211_WORK_DIRECT_PROBE && + tmp->type != IEEE80211_WORK_AUTH && + tmp->type != IEEE80211_WORK_ASSOC && + tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT) continue; - if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN)) + if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN)) continue; - not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE; - list_del_rcu(&wk->list); - free_work(wk); + not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE; + list_del_rcu(&tmp->list); + synchronize_rcu(); + wk = tmp; break; } mutex_unlock(&local->mtx); + if (wk && wk->type == IEEE80211_WORK_ASSOC) { + /* clean up dummy sta & TX sync */ + sta_info_destroy_addr(wk->sdata, wk->filter_ta); + if (wk->assoc.synced) + drv_finish_tx_sync(local, wk->sdata, + wk->filter_ta, + IEEE80211_TX_SYNC_ASSOC); + } else if (wk && wk->type == IEEE80211_WORK_AUTH) { + if (wk->probe_auth.synced) + drv_finish_tx_sync(local, wk->sdata, + wk->filter_ta, + IEEE80211_TX_SYNC_AUTH); + } + kfree(wk); + /* * If somebody requests authentication and we haven't * sent out an auth frame yet there's no need to send -- cgit v1.2.3 From 013d97e9da1877f1334aa8ff3a19921ebbfe99b5 Mon Sep 17 00:00:00 2001 From: Francesco Ruggeri Date: Mon, 16 Jan 2012 10:40:10 +0000 Subject: net: race condition in ipv6 forwarding and disable_ipv6 parameters There is a race condition in addrconf_sysctl_forward() and addrconf_sysctl_disable(). These functions change idev->cnf.forwarding (resp. idev->cnf.disable_ipv6) and then try to grab the rtnl lock before performing any actions. If that fails they restore the original value and restart the syscall. This creates race conditions if ipv6 code tries to access these parameters, or if multiple instances try to do the same operation. As an example of the former, if __ipv6_ifa_notify() finds a 0 in idev->cnf.forwarding when invoked by addrconf_ifdown() it may not free anycast addresses, ultimately resulting in the net_device not being freed. This patch reads the user parameters into a temporary location and only writes the actual parameters when the rtnl lock is acquired. Tested in 2.6.38.8. Signed-off-by: Francesco Ruggeri Signed-off-by: David S. 
Miller --- net/ipv6/addrconf.c | 61 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 40 insertions(+), 21 deletions(-) (limited to 'net') diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index a225d5ee3c2f..c02280a4d126 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -502,29 +502,31 @@ static void addrconf_forward_change(struct net *net, __s32 newf) rcu_read_unlock(); } -static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) +static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) { struct net *net; + int old; + + if (!rtnl_trylock()) + return restart_syscall(); net = (struct net *)table->extra2; - if (p == &net->ipv6.devconf_dflt->forwarding) - return 0; + old = *p; + *p = newf; - if (!rtnl_trylock()) { - /* Restore the original values before restarting */ - *p = old; - return restart_syscall(); + if (p == &net->ipv6.devconf_dflt->forwarding) { + rtnl_unlock(); + return 0; } if (p == &net->ipv6.devconf_all->forwarding) { - __s32 newf = net->ipv6.devconf_all->forwarding; net->ipv6.devconf_dflt->forwarding = newf; addrconf_forward_change(net, newf); - } else if ((!*p) ^ (!old)) + } else if ((!newf) ^ (!old)) dev_forward_change((struct inet6_dev *)table->extra1); rtnl_unlock(); - if (*p) + if (newf) rt6_purge_dflt_routers(net); return 1; } @@ -4260,9 +4262,17 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write, int *valp = ctl->data; int val = *valp; loff_t pos = *ppos; + ctl_table lctl; int ret; - ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + /* + * ctl->data points to idev->cnf.forwarding, we should + * not modify it until we get the rtnl lock. + */ + lctl = *ctl; + lctl.data = &val; + + ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); if (write) ret = addrconf_fixup_forwarding(ctl, valp, val); @@ -4300,26 +4310,27 @@ static void addrconf_disable_change(struct net *net, __s32 newf) rcu_read_unlock(); } -static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old) +static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) { struct net *net; + int old; + + if (!rtnl_trylock()) + return restart_syscall(); net = (struct net *)table->extra2; + old = *p; + *p = newf; - if (p == &net->ipv6.devconf_dflt->disable_ipv6) + if (p == &net->ipv6.devconf_dflt->disable_ipv6) { + rtnl_unlock(); return 0; - - if (!rtnl_trylock()) { - /* Restore the original values before restarting */ - *p = old; - return restart_syscall(); } if (p == &net->ipv6.devconf_all->disable_ipv6) { - __s32 newf = net->ipv6.devconf_all->disable_ipv6; net->ipv6.devconf_dflt->disable_ipv6 = newf; addrconf_disable_change(net, newf); - } else if ((!*p) ^ (!old)) + } else if ((!newf) ^ (!old)) dev_disable_change((struct inet6_dev *)table->extra1); rtnl_unlock(); @@ -4333,9 +4344,17 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write, int *valp = ctl->data; int val = *valp; loff_t pos = *ppos; + ctl_table lctl; int ret; - ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + /* + * ctl->data points to idev->cnf.disable_ipv6, we should + * not modify it until we get the rtnl lock. 
+ */ + lctl = *ctl; + lctl.data = &val; + + ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); if (write) ret = addrconf_disable_ipv6(ctl, valp, val); -- cgit v1.2.3 From fc16dcd8c2e1e9bc91ed765957e1f2bbf334253e Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Wed, 18 Jan 2012 17:47:58 +0000 Subject: tcp: fix undo after RTO for BIC This patch fixes BIC so that cwnd reductions made during RTOs can be undone (just as they already can be undone when using the default/Reno behavior). When undoing cwnd reductions, BIC-derived congestion control modules were restoring the cwnd from last_max_cwnd. There were two problems with using last_max_cwnd to restore a cwnd during undo: (a) last_max_cwnd was set to 0 on state transitions into TCP_CA_Loss (by calling the module's reset() functions), so cwnd reductions from RTOs could not be undone.
(b) when fast_convergence is enabled (which it is by default) last_max_cwnd does not actually hold the value of snd_cwnd before the loss; instead, it holds a scaled-down version of snd_cwnd. This patch makes the following changes: (1) upon undo, revert snd_cwnd to ca->loss_cwnd, which is already, as the existing comment notes, the "congestion window at last loss" (2) stop forgetting ca->loss_cwnd on TCP_CA_Loss events (3) use ca->last_max_cwnd to check if we're in slow start Signed-off-by: Neal Cardwell Acked-by: Stephen Hemminger Acked-by: Sangtae Ha Signed-off-by: David S. Miller --- net/ipv4/tcp_cubic.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index f376b05cca81..a9077f441cb2 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -107,7 +107,6 @@ static inline void bictcp_reset(struct bictcp *ca) { ca->cnt = 0; ca->last_max_cwnd = 0; - ca->loss_cwnd = 0; ca->last_cwnd = 0; ca->last_time = 0; ca->bic_origin_point = 0; @@ -142,7 +141,10 @@ static inline void bictcp_hystart_reset(struct sock *sk) static void bictcp_init(struct sock *sk) { - bictcp_reset(inet_csk_ca(sk)); + struct bictcp *ca = inet_csk_ca(sk); + + bictcp_reset(ca); + ca->loss_cwnd = 0; if (hystart) bictcp_hystart_reset(sk); @@ -275,7 +277,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) * The initial growth of cubic function may be too conservative * when the available bandwidth is still unknown. */ - if (ca->loss_cwnd == 0 && ca->cnt > 20) + if (ca->last_max_cwnd == 0 && ca->cnt > 20) ca->cnt = 20; /* increase cwnd 5% per RTT */ /* TCP Friendly */ @@ -342,7 +344,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk) { struct bictcp *ca = inet_csk_ca(sk); - return max(tcp_sk(sk)->snd_cwnd, ca->last_max_cwnd); + return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); } static void bictcp_state(struct sock *sk, u8 new_state) -- cgit v1.2.3 From a42b4799c683723e8c464de4026af085b2ebd5fa Mon Sep 17 00:00:00 2001 From: Vijay Subramanian Date: Thu, 19 Jan 2012 10:20:59 +0000 Subject: netem: Fix off-by-one bug in reordering With netem reordering, a gap of N is supposed to reorder every Nth packet with given reorder probability. However, the code currently skips N packets and reorders every (N+1)th packet. Signed-off-by: Vijay Subramanian Signed-off-by: David S. Miller --- net/sched/sch_netem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index e7e1d0b57b3d..2776012132ea 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -419,7 +419,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) cb = netem_skb_cb(skb); if (q->gap == 0 || /* not doing reordering */ - q->counter < q->gap || /* inside last reordering gap */ + q->counter < q->gap - 1 || /* inside last reordering gap */ q->reorder < get_crandom(&q->reorder_cor)) { psched_time_t now; psched_tdiff_t delay; -- cgit v1.2.3 From 974c12360dfe6ab01201fe9e708e7755c413f8b6 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 19 Jan 2012 14:42:21 +0000 Subject: tcp: detect loss above high_seq in recovery Correctly implement a loss detection heuristic: New sequences (above high_seq) sent during the fast recovery are deemed lost when higher sequences are SACKed. Current code does not catch these losses, because tcp_mark_head_lost() does not check packets beyond high_seq. The fix is straightforward: check packets up to the highest sacked packet.
In addition, all the FLAG_DATA_LOST logic is ineffective and redundant and can be removed. Update the loss heuristic comments. The algorithm above is documented as heuristic B, but it is redundant too because heuristic A already covers B. Note that this change only marks some forward-retransmitted packets LOST. It does NOT forbid TCP performing further CWR on new losses. A potential follow-up patch under preparation is to perform another CWR on "new" losses such as 1) sequence above high_seq is lost (by resetting high_seq to snd_nxt) 2) retransmission is lost. Signed-off-by: Yuchung Cheng Signed-off-by: David S. Miller --- include/linux/snmp.h | 1 - net/ipv4/proc.c | 1 - net/ipv4/tcp_input.c | 41 +++++++++++++++-------------------------- 3 files changed, 15 insertions(+), 28 deletions(-) (limited to 'net') diff --git a/include/linux/snmp.h b/include/linux/snmp.h index e16557a357e5..c1241c428179 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -192,7 +192,6 @@ enum LINUX_MIB_TCPPARTIALUNDO, /* TCPPartialUndo */ LINUX_MIB_TCPDSACKUNDO, /* TCPDSACKUndo */ LINUX_MIB_TCPLOSSUNDO, /* TCPLossUndo */ - LINUX_MIB_TCPLOSS, /* TCPLoss */ LINUX_MIB_TCPLOSTRETRANSMIT, /* TCPLostRetransmit */ LINUX_MIB_TCPRENOFAILURES, /* TCPRenoFailures */ LINUX_MIB_TCPSACKFAILURES, /* TCPSackFailures */ diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 3569d8ecaeac..6afc807ee2ad 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -216,7 +216,6 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO), SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO), SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO), - SNMP_MIB_ITEM("TCPLoss", LINUX_MIB_TCPLOSS), SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT), SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES), SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES), diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2877c3e09587..976034f82320 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -105,7 +105,6 @@ int sysctl_tcp_abc __read_mostly; #define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */ #define FLAG_DATA_SACKED 0x20 /* New SACK. */ #define FLAG_ECE 0x40 /* ECE in this ACK */ -#define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */ #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ @@ -1040,13 +1039,11 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, * These 6 states form finite state machine, controlled by the following events: * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue()) * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue()) - * 3. Loss detection event of one of three flavors: + * 3. Loss detection event of two flavors: * A. Scoreboard estimator decided the packet is lost. * A'. Reno "three dupacks" marks head of queue lost. - * A''. Its FACK modfication, head until snd.fack is lost. - * B. SACK arrives sacking data transmitted after never retransmitted - * hole was sent out. - * C. SACK arrives sacking SND.NXT at the moment, when the + * A''. Its FACK modification, head until snd.fack is lost. + * B. SACK arrives sacking SND.NXT at the moment, when the * segment was retransmitted. * 4. D-SACK added new rule: D-SACK changes any tag to S.
* @@ -1153,7 +1150,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, } /* Check for lost retransmit. This superb idea is borrowed from "ratehalving". - * Event "C". Later note: FACK people cheated me again 8), we have to account + * Event "B". Later note: FACK people cheated me again 8), we have to account * for reordering! Ugly, but should help. * * Search retransmitted skbs from write_queue that were sent when snd_nxt was @@ -1844,10 +1841,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, if (found_dup_sack && ((i + 1) == first_sack_index)) next_dup = &sp[i + 1]; - /* Event "B" in the comment above. */ - if (after(end_seq, tp->high_seq)) - state.flag |= FLAG_DATA_LOST; - /* Skip too early cached blocks */ while (tcp_sack_cache_ok(tp, cache) && !before(start_seq, cache->end_seq)) @@ -2515,8 +2508,11 @@ static void tcp_timeout_skbs(struct sock *sk) tcp_verify_left_out(tp); } -/* Mark head of queue up as lost. With RFC3517 SACK, the packets is - * is against sacked "cnt", otherwise it's against facked "cnt" +/* Detect loss in event "A" above by marking head of queue up as lost. + * For FACK or non-SACK(Reno) senders, the first "packets" number of segments + * are considered lost. For RFC3517 SACK, a segment is considered lost if it + * has at least tp->reordering SACKed seqments above it; "packets" refers to + * the maximum SACKed segments to pass before reaching this limit. */ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) { @@ -2525,6 +2521,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) int cnt, oldcnt; int err; unsigned int mss; + /* Use SACK to deduce losses of new sequences sent during recovery */ + const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq; WARN_ON(packets > tp->packets_out); if (tp->lost_skb_hint) { @@ -2546,7 +2544,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) tp->lost_skb_hint = skb; tp->lost_cnt_hint = cnt; - if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq)) + if (after(TCP_SKB_CB(skb)->end_seq, loss_high)) break; oldcnt = cnt; @@ -3033,19 +3031,10 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, if (tcp_check_sack_reneging(sk, flag)) return; - /* C. Process data loss notification, provided it is valid. */ - if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) && - before(tp->snd_una, tp->high_seq) && - icsk->icsk_ca_state != TCP_CA_Open && - tp->fackets_out > tp->reordering) { - tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS); - } - - /* D. Check consistency of the current state. */ + /* C. Check consistency of the current state. */ tcp_verify_left_out(tp); - /* E. Check state exit conditions. State can be terminated + /* D. Check state exit conditions. State can be terminated * when high_seq is ACKed. */ if (icsk->icsk_ca_state == TCP_CA_Open) { WARN_ON(tp->retrans_out != 0); @@ -3077,7 +3066,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, } } - /* F. Process state. */ + /* E. Process state. 
*/ switch (icsk->icsk_ca_state) { case TCP_CA_Recovery: if (!(flag & FLAG_SND_UNA_ADVANCED)) { -- cgit v1.2.3 From bf0813bd282f6ded0a5efca3db238287f7a3dbe8 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Thu, 19 Jan 2012 15:40:06 +0000 Subject: pktgen: Fix unsigned function that is returning negative vals Every call to num_arg() immediately checks the return value for less than zero, as it will return -EFAULT for a failed get_user() call. So it makes no sense for the function to be declared as an unsigned long. Signed-off-by: Paul Gortmaker Signed-off-by: David S. Miller --- net/core/pktgen.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 65f80c7b1656..4d8ce93cd503 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -767,8 +767,8 @@ done: return i; } -static unsigned long num_arg(const char __user * user_buffer, - unsigned long maxlen, unsigned long *num) +static long num_arg(const char __user *user_buffer, unsigned long maxlen, + unsigned long *num) { int i; *num = 0; -- cgit v1.2.3 From 8a622e71f58ec9f092fc99eacae0e6cf14f6e742 Mon Sep 17 00:00:00 2001 From: shawnlu Date: Fri, 20 Jan 2012 12:22:04 +0000 Subject: tcp: md5: using remote address for md5 lookup in rst packet An md5 key is added to a socket through the remote address, so the remote address should be used when finding the md5 key for an outgoing reset packet. Signed-off-by: shawnlu Signed-off-by: David S. Miller --- net/ipv4/tcp_ipv4.c | 2 +- net/ipv6/tcp_ipv6.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 1eb4ad57670e..337ba4cca052 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -631,7 +631,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) arg.iov[0].iov_len = sizeof(rep.th); #ifdef CONFIG_TCP_MD5SIG - key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL; + key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL; if (key) { rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 906c7ca43542..3edd05ae4388 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1083,7 +1083,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) #ifdef CONFIG_TCP_MD5SIG if (sk) - key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr); + key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr); #endif if (th->ack) -- cgit v1.2.3 From 0e90b31f4ba77027a7c21cbfc66404df0851ca21 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Fri, 20 Jan 2012 04:57:16 +0000 Subject: net: introduce res_counter_charge_nofail() for socket allocations There is a case in __sk_mem_schedule() where an allocation is beyond the maximum, yet we are allowed to proceed. It happens under the following condition: sk->sk_wmem_queued + size >= sk->sk_sndbuf The network code won't revert the allocation in this case, meaning that at some point later it'll try to do it. Since this is never communicated to the underlying res_counter code, there is an imbalance in the res_counter uncharge operation. I see two ways of fixing this: 1) storing the information about those allocations somewhere in memcg, and then deducting from that first, before we start draining the res_counter, 2) providing a slightly different allocation function for the res_counter, that matches the original behavior of the network code more closely.
I decided to go for #2 here, believing it to be more elegant, since #1 would require us to do basically that, but in a more obscure way. Signed-off-by: Glauber Costa Cc: KAMEZAWA Hiroyuki Cc: Johannes Weiner Cc: Michal Hocko CC: Tejun Heo CC: Li Zefan CC: Laurent Chavey Acked-by: Tejun Heo Signed-off-by: David S. Miller --- include/linux/res_counter.h | 6 ++++++ include/net/sock.h | 10 ++++------ kernel/res_counter.c | 25 +++++++++++++++++++++++++ net/core/sock.c | 4 ++-- 4 files changed, 37 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index d06d014afda6..da81af086eaf 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -109,12 +109,18 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent); * * returns 0 on success and <0 if the counter->usage will exceed the * counter->limit _locked call expects the counter->lock to be taken + * + * charge_nofail works the same, except that it charges the resource + * counter unconditionally, and returns < 0 if the after the current + * charge we are over limit. */ int __must_check res_counter_charge_locked(struct res_counter *counter, unsigned long val); int __must_check res_counter_charge(struct res_counter *counter, unsigned long val, struct res_counter **limit_fail_at); +int __must_check res_counter_charge_nofail(struct res_counter *counter, + unsigned long val, struct res_counter **limit_fail_at); /* * uncharge - tell that some portion of the resource is released diff --git a/include/net/sock.h b/include/net/sock.h index 0e7a9b05f92b..4c69ac165e6b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1008,9 +1008,8 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot, struct res_counter *fail; int ret; - ret = res_counter_charge(prot->memory_allocated, - amt << PAGE_SHIFT, &fail); - + ret = res_counter_charge_nofail(prot->memory_allocated, + amt << PAGE_SHIFT, &fail); if (ret < 0) *parent_status = OVER_LIMIT; } @@ -1054,12 +1053,11 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) } static inline void -sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status) +sk_memory_allocated_sub(struct sock *sk, int amt) { struct proto *prot = sk->sk_prot; - if (mem_cgroup_sockets_enabled && sk->sk_cgrp && - parent_status != OVER_LIMIT) /* Otherwise was uncharged already */ + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) memcg_memory_allocated_sub(sk->sk_cgrp, amt); atomic_long_sub(amt, prot->memory_allocated); diff --git a/kernel/res_counter.c b/kernel/res_counter.c index 6d269cce7aa1..d508363858b3 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c @@ -66,6 +66,31 @@ done: return ret; } +int res_counter_charge_nofail(struct res_counter *counter, unsigned long val, + struct res_counter **limit_fail_at) +{ + int ret, r; + unsigned long flags; + struct res_counter *c; + + r = ret = 0; + *limit_fail_at = NULL; + local_irq_save(flags); + for (c = counter; c != NULL; c = c->parent) { + spin_lock(&c->lock); + r = res_counter_charge_locked(c, val); + if (r) + c->usage += val; + spin_unlock(&c->lock); + if (r < 0 && ret == 0) { + *limit_fail_at = c; + ret = r; + } + } + local_irq_restore(flags); + + return ret; +} void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) { if (WARN_ON(counter->usage < val)) diff --git a/net/core/sock.c b/net/core/sock.c index 5c5af9988f94..3e81fd2e3c75 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1827,7 +1827,7 
@@ suppress_allocation: /* Alas. Undo changes. */ sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; - sk_memory_allocated_sub(sk, amt, parent_status); + sk_memory_allocated_sub(sk, amt); return 0; } @@ -1840,7 +1840,7 @@ EXPORT_SYMBOL(__sk_mem_schedule); void __sk_mem_reclaim(struct sock *sk) { sk_memory_allocated_sub(sk, - sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0); + sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT); sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; if (sk_under_memory_pressure(sk) && -- cgit v1.2.3 From b1cc16b8e643096adb92bbcb76c6c4c564141c40 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 22 Jan 2012 14:45:14 -0500 Subject: bluetooth: hci: Fix type of "enable_hs" to bool. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes: net/bluetooth/hci_core.c: In function ‘__check_enable_hs’: net/bluetooth/hci_core.c:2587:1: warning: return from incompatible pointer type [enabled by default] Signed-off-by: David S. Miller --- include/net/bluetooth/hci.h | 2 +- net/bluetooth/hci_core.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 5b2fed5eebf2..00596e816b4d 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1388,6 +1388,6 @@ struct hci_inquiry_req { }; #define IREQ_CACHE_FLUSH 0x0001 -extern int enable_hs; +extern bool enable_hs; #endif /* __HCI_H */ diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 845da3ee56a0..9de93714213a 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -55,7 +55,7 @@ #define AUTO_OFF_TIMEOUT 2000 -int enable_hs; +bool enable_hs; static void hci_rx_work(struct work_struct *work); static void hci_cmd_work(struct work_struct *work); -- cgit v1.2.3 From f80400a26a2e8bff541de12834a1134358bb6642 Mon Sep 17 00:00:00 2001 From: Michał Mirosław Date: Sun, 22 Jan 2012 00:20:40 +0000 Subject: ethtool: allow ETHTOOL_GSSET_INFO for users MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow ETHTOOL_GSSET_INFO ethtool ioctl() for unprivileged users. ETHTOOL_GSTRINGS is already allowed, but is unusable without this one. Signed-off-by: Michał Mirosław Acked-by: Ben Hutchings Signed-off-by: David S. Miller --- net/core/ethtool.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 921aa2b4b415..369b41894527 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1311,6 +1311,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GRXCSUM: case ETHTOOL_GTXCSUM: case ETHTOOL_GSG: + case ETHTOOL_GSSET_INFO: case ETHTOOL_GSTRINGS: case ETHTOOL_GTSO: case ETHTOOL_GPERMADDR: -- cgit v1.2.3 From 875ad3f8e7dff6bc1d053e5bfe73d8e8d2e6ae67 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 23 Jan 2012 12:49:36 -0500 Subject: SUNRPC: Fix machine creds in generic_create_cred and generic_match - generic_create_cred needs to copy the '.principal' field. - generic_match needs to ignore the groups and match on the '.principal' field. This fixes an Oops that was introduced by commit 68c9715 (SUNRPC: Clean up the RPCSEC_GSS service ticket requests) Reported-by: J. Bruce Fields Signed-off-by: Trond Myklebust Tested-by: J. 
Bruce Fields --- net/sunrpc/auth_generic.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c index 1426ec3d0a53..75762f346975 100644 --- a/net/sunrpc/auth_generic.c +++ b/net/sunrpc/auth_generic.c @@ -92,6 +92,7 @@ generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) if (gcred->acred.group_info != NULL) get_group_info(gcred->acred.group_info); gcred->acred.machine_cred = acred->machine_cred; + gcred->acred.principal = acred->principal; dprintk("RPC: allocated %s cred %p for uid %d gid %d\n", gcred->acred.machine_cred ? "machine" : "generic", @@ -123,6 +124,17 @@ generic_destroy_cred(struct rpc_cred *cred) call_rcu(&cred->cr_rcu, generic_free_cred_callback); } +static int +machine_cred_match(struct auth_cred *acred, struct generic_cred *gcred, int flags) +{ + if (!gcred->acred.machine_cred || + gcred->acred.principal != acred->principal || + gcred->acred.uid != acred->uid || + gcred->acred.gid != acred->gid) + return 0; + return 1; +} + /* * Match credentials against current process creds. */ @@ -132,9 +144,12 @@ generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags) struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base); int i; + if (acred->machine_cred) + return machine_cred_match(acred, gcred, flags); + if (gcred->acred.uid != acred->uid || gcred->acred.gid != acred->gid || - gcred->acred.machine_cred != acred->machine_cred) + gcred->acred.machine_cred != 0) goto out_nomatch; /* Optimisation in the case where pointers are identical... */ -- cgit v1.2.3 From ba1960257c5980f9b58057995ce3394bd8e48ca3 Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Tue, 10 Jan 2012 15:19:54 +0200 Subject: mac80211: update oper_channel on ibss join Commit 13c40c5 ("mac80211: Add HT operation modes for IBSS") broke ibss operation by mistakenly removing the local->oper_channel update (causing ibss to start on the wrong channel). fix it. Signed-off-by: Eliad Peller Acked-by: Simon Wunderlich Signed-off-by: John W. Linville --- net/mac80211/ibss.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index b3d76b756cd5..a4643969a13b 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -106,6 +106,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; + local->oper_channel = chan; channel_type = ifibss->channel_type; if (channel_type > NL80211_CHAN_HT20 && !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type)) -- cgit v1.2.3 From 405385f8ce7a2ed8f82e216d88b5282142e1288b Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Wed, 11 Jan 2012 13:11:50 +0200 Subject: mac80211: set bss_conf.idle when vif is connected __ieee80211_recalc_idle() iterates through the vifs, sets bss_conf.idle = true if they are disconnected, and increases "count" if they are not (which later gets evaluated in order to determine whether the device is idle). However, the loop doesn't set bss_conf.idle = false (along with increasing "count"), causing the device idle state and the vif idle state to get out of sync in some cases. Signed-off-by: Eliad Peller Signed-off-by: John W. 
Linville --- net/mac80211/iface.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index e47768cb8cb3..01a21c2f6ab3 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1314,6 +1314,7 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local) continue; } /* count everything else */ + sdata->vif.bss_conf.idle = false; count++; } -- cgit v1.2.3 From 56ac11cf2f21366ad48b356f7a0d1af8cff3588e Mon Sep 17 00:00:00 2001 From: Radu Iliescu Date: Thu, 19 Jan 2012 03:57:57 +0000 Subject: llc: Fix race condition in llc_ui_recvmsg There is a race on sk_receive_queue between llc_ui_recvmsg and sock_queue_rcv_skb. Our current solution is to protect skb_eat in llc_ui_recvmsg with the queue spinlock. Signed-off-by: Radu Iliescu Signed-off-by: David S. Miller --- net/llc/af_llc.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'net') diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index a18e6c3d36e3..b9bef2c75026 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -713,6 +713,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb = NULL; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); + unsigned long cpu_flags; size_t copied = 0; u32 peek_seq = 0; u32 *seq; @@ -838,7 +839,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, goto copy_uaddr; if (!(flags & MSG_PEEK)) { + spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); sk_eat_skb(sk, skb, 0); + spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } @@ -859,7 +862,9 @@ copy_uaddr: llc_cmsg_rcv(msg, skb); if (!(flags & MSG_PEEK)) { + spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); sk_eat_skb(sk, skb, 0); + spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } -- cgit v1.2.3 From c452ed70771cea3af73d21a5914989137fbd28b8 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 24 Jan 2012 16:03:33 -0500 Subject: net: flow_dissector.c missing include linux/export.h The file net/core/flow_dissector.c seems to be missing including linux/export.h. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- net/core/flow_dissector.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 0985b9b14b80..a225089df5b6 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -1,4 +1,5 @@ #include +#include #include #include #include -- cgit v1.2.3 From efc3dbc37412c027e363736b4f4c74ee5e8ecffc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 24 Jan 2012 17:03:44 -0500 Subject: rds: Make rds_sock_lock BH rather than IRQ safe. rds_sock_info() triggers locking warnings because we try to perform a local_bh_enable() (via sock_i_ino()) while hardware interrupts are disabled (via taking rds_sock_lock). There is no reason for rds_sock_lock to be a hardware IRQ disabling lock, none of these access paths run in hardware interrupt context. Therefore making it a BH disabling lock is safe and sufficient to fix this bug. Reported-by: Kumar Sanghvi Reported-by: Josh Boyer Signed-off-by: David S. 
Miller --- net/rds/af_rds.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index bb6ad81b671d..424ff622ab5f 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -68,7 +68,6 @@ static int rds_release(struct socket *sock) { struct sock *sk = sock->sk; struct rds_sock *rs; - unsigned long flags; if (!sk) goto out; @@ -94,10 +93,10 @@ static int rds_release(struct socket *sock) rds_rdma_drop_keys(rs); rds_notify_queue_get(rs, NULL); - spin_lock_irqsave(&rds_sock_lock, flags); + spin_lock_bh(&rds_sock_lock); list_del_init(&rs->rs_item); rds_sock_count--; - spin_unlock_irqrestore(&rds_sock_lock, flags); + spin_unlock_bh(&rds_sock_lock); rds_trans_put(rs->rs_transport); @@ -409,7 +408,6 @@ static const struct proto_ops rds_proto_ops = { static int __rds_create(struct socket *sock, struct sock *sk, int protocol) { - unsigned long flags; struct rds_sock *rs; sock_init_data(sock, sk); @@ -426,10 +424,10 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol) spin_lock_init(&rs->rs_rdma_lock); rs->rs_rdma_keys = RB_ROOT; - spin_lock_irqsave(&rds_sock_lock, flags); + spin_lock_bh(&rds_sock_lock); list_add_tail(&rs->rs_item, &rds_sock_list); rds_sock_count++; - spin_unlock_irqrestore(&rds_sock_lock, flags); + spin_unlock_bh(&rds_sock_lock); return 0; } @@ -471,12 +469,11 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len, { struct rds_sock *rs; struct rds_incoming *inc; - unsigned long flags; unsigned int total = 0; len /= sizeof(struct rds_info_message); - spin_lock_irqsave(&rds_sock_lock, flags); + spin_lock_bh(&rds_sock_lock); list_for_each_entry(rs, &rds_sock_list, rs_item) { read_lock(&rs->rs_recv_lock); @@ -492,7 +489,7 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len, read_unlock(&rs->rs_recv_lock); } - spin_unlock_irqrestore(&rds_sock_lock, flags); + spin_unlock_bh(&rds_sock_lock); lens->nr = total; lens->each = sizeof(struct rds_info_message); @@ -504,11 +501,10 @@ static void rds_sock_info(struct socket *sock, unsigned int len, { struct rds_info_socket sinfo; struct rds_sock *rs; - unsigned long flags; len /= sizeof(struct rds_info_socket); - spin_lock_irqsave(&rds_sock_lock, flags); + spin_lock_bh(&rds_sock_lock); if (len < rds_sock_count) goto out; @@ -529,7 +525,7 @@ out: lens->nr = rds_sock_count; lens->each = sizeof(struct rds_info_socket); - spin_unlock_irqrestore(&rds_sock_lock, flags); + spin_unlock_bh(&rds_sock_lock); } static void rds_exit(void) -- cgit v1.2.3 From 68315801dbf3ab2001679fd2074c9dc5dcf87dfa Mon Sep 17 00:00:00 2001 From: James Chapman Date: Wed, 25 Jan 2012 02:39:05 +0000 Subject: l2tp: l2tp_ip - fix possible oops on packet receive When a packet is received on an L2TP IP socket (L2TPv3 IP link encapsulation), the l2tpip socket's backlog_rcv function calls xfrm4_policy_check(). This is not necessary, since it was called before the skb was added to the backlog. With CONFIG_NET_NS enabled, xfrm4_policy_check() will oops if skb->dev is null, so this trivial patch removes the call. This bug has always been present, but only when CONFIG_NET_NS is enabled does it cause problems. Most users are probably using UDP encapsulation for L2TP, hence the problem has only recently surfaced. 
EIP: 0060:[] EFLAGS: 00210246 CPU: 0 EIP is at l2tp_ip_recvmsg+0xd4/0x2a7 EAX: 00000001 EBX: d77b5180 ECX: 00000000 EDX: 00200246 ESI: 00000000 EDI: d63cbd30 EBP: d63cbd18 ESP: d63cbcf4 DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 Call Trace: [] sock_common_recvmsg+0x31/0x46 [] __sock_recvmsg_nosec+0x45/0x4d [] __sock_recvmsg+0x31/0x3b [] sock_recvmsg+0x96/0xab [] ? might_fault+0x47/0x81 [] ? might_fault+0x47/0x81 [] ? _copy_from_user+0x31/0x115 [] ? copy_from_user+0x8/0xa [] ? verify_iovec+0x3e/0x78 [] __sys_recvmsg+0x10a/0x1aa [] ? sock_recvmsg+0x0/0xab [] ? __lock_acquire+0xbdf/0xbee [] ? do_page_fault+0x193/0x375 [] ? fcheck_files+0x9b/0xca [] ? fget_light+0x2a/0x9c [] sys_recvmsg+0x2b/0x43 [] sys_socketcall+0x16d/0x1a5 [] ? trace_hardirqs_on_thunk+0xc/0x10 [] sysenter_do_call+0x12/0x38 Code: c6 05 8c ea a8 c1 01 e8 0c d4 d9 ff 85 f6 74 07 3e ff 86 80 00 00 00 b9 17 b6 2b c1 ba 01 00 00 00 b8 78 ed 48 c1 e8 23 f6 d9 ff 76 0c 68 28 e3 30 c1 68 2d 44 41 c1 e8 89 57 01 00 83 c4 0c Signed-off-by: James Chapman Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/l2tp/l2tp_ip.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'net') diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index d21e7ebd91ca..55670ec3cd0f 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -393,11 +393,6 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) { int rc; - if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) - goto drop; - - nf_reset(skb); - /* Charge it to the socket, dropping if the queue is full. */ rc = sock_queue_rcv_skb(sk, skb); if (rc < 0) -- cgit v1.2.3 From 2b05ad33e1e624e7f08b8676d270dc7725403b7e Mon Sep 17 00:00:00 2001 From: Flavio Leitner Date: Wed, 25 Jan 2012 08:34:51 +0000 Subject: tcp: bind() fix autoselection to share ports The current code checks for conflicts when the application requests a specific port. If there is no conflict, then the request is granted. On the other hand, the port autoselection done by the kernel fails when all ports are bound, even when there is a port with no conflict available. The fix changes port autoselection to check whether there is a conflict and to use the port if there is none. Signed-off-by: Flavio Leitner Signed-off-by: Marcelo Ricardo Leitner Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/inet_connection_sock.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'net') diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 2e4e24476c4c..ecd19b5a7ee2 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -128,6 +128,11 @@ again: goto have_snum; } } + if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { + spin_unlock(&head->lock); + snum = rover; + goto have_snum; + } goto next; } break; -- cgit v1.2.3 From fddb7b5761f104f034a0e708ece756d9b2eb2cac Mon Sep 17 00:00:00 2001 From: Flavio Leitner Date: Wed, 25 Jan 2012 08:34:52 +0000 Subject: tcp: bind() optimize port allocation Port autoselection finds a port and then drops the lock; right after that, it gets the hash bucket again and locks it. Fix it to go direct. Signed-off-by: Flavio Leitner Signed-off-by: Marcelo Ricardo Leitner Acked-by: Eric Dumazet Signed-off-by: David S.
Miller --- net/ipv4/inet_connection_sock.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index ecd19b5a7ee2..19d66cefd7d3 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -123,15 +123,13 @@ again: smallest_size = tb->num_owners; smallest_rover = rover; if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { - spin_unlock(&head->lock); snum = smallest_rover; - goto have_snum; + goto tb_found; } } if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { - spin_unlock(&head->lock); snum = rover; - goto have_snum; + goto tb_found; } goto next; } -- cgit v1.2.3 From 073862ba5d249c20bd5c49fc6d904ff0e1f6a672 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 26 Jan 2012 00:41:38 +0000 Subject: netns: fix net_alloc_generic() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a new net namespace is created, we should attach to it a "struct net_generic" with enough slots (even empty), or we can hit the following BUG_ON() : [ 200.752016] kernel BUG at include/net/netns/generic.h:40! ... [ 200.752016] [] ? get_cfcnfg+0x3a/0x180 [ 200.752016] [] ? lockdep_rtnl_is_held+0x10/0x20 [ 200.752016] [] caif_device_notify+0x2e/0x530 [ 200.752016] [] notifier_call_chain+0x67/0x110 [ 200.752016] [] raw_notifier_call_chain+0x11/0x20 [ 200.752016] [] call_netdevice_notifiers+0x32/0x60 [ 200.752016] [] register_netdevice+0x196/0x300 [ 200.752016] [] register_netdev+0x19/0x30 [ 200.752016] [] loopback_net_init+0x4a/0xa0 [ 200.752016] [] ops_init+0x42/0x180 [ 200.752016] [] setup_net+0x6b/0x100 [ 200.752016] [] copy_net_ns+0x86/0x110 [ 200.752016] [] create_new_namespaces+0xd9/0x190 net_alloc_generic() should take into account the maximum index into the ptr array, as a subsystem might use net_generic() anytime. This also reduces number of reallocations in net_assign_generic() Reported-by: Sasha Levin Tested-by: Sasha Levin Signed-off-by: Eric Dumazet Cc: Sjur Brændeland Cc: Eric W. Biederman Cc: Pavel Emelyanov Signed-off-by: David S. Miller --- net/core/net_namespace.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index aefcd7acbffa..0e950fda9a0a 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -30,6 +30,20 @@ EXPORT_SYMBOL(init_net); #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ +static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS; + +static struct net_generic *net_alloc_generic(void) +{ + struct net_generic *ng; + size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]); + + ng = kzalloc(generic_size, GFP_KERNEL); + if (ng) + ng->len = max_gen_ptrs; + + return ng; +} + static int net_assign_generic(struct net *net, int id, void *data) { struct net_generic *ng, *old_ng; @@ -43,8 +57,7 @@ static int net_assign_generic(struct net *net, int id, void *data) if (old_ng->len >= id) goto assign; - ng = kzalloc(sizeof(struct net_generic) + - id * sizeof(void *), GFP_KERNEL); + ng = net_alloc_generic(); if (ng == NULL) return -ENOMEM; @@ -59,7 +72,6 @@ static int net_assign_generic(struct net *net, int id, void *data) * the old copy for kfree after a grace period. 
*/ - ng->len = id; memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*)); rcu_assign_pointer(net->gen, ng); @@ -161,18 +173,6 @@ out_undo: goto out; } -static struct net_generic *net_alloc_generic(void) -{ - struct net_generic *ng; - size_t generic_size = sizeof(struct net_generic) + - INITIAL_NET_GEN_PTRS * sizeof(void *); - - ng = kzalloc(generic_size, GFP_KERNEL); - if (ng) - ng->len = INITIAL_NET_GEN_PTRS; - - return ng; -} #ifdef CONFIG_NET_NS static struct kmem_cache *net_cachep; @@ -483,6 +483,7 @@ again: } return error; } + max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id); } error = __register_pernet_operations(list, ops); if (error) { -- cgit v1.2.3 From f2b3ee9e4200b32d113b1bd3c93f9a836c97357c Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 26 Jan 2012 10:34:35 +0000 Subject: ipv6: Fix ip_gre lockless xmits. Tunnel devices set NETIF_F_LLTX to bypass HARD_TX_LOCK. Sit and ipip set this unconditionally in ops->setup, but gre enables it conditionally after parameter passing in ops->newlink. This is not called during tunnel setup as below, however, so GRE tunnels are still taking the lock. modprobe ip_gre ip tunnel add test0 mode gre remote 10.5.1.1 dev lo ip link set test0 up ip addr add 10.6.0.1 dev test0 # cat /sys/class/net/test0/features # $DIR/test_tunnel_xmit 10 10.5.2.1 ip route add 10.5.2.0/24 dev test0 ip tunnel del test0 The newlink callback is only called in rtnl_netlink, and only if the device is new, as it calls register_netdevice internally. Gre tunnels are created at 'ip tunnel add' with ioctl SIOCADDTUNNEL, which calls ipgre_tunnel_locate, which calls register_netdev. rtnl_newlink is called at 'ip link set', but skips ops->newlink and the device is up with locking still enabled. The equivalent ipip tunnel works fine, btw (just substitute 'method gre' for 'method ipip'). On kernels before /sys/class/net/*/features was removed [1], the first commented out line returns 0x6000 with method gre, which indicates that NETIF_F_LLTX (0x1000) is not set. With ipip, it reports 0x7000. This test cannot be used on recent kernels where the sysfs file is removed (and ETHTOOL_GFEATURES does not currently work for tunnel devices, because they lack dev->ethtool_ops). The second commented out line calls a simple transmission test [2] that sends on 24 cores at maximum rate. Results of a single run: ipip: 19,372,306 gre before patch: 4,839,753 gre after patch: 19,133,873 This patch replicates the condition check in ipgre_newlink to ipgre_tunnel_locate. It works for me, both with oseq on and off. This is the first time I looked at rtnetlink and iproute2 code, though, so someone more knowledgeable should probably check the patch. Thanks. The tail of both functions is now identical, by the way. To avoid code duplication, I'll be happy to rework this and merge the two. [1] http://patchwork.ozlabs.org/patch/104610/ [2] http://kernel.googlecode.com/files/xmit_udp_parallel.c Signed-off-by: Willem de Bruijn Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/ip_gre.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 2b53a1f7abf6..6b3ca5ba4450 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -422,6 +422,10 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net, if (register_netdevice(dev) < 0) goto failed_free; + /* Can use a lockless transmit, unless we generate output sequences */ + if (!(nt->parms.o_flags & GRE_SEQ)) + dev->features |= NETIF_F_LLTX; + dev_hold(dev); ipgre_tunnel_link(ign, nt); return nt; -- cgit v1.2.3 From f18da14565819ba43b8321237e2426a2914cc2ef Mon Sep 17 00:00:00 2001 From: Stefan Gula Date: Thu, 26 Jan 2012 11:01:06 +0000 Subject: net: RTNETLINK adjusting values of min_ifinfo_dump_size Setting link parameters on a netdevice changes the value of if_nlmsg_size(), therefore it is necessary to recalculate min_ifinfo_dump_size. Signed-off-by: Stefan Gula Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index f16444bc6cbb..65aebd450027 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1509,6 +1509,9 @@ errout: if (send_addr_notify) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); + min_ifinfo_dump_size = max_t(u16, if_nlmsg_size(dev), + min_ifinfo_dump_size); + return err; } -- cgit v1.2.3 From 8a8ee9aff6c3077dd9c2c7a77478e8ed362b96c6 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 26 Jan 2012 14:04:53 +0000 Subject: net caif: Register properly as a pernet subsystem. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit caif is a subsystem and as such it needs to register with register_pernet_subsys instead of register_pernet_device. Among other problems, using register_pernet_device was resulting in net_generic being called before the caif_net structure was allocated, which has been causing net_generic to fail with either BUG_ON's or by returning NULL pointers. An uglier problem that could be caused is packets in flight while the subsystem is shutting down. To remove confusion, also remove the cruft caused by inappropriately trying to fix this bug. With the aid of the previous patch I have tested this patch and confirmed that using register_pernet_subsys makes the failure go away as it should. Signed-off-by: Eric W. Biederman Acked-by: Sjur Brændeland Tested-by: Sasha Levin Signed-off-by: David S.
From 8a8ee9aff6c3077dd9c2c7a77478e8ed362b96c6 Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman"
Date: Thu, 26 Jan 2012 14:04:53 +0000
Subject: net caif: Register properly as a pernet subsystem.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

caif is a subsystem and as such it needs to register with
register_pernet_subsys instead of register_pernet_device.

Among other problems, using register_pernet_device was resulting in
net_generic being called before the caif_net structure was allocated,
which has been causing net_generic to fail with either BUG_ONs or NULL
pointer returns.

An uglier problem this could cause is packets in flight while the
subsystem is shutting down.

To remove confusion, also remove the cruft caused by inappropriately
trying to fix this bug.

With the aid of the previous patch I have tested this patch and
confirmed that using register_pernet_subsys makes the failure go away
as it should.

Signed-off-by: Eric W. Biederman
Acked-by: Sjur Brændeland
Tested-by: Sasha Levin
Signed-off-by: David S. Miller
---
 net/caif/caif_dev.c | 22 ++--------------------
 net/caif/cfcnfg.c   |  1 -
 2 files changed, 2 insertions(+), 21 deletions(-)

(limited to 'net')

diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 673728add60b..82c57069415f 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -59,8 +59,6 @@ struct cfcnfg *get_cfcnfg(struct net *net)
 {
 	struct caif_net *caifn;
 	caifn = net_generic(net, caif_net_id);
-	if (!caifn)
-		return NULL;
 	return caifn->cfg;
 }
 EXPORT_SYMBOL(get_cfcnfg);
@@ -69,8 +67,6 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
 	struct caif_net *caifn;
 	caifn = net_generic(net, caif_net_id);
-	if (!caifn)
-		return NULL;
 	return &caifn->caifdevs;
 }
 
@@ -99,8 +95,6 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 	struct caif_device_entry *caifd;
 
 	caifdevs = caif_device_list(dev_net(dev));
-	if (!caifdevs)
-		return NULL;
 
 	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
 	if (!caifd)
@@ -120,8 +114,6 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
 	struct caif_device_entry_list *caifdevs =
 	    caif_device_list(dev_net(dev));
 	struct caif_device_entry *caifd;
-	if (!caifdevs)
-		return NULL;
 
 	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
 		if (caifd->netdev == dev)
@@ -321,8 +313,6 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
 	struct caif_device_entry_list *caifdevs;
 
 	caifdevs = caif_device_list(dev_net(dev));
-	if (!cfg || !caifdevs)
-		return;
 	caifd = caif_device_alloc(dev);
 	if (!caifd)
 		return;
@@ -374,8 +364,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 
 	cfg = get_cfcnfg(dev_net(dev));
 	caifdevs = caif_device_list(dev_net(dev));
-	if (!cfg || !caifdevs)
-		return 0;
 
 	caifd = caif_get(dev);
 	if (caifd == NULL && dev->type != ARPHRD_CAIF)
@@ -507,9 +495,6 @@ static struct notifier_block caif_device_notifier = {
 static int caif_init_net(struct net *net)
 {
 	struct caif_net *caifn = net_generic(net, caif_net_id);
-	if (WARN_ON(!caifn))
-		return -EINVAL;
-
 	INIT_LIST_HEAD(&caifn->caifdevs.list);
 	mutex_init(&caifn->caifdevs.lock);
 
@@ -527,9 +512,6 @@ static void caif_exit_net(struct net *net)
 	    caif_device_list(net);
 	struct cfcnfg *cfg = get_cfcnfg(net);
 
-	if (!cfg || !caifdevs)
-		return;
-
 	rtnl_lock();
 	mutex_lock(&caifdevs->lock);
 
@@ -569,7 +551,7 @@ static int __init caif_device_init(void)
 {
 	int result;
 
-	result = register_pernet_device(&caif_net_ops);
+	result = register_pernet_subsys(&caif_net_ops);
 
 	if (result)
 		return result;
@@ -582,7 +564,7 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-	unregister_pernet_device(&caif_net_ops);
+	unregister_pernet_subsys(&caif_net_ops);
 	unregister_netdevice_notifier(&caif_device_notifier);
 	dev_remove_pack(&caif_packet_type);
 }
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 598aafb4cb51..ba9cfd47778a 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -309,7 +309,6 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
 	int err;
 	struct cfctrl_link_param param;
 	struct cfcnfg *cfg = get_cfcnfg(net);
-	caif_assert(cfg != NULL);
 
 	rcu_read_lock();
 	err = caif_connect_req_to_link_param(cfg, conn_req, &param);
-- cgit v1.2.3
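[Editorial illustration: a sketch of the standard pernet registration pattern
the patch switches to, using hypothetical "example" names rather than the caif
code. With .id and .size set, the pernet core allocates the per-namespace area
before ->init runs, which is why the NULL checks removed above were never
needed in the first place. Kernel-style fragment, not standalone code:]

static int example_net_id __read_mostly;

struct example_net {
	int counter; /* per-namespace state */
};

static int __net_init example_init_net(struct net *net)
{
	struct example_net *en = net_generic(net, example_net_id);

	en->counter = 0; /* storage already allocated by the pernet core */
	return 0;
}

static void __net_exit example_exit_net(struct net *net)
{
	/* tear down per-namespace state here */
}

static struct pernet_operations example_net_ops = {
	.init = example_init_net,
	.exit = example_exit_net,
	.id   = &example_net_id,
	.size = sizeof(struct example_net),
};

/* register as a subsystem, not a device, so init ordering is correct:
 * err = register_pernet_subsys(&example_net_ops);
 */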
From 4acb41903b2f99f3dffd4c3df9acc84ca5942cb2 Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Mon, 30 Jan 2012 01:20:17 +0000
Subject: net/tcp: Fix tcp memory limits initialization when !CONFIG_SYSCTL

sysctl_tcp_mem initialization was moved to net/ipv4/sysctl_net_ipv4.c
in commit 3dc43e3e4d0b52197d3205214fe8f162f9e0c334, since it became a
per-ns value. That code, however, will never run when CONFIG_SYSCTL is
disabled, leading to bogus values in those fields - causing hung TCP
sockets.

This patch fixes it by keeping an initialization code in tcp_init().
It will be overwritten by the first net namespace init if CONFIG_SYSCTL
is compiled in, and does the right thing if it is compiled out.

It is also named properly as tcp_init_mem(), to signal its non-sysctl
side effect on TCP limits.

Reported-by: Ingo Molnar
Signed-off-by: Glauber Costa
Cc: David S. Miller
Link: http://lkml.kernel.org/r/4F22D05A.8030604@parallels.com
[ renamed the function, tidied up the changelog a bit ]
Signed-off-by: Ingo Molnar
Signed-off-by: David S. Miller
---
 include/net/tcp.h          |  2 ++
 net/ipv4/sysctl_net_ipv4.c |  1 +
 net/ipv4/tcp.c             | 16 +++++++++++++---
 3 files changed, 16 insertions(+), 3 deletions(-)

(limited to 'net')

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0118ea999f67..d49db0113a06 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -311,6 +311,8 @@ extern struct proto tcp_prot;
 #define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
+extern void tcp_init_mem(struct net *net);
+
 extern void tcp_v4_err(struct sk_buff *skb, u32);
 
 extern void tcp_shutdown (struct sock *sk, int how);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4aa7e9dc0cbb..4cb9cd2f2c39 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -814,6 +814,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 
 	net->ipv4.sysctl_rt_cache_rebuild_count = 4;
 
+	tcp_init_mem(net);
 	limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9bcdec3ad772..06373b4a449a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3216,6 +3216,16 @@ static int __init set_thash_entries(char *str)
 }
 __setup("thash_entries=", set_thash_entries);
 
+void tcp_init_mem(struct net *net)
+{
+	/* Set per-socket limits to no more than 1/128 the pressure threshold */
+	unsigned long limit = nr_free_buffer_pages() / 8;
+	limit = max(limit, 128UL);
+	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
+	net->ipv4.sysctl_tcp_mem[1] = limit;
+	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+}
+
 void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
@@ -3276,9 +3286,9 @@ void __init tcp_init(void)
 	sysctl_tcp_max_orphans = cnt / 2;
 	sysctl_max_syn_backlog = max(128, cnt / 256);
 
-	/* Set per-socket limits to no more than 1/128 the pressure threshold */
-	limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1])
-		<< (PAGE_SHIFT - 7);
+	tcp_init_mem(&init_net);
+	limit = nr_free_buffer_pages() / 8;
+	limit = max(limit, 128UL);
 	max_share = min(4UL*1024*1024, limit);
 
 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
-- cgit v1.2.3
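[Editorial illustration: a worked example of the limits tcp_init_mem()
computes, assuming a hypothetical machine with 262144 free buffer pages
(roughly 1 GiB of 4 KiB pages; the kernel derives the real figure from
nr_free_buffer_pages()). Plain C, independent of the kernel:]

#include <stdio.h>

int main(void)
{
	unsigned long pages = 262144;    /* hypothetical free buffer pages */
	unsigned long limit = pages / 8; /* 1/8 of buffer memory, in pages */

	if (limit < 128UL)
		limit = 128UL;           /* the max(limit, 128UL) floor */

	unsigned long mem[3];
	mem[0] = limit / 4 * 3;          /* low watermark:      24576 */
	mem[1] = limit;                  /* pressure threshold: 32768 */
	mem[2] = mem[0] * 2;             /* hard limit:         49152 */

	printf("tcp_mem = { %lu, %lu, %lu } pages\n", mem[0], mem[1], mem[2]);
	return 0;
}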
From 5b35e1e6e9ca651e6b291c96d1106043c9af314a Mon Sep 17 00:00:00 2001
From: Neal Cardwell
Date: Sat, 28 Jan 2012 17:29:46 +0000
Subject: tcp: fix tcp_trim_head() to adjust segment count with skb MSS
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This commit fixes tcp_trim_head() to recalculate the number of
segments in the skb with the skb's existing MSS, so trimming the head
causes the skb segment count to be monotonically non-increasing - it
should stay the same or go down, but not increase.

Previously tcp_trim_head() used the current MSS of the connection. But
if there was a decrease in MSS between original transmission and ACK
(e.g. due to PMTUD), this could cause tcp_trim_head() to
counter-intuitively increase the segment count when trimming bytes off
the head of an skb. This violated assumptions in tcp_tso_acked() that
tcp_trim_head() only decreases the packet count, so that packets_acked
in tcp_tso_acked() could underflow, leading tcp_clean_rtx_queue() to
pass u32 pkts_acked values as large as 0xffffffff to
ca_ops->pkts_acked().

As an aside, if tcp_trim_head() had really wanted the skb to reflect
the current MSS, it should have called tcp_set_skb_tso_segs()
unconditionally, since a decrease in MSS would mean that a
single-packet skb should now be sliced into multiple segments.

Signed-off-by: Neal Cardwell
Acked-by: Nandita Dukkipati
Acked-by: Ilpo Järvinen
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_output.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'net')

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8c8de2780c7a..4ff3b6dc74fc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1141,11 +1141,9 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	sk_mem_uncharge(sk, len);
 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 
-	/* Any change of skb->len requires recalculation of tso
-	 * factor and mss.
-	 */
+	/* Any change of skb->len requires recalculation of tso factor. */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
+		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
 
 	return 0;
 }
-- cgit v1.2.3
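[Editorial illustration: the underflow described above is easy to reproduce in
isolation. A minimal sketch with made-up segment counts, showing how an
unsigned subtraction yields the 0xffffffff value mentioned in the changelog:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pcount_before_trim = 1; /* one segment at the old, larger MSS */
	uint32_t pcount_after_trim = 2;  /* recomputed with a smaller current MSS */

	/* tcp_tso_acked() assumes this can never go negative */
	uint32_t packets_acked = pcount_before_trim - pcount_after_trim;

	/* prints 4294967295 (0xffffffff): the value handed to pkts_acked() */
	printf("packets_acked = %u\n", packets_acked);
	return 0;
}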
From 6f01fd6e6f6809061b56e78f1e8d143099716d70 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Sat, 28 Jan 2012 16:11:03 +0000
Subject: af_unix: fix EPOLLET regression for stream sockets

Commit 0884d7aa24 (AF_UNIX: Fix poll blocking problem when reading from
a stream socket) added a regression for epoll() in Edge Triggered mode
(EPOLLET).

The appropriate fix is to use skb_peek()/skb_unlink() instead of
skb_dequeue(), and only call skb_unlink() when the skb is fully
consumed.

This removes the need to requeue a partial skb at the head of
sk_receive_queue, and the extra sk->sk_data_ready() calls that
introduced the regression.

This is safe because once an skb is given to sk_receive_queue, it is
not modified by a writer, and readers are serialized by the u->readlock
mutex.

This also reduces the number of spinlock acquisitions for small reads
or MSG_PEEK users, so it should improve overall performance.

Reported-by: Nick Mathewson
Signed-off-by: Eric Dumazet
Cc: Alexey Moiseytsev
Signed-off-by: David S. Miller
---
 net/unix/af_unix.c | 19 ++++---------------
 1 file changed, 4 insertions(+), 15 deletions(-)

(limited to 'net')

diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aad8fb699989..85d3bb7490aa 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1918,7 +1918,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 		struct sk_buff *skb;
 
 		unix_state_lock(sk);
-		skb = skb_dequeue(&sk->sk_receive_queue);
+		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb == NULL) {
 			unix_sk(sk)->recursion_level = 0;
 			if (copied >= target)
@@ -1958,11 +1958,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 		if (check_creds) {
 			/* Never glue messages from different writers */
 			if ((UNIXCB(skb).pid != siocb->scm->pid) ||
-			    (UNIXCB(skb).cred != siocb->scm->cred)) {
-				skb_queue_head(&sk->sk_receive_queue, skb);
-				sk->sk_data_ready(sk, skb->len);
+			    (UNIXCB(skb).cred != siocb->scm->cred))
 				break;
-			}
 		} else {
 			/* Copy credentials */
 			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
@@ -1977,8 +1974,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 		chunk = min_t(unsigned int, skb->len, size);
 		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
-			skb_queue_head(&sk->sk_receive_queue, skb);
-			sk->sk_data_ready(sk, skb->len);
 			if (copied == 0)
 				copied = -EFAULT;
 			break;
@@ -1993,13 +1988,10 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 			if (UNIXCB(skb).fp)
 				unix_detach_fds(siocb->scm, skb);
 
-			/* put the skb back if we didn't use it up.. */
-			if (skb->len) {
-				skb_queue_head(&sk->sk_receive_queue, skb);
-				sk->sk_data_ready(sk, skb->len);
+			if (skb->len)
 				break;
-			}
 
+			skb_unlink(skb, &sk->sk_receive_queue);
 			consume_skb(skb);
 
 			if (siocb->scm->fp)
@@ -2010,9 +2002,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 			if (UNIXCB(skb).fp)
 				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
 
-			/* put message back and return */
-			skb_queue_head(&sk->sk_receive_queue, skb);
-			sk->sk_data_ready(sk, skb->len);
 			break;
 		}
 	} while (size);
-- cgit v1.2.3
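[Editorial illustration: the peek-then-unlink discipline adopted above
generalizes beyond sk_buff queues. A self-contained userspace analogue with a
hypothetical singly-linked queue, not the kernel's sk_buff_head API: the head
element is only removed once fully consumed, so a partial read never has to
requeue anything or re-signal readers:]

#include <stdio.h>
#include <string.h>

struct node {
	struct node *next;
	size_t len;       /* bytes still unread in this buffer */
	const char *data;
};

/* copy up to 'want' bytes; dequeue the head only when it is used up */
static size_t read_some(struct node **head, char *out, size_t want)
{
	struct node *skb = *head;          /* "peek" the head */
	if (!skb)
		return 0;

	size_t chunk = want < skb->len ? want : skb->len;
	memcpy(out, skb->data, chunk);
	skb->data += chunk;
	skb->len -= chunk;

	if (skb->len == 0)
		*head = skb->next;         /* "unlink": fully consumed */
	return chunk;
}

int main(void)
{
	struct node b = { NULL, 5, "world" };
	struct node a = { &b, 5, "hello" };
	struct node *q = &a;
	char buf[16];

	size_t n = read_some(&q, buf, 3); /* partial read keeps 'a' queued */
	printf("%zu bytes read, head still has %zu left\n", n, q->len);
	return 0;
}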