author     David S. Miller <davem@davemloft.net>   2011-12-23 17:13:56 -0500
committer  David S. Miller <davem@davemloft.net>   2011-12-23 17:13:56 -0500
commit     abb434cb0539fb355c1c921f8fd761efbbac3462 (patch)
tree       24a7d99ec161f8fd4dc9ff03c9c4cc93be883ce6 /net
parent     2494654d4890316e7340fb8b3458daad0474a1b9 (diff)
parent     6350323ad8def2ac00d77cdee3b79c9b9fba75c4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/bluetooth/l2cap_core.c

Just two overlapping changes, one added an initialization of a local
variable, and another change added a new local variable.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_conn.c      |   2
-rw-r--r--  net/bluetooth/l2cap_core.c    |  12
-rw-r--r--  net/bluetooth/rfcomm/core.c   |   1
-rw-r--r--  net/bridge/br_netfilter.c     |   8
-rw-r--r--  net/ceph/crush/mapper.c       |  35
-rw-r--r--  net/core/flow.c               |  12
-rw-r--r--  net/core/net-sysfs.c          |   7
-rw-r--r--  net/core/sock.c               |   6
-rw-r--r--  net/ipv4/ipconfig.c           |   4
-rw-r--r--  net/ipv4/route.c              | 112
-rw-r--r--  net/ipv6/ip6_output.c         |   2
-rw-r--r--  net/llc/af_llc.c              |  14
-rw-r--r--  net/netfilter/xt_connbytes.c  |   6
-rw-r--r--  net/packet/af_packet.c        |   6
-rw-r--r--  net/sched/sch_mqprio.c        |   2
-rw-r--r--  net/sctp/associola.c          |   2
-rw-r--r--  net/sctp/output.c             |   8
-rw-r--r--  net/sctp/outqueue.c           |   6
-rw-r--r--  net/sctp/protocol.c           |   3
-rw-r--r--  net/sctp/socket.c             |   2
-rw-r--r--  net/sctp/sysctl.c             |  13
-rw-r--r--  net/sunrpc/sched.c            |  30
-rw-r--r--  net/sunrpc/xprt.c             |  10
-rw-r--r--  net/xfrm/xfrm_policy.c        |  18
24 files changed, 243 insertions, 78 deletions
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b328ac611ccd..3fa08dda825e 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -674,7 +674,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
goto encrypt;
auth:
- if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
return 0;
if (!hci_conn_auth(conn, sec_level, auth_type))
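The fix above drops a side effect: test_and_set_bit() would set HCI_CONN_ENCRYPT_PEND while checking it, so a security request that went on to authenticate could leave a stale pending flag behind, whereas test_bit() only reads the bit. A minimal userspace sketch of the semantic difference, modeling the kernel helpers with GCC __atomic builtins (illustrative, not part of the patch):

/* Userspace sketch of test_bit vs. test_and_set_bit; the kernel
 * helpers operate on unsigned long bitmaps, modeled here with GCC's
 * __atomic builtins. */
#include <stdio.h>

static unsigned long pend;

static int test_bit_sketch(int nr, unsigned long *addr)
{
    return (__atomic_load_n(addr, __ATOMIC_SEQ_CST) >> nr) & 1UL;
}

static int test_and_set_bit_sketch(int nr, unsigned long *addr)
{
    unsigned long old = __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_SEQ_CST);
    return (old >> nr) & 1UL;   /* reports the OLD value, but sets the bit */
}

int main(void)
{
    /* test_and_set_bit reports "not pending" yet leaves the bit set... */
    printf("was pending: %d\n", test_and_set_bit_sketch(0, &pend));
    /* ...so a later plain test_bit now sees a stale pending flag. */
    printf("now pending: %d\n", test_bit_sketch(0, &pend));
    return 0;
}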
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 26dc3f6a8346..9bc22e4c4c61 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2372,7 +2372,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
void *ptr = req->data;
int type, olen;
unsigned long val;
- struct l2cap_conf_rfc rfc;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
struct l2cap_conf_efs efs;
BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
@@ -2522,6 +2522,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
}
}
+ /* Use sane default values in case a misbehaving remote device
+ * did not send an RFC option.
+ */
+ rfc.mode = chan->mode;
+ rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+
+ BT_ERR("Expected RFC option was not found, using defaults");
+
done:
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index e5ddef081e69..eac849b935a1 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1162,6 +1162,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
if (list_empty(&s->dlcs)) {
s->state = BT_DISCONN;
rfcomm_send_disc(s, 0);
+ rfcomm_session_clear_timer(s);
}
break;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 52e44605fb7b..84122472656c 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
return NULL;
}
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+ return dst->dev->mtu;
+}
+
static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
.cow_metrics = fake_cow_metrics,
.neigh_lookup = fake_neigh_lookup,
+ .mtu = fake_mtu,
};
/*
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
rt->dst.dev = br->dev;
rt->dst.path = &rt->dst;
dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
- rt->dst.flags = DST_NOXFRM;
+ rt->dst.flags = DST_NOXFRM | DST_NOPEER;
rt->dst.ops = &fake_dst_ops;
}
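dst_mtu() is resolved through the dst_ops table, so the bridge's fake route needs an .mtu method of its own; fake_mtu() simply mirrors the underlying device's MTU. A toy sketch of this ops-table pattern with hypothetical names (not the kernel's dst API):

#include <stdio.h>

/* Hypothetical miniature of the dst_ops pattern: a struct of function
 * pointers where a missing .mtu hook means callers fall back to a
 * default; supplying the hook makes the answer explicit. */
struct fake_dev { unsigned int mtu; };
struct fake_dst { struct fake_dev *dev; const struct fake_dst_ops *ops; };
struct fake_dst_ops {
    unsigned int (*mtu)(const struct fake_dst *dst);
};

static unsigned int fake_mtu(const struct fake_dst *dst)
{
    return dst->dev->mtu;               /* mirror the device's MTU */
}

static const struct fake_dst_ops ops_with_mtu = { .mtu = fake_mtu };

static unsigned int dst_mtu(const struct fake_dst *dst)
{
    /* Call the hook when present, else a generic default. */
    return dst->ops->mtu ? dst->ops->mtu(dst) : 1500;
}

int main(void)
{
    struct fake_dev dev = { .mtu = 9000 };
    struct fake_dst dst = { .dev = &dev, .ops = &ops_with_mtu };
    printf("mtu = %u\n", dst_mtu(&dst));   /* prints 9000 */
    return 0;
}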
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 42599e31dcad..3a94eae7abe9 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -477,7 +477,6 @@ int crush_do_rule(struct crush_map *map,
int i, j;
int numrep;
int firstn;
- int rc = -1;
BUG_ON(ruleno >= map->max_rules);
@@ -491,23 +490,18 @@ int crush_do_rule(struct crush_map *map,
* that this may or may not correspond to the specific types
* referenced by the crush rule.
*/
- if (force >= 0) {
- if (force >= map->max_devices ||
- map->device_parents[force] == 0) {
- /*dprintk("CRUSH: forcefed device dne\n");*/
- rc = -1; /* force fed device dne */
- goto out;
- }
- if (!is_out(map, weight, force, x)) {
- while (1) {
- force_context[++force_pos] = force;
- if (force >= 0)
- force = map->device_parents[force];
- else
- force = map->bucket_parents[-1-force];
- if (force == 0)
- break;
- }
+ if (force >= 0 &&
+ force < map->max_devices &&
+ map->device_parents[force] != 0 &&
+ !is_out(map, weight, force, x)) {
+ while (1) {
+ force_context[++force_pos] = force;
+ if (force >= 0)
+ force = map->device_parents[force];
+ else
+ force = map->bucket_parents[-1-force];
+ if (force == 0)
+ break;
}
}
@@ -600,10 +594,7 @@ int crush_do_rule(struct crush_map *map,
BUG_ON(1);
}
}
- rc = result_len;
-
-out:
- return rc;
+ return result_len;
}
diff --git a/net/core/flow.c b/net/core/flow.c
index 8ae42de9c79e..e318c7e98042 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
put_online_cpus();
}
+static void flow_cache_flush_task(struct work_struct *work)
+{
+ flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+ schedule_work(&flow_cache_flush_work);
+}
+
static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
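flow_cache_flush() must run in process context (note the put_online_cpus() in the context above, which pairs with a sleeping get_online_cpus()), so callers in atomic context need flow_cache_flush_deferred(), which queues the flush onto a work item instead. A userspace analog of the defer-to-worker pattern, with purely illustrative names (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int flush_requested;

static void cache_flush(void)            /* may block; process context only */
{
    sleep(1);                            /* stand-in for the slow flush */
    puts("cache flushed");
}

static void *worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!flush_requested)
        pthread_cond_wait(&cond, &lock);
    flush_requested = 0;
    pthread_mutex_unlock(&lock);
    cache_flush();
    return NULL;
}

static void cache_flush_deferred(void)   /* safe from non-sleeping callers */
{
    pthread_mutex_lock(&lock);
    flush_requested = 1;                 /* just record the request... */
    pthread_cond_signal(&cond);          /* ...and kick the worker */
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    cache_flush_deferred();              /* returns without blocking */
    puts("caller continues immediately");
    pthread_join(t, NULL);
    return 0;
}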
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 9d134636f87c..4b4d0b0a3543 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -669,11 +669,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
if (count) {
int i;
- if (count > 1<<30) {
+ if (count > INT_MAX)
+ return -EINVAL;
+ count = roundup_pow_of_two(count);
+ if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+ / sizeof(struct rps_dev_flow)) {
/* Enforce a limit to prevent overflow */
return -EINVAL;
}
- count = roundup_pow_of_two(count);
table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
if (!table)
return -ENOMEM;
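The allocation size here is sizeof(struct rps_dev_flow_table) plus count entries of struct rps_dev_flow, and roundup_pow_of_two() can itself enlarge count, so the new code bounds count before rounding and then verifies the multiplication cannot wrap. A runnable arithmetic sketch of the same guard, with stand-in structure sizes:

#include <limits.h>
#include <stdio.h>

#define HEADER_SIZE 16UL   /* stand-in for sizeof(struct rps_dev_flow_table) */
#define ENTRY_SIZE   8UL   /* stand-in for sizeof(struct rps_dev_flow) */

static unsigned long roundup_pow_of_two_ul(unsigned long n)
{
    unsigned long p = 1;
    while (p < n)
        p <<= 1;
    return p;
}

static int check_count(unsigned long count)
{
    if (count > INT_MAX)
        return -1;                        /* reject before rounding up */
    count = roundup_pow_of_two_ul(count); /* rounding may enlarge count */
    /* HEADER_SIZE + count * ENTRY_SIZE must fit in unsigned long */
    if (count > (ULONG_MAX - HEADER_SIZE) / ENTRY_SIZE)
        return -1;
    printf("count=%lu -> alloc %lu bytes\n",
           count, HEADER_SIZE + count * ENTRY_SIZE);
    return 0;
}

int main(void)
{
    check_count(1000);        /* ok: rounded up to 1024 */
    check_count(ULONG_MAX);   /* rejected: would overflow */
    return 0;
}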
diff --git a/net/core/sock.c b/net/core/sock.c
index 0c67facf42f3..002939cfc069 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -339,11 +339,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
- /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
- number of warnings when compiling with -W --ANK
- */
- if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
atomic_inc(&sk->sk_drops);
trace_sock_rcvqueue_full(sk, skb);
return -ENOMEM;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 915eb5265b2e..7e4ec9fc2cef 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
}
}
+ /* no point in waiting if we could not bring up at least one device */
+ if (!ic_first_dev)
+ goto have_carrier;
+
/* wait for a carrier on at least one device */
start = jiffies;
while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f30112f7559a..bcacf54e5418 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -91,6 +91,7 @@
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
+#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
@@ -119,6 +120,7 @@
static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
@@ -132,6 +134,9 @@ static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
static int redirect_genid;
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
+
/*
* Interface to generic destination cache.
*/
@@ -829,6 +834,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
return ONE;
}
+static void rt_check_expire(void)
+{
+ static unsigned int rover;
+ unsigned int i = rover, goal;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
+ unsigned long samples = 0;
+ unsigned long sum = 0, sum2 = 0;
+ unsigned long delta;
+ u64 mult;
+
+ delta = jiffies - expires_ljiffies;
+ expires_ljiffies = jiffies;
+ mult = ((u64)delta) << rt_hash_log;
+ if (ip_rt_gc_timeout > 1)
+ do_div(mult, ip_rt_gc_timeout);
+ goal = (unsigned int)mult;
+ if (goal > rt_hash_mask)
+ goal = rt_hash_mask + 1;
+ for (; goal > 0; goal--) {
+ unsigned long tmo = ip_rt_gc_timeout;
+ unsigned long length;
+
+ i = (i + 1) & rt_hash_mask;
+ rthp = &rt_hash_table[i].chain;
+
+ if (need_resched())
+ cond_resched();
+
+ samples++;
+
+ if (rcu_dereference_raw(*rthp) == NULL)
+ continue;
+ length = 0;
+ spin_lock_bh(rt_hash_lock_addr(i));
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+ prefetch(rth->dst.rt_next);
+ if (rt_is_expired(rth)) {
+ *rthp = rth->dst.rt_next;
+ rt_free(rth);
+ continue;
+ }
+ if (rth->dst.expires) {
+ /* Entry is expired even if it is in use */
+ if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+ tmo >>= 1;
+ rthp = &rth->dst.rt_next;
+ /*
+ * We only count entries on
+ * a chain with equal hash inputs once
+ * so that entries for different QOS
+ * levels, and other non-hash input
+ * attributes don't unfairly skew
+ * the length computation
+ */
+ length += has_noalias(rt_hash_table[i].chain, rth);
+ continue;
+ }
+ } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+ goto nofree;
+
+ /* Cleanup aged off entries. */
+ *rthp = rth->dst.rt_next;
+ rt_free(rth);
+ }
+ spin_unlock_bh(rt_hash_lock_addr(i));
+ sum += length;
+ sum2 += length*length;
+ }
+ if (samples) {
+ unsigned long avg = sum / samples;
+ unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+ rt_chain_length_max = max_t(unsigned long,
+ ip_rt_gc_elasticity,
+ (avg + 4*sd) >> FRACT_BITS);
+ }
+ rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+ rt_check_expire();
+ schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
/*
* Perturbation of rt_genid by a small quantity [1..256]
* Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -1265,7 +1361,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
struct rtable *rt = (struct rtable *) dst;
- if (rt) {
+ if (rt && !(rt->dst.flags & DST_NOPEER)) {
if (rt->peer == NULL)
rt_bind_peer(rt, rt->rt_dst, 1);
@@ -1276,7 +1372,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
iph->id = htons(inet_getid(rt->peer, more));
return;
}
- } else
+ } else if (!rt)
printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
__builtin_return_address(0));
@@ -3173,6 +3269,13 @@ static ctl_table ipv4_route_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
{
+ .procname = "gc_interval",
+ .data = &ip_rt_gc_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
.procname = "redirect_load",
.data = &ip_rt_redirect_load,
.maxlen = sizeof(int),
@@ -3382,6 +3485,11 @@ int __init ip_rt_init(void)
devinet_init();
ip_fib_init();
+ INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+ expires_ljiffies = jiffies;
+ schedule_delayed_work(&expires_work,
+ net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
if (ip_rt_proc_init())
printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
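rt_check_expire() above sizes rt_chain_length_max as roughly avg + 4*sd over the sampled chain lengths, a four-sigma outlier bound floored at ip_rt_gc_elasticity; the kernel accumulates lengths in fixed point (has_noalias() returns ONE = 1 << FRACT_BITS) and uses int_sqrt(), both simplified away in this runnable sketch:

#include <stdio.h>

static unsigned long int_sqrt_sketch(unsigned long x)
{
    unsigned long r = 0;
    while ((r + 1) * (r + 1) <= x)      /* naive integer square root */
        r++;
    return r;
}

int main(void)
{
    unsigned long lengths[] = { 3, 5, 4, 6, 2, 4, 5, 3 };  /* sampled chains */
    unsigned long n = sizeof(lengths) / sizeof(lengths[0]);
    unsigned long sum = 0, sum2 = 0;

    for (unsigned long i = 0; i < n; i++) {
        sum  += lengths[i];
        sum2 += lengths[i] * lengths[i];
    }
    unsigned long avg = sum / n;                                /* mean */
    unsigned long sd  = int_sqrt_sketch(sum2 / n - avg * avg);  /* std dev */
    unsigned long elasticity = 8;            /* floor, like gc_elasticity */
    unsigned long max = avg + 4 * sd;        /* 4-sigma bound */
    if (max < elasticity)
        max = elasticity;
    printf("avg=%lu sd=%lu -> chain_length_max=%lu\n", avg, sd, max);
    return 0;
}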
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 71d26999c955..d97e07183ce9 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -604,7 +604,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
static atomic_t ipv6_fragmentation_id;
int old, new;
- if (rt) {
+ if (rt && !(rt->dst.flags & DST_NOPEER)) {
struct inet_peer *peer;
if (!rt->rt6i_peer)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index dfd3a648a551..a18e6c3d36e3 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
copied += used;
len -= used;
+ /* For non-stream protocols we get one packet per recvmsg call */
+ if (sk->sk_type != SOCK_STREAM)
+ goto copy_uaddr;
+
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, 0);
*seq = 0;
}
- /* For non stream protcols we get one packet per recvmsg call */
- if (sk->sk_type != SOCK_STREAM)
- goto copy_uaddr;
-
/* Partial read */
if (used + offset < skb->len)
continue;
@@ -857,6 +857,12 @@ copy_uaddr:
}
if (llc_sk(sk)->cmsg_flags)
llc_cmsg_rcv(msg, skb);
+
+ if (!(flags & MSG_PEEK)) {
+ sk_eat_skb(sk, skb, 0);
+ *seq = 0;
+ }
+
goto out;
}
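The reorder above makes sure that, for non-stream sockets, the source address and cmsg data are copied out of the skb before sk_eat_skb() releases it; previously the skb was consumed first. A minimal sketch of the copy-before-free ordering this enforces (illustrative types, not the LLC code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt { char addr[16]; char data[32]; };

static void consume(struct pkt *p)       /* stand-in for sk_eat_skb() */
{
    free(p);
}

static void recv_one(struct pkt *p, char *addr_out)
{
    /* Correct order: copy the address while the packet is still valid, */
    memcpy(addr_out, p->addr, sizeof(p->addr));
    /* ...and only then release it.  Doing these two steps the other way
     * around reads freed memory. */
    consume(p);
}

int main(void)
{
    struct pkt *p = malloc(sizeof(*p));
    strcpy(p->addr, "01:23:45");
    strcpy(p->data, "payload");
    char addr[16];
    recv_one(p, addr);
    printf("sender: %s\n", addr);
    return 0;
}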
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 5b138506690e..9ddf1c3bfb39 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
break;
}
- if (sinfo->count.to)
+ if (sinfo->count.to >= sinfo->count.from)
return what <= sinfo->count.to && what >= sinfo->count.from;
- else
- return what >= sinfo->count.from;
+ else /* inverted */
+ return what < sinfo->count.to || what > sinfo->count.from;
}
static int connbytes_mt_check(const struct xt_mtchk_param *par)
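A normal --connbytes range has count.to >= count.from and matches inside it; userspace appears to encode a negated range by swapping the bounds, so when to < from the match must succeed strictly outside the swapped interval, which the old to-is-nonzero test got wrong. A runnable sketch of the corrected predicate:

#include <stdio.h>

static int connbytes_match(unsigned long long what,
                           unsigned long long from,
                           unsigned long long to)
{
    if (to >= from)                       /* normal, inclusive range */
        return what <= to && what >= from;
    /* inverted: bounds swapped, match strictly outside (to, from) */
    return what < to || what > from;
}

int main(void)
{
    /* normal range 100..200 */
    printf("%d %d\n", connbytes_match(150, 100, 200),   /* 1 */
                      connbytes_match(250, 100, 200));  /* 0 */
    /* inverted range: from=200, to=100 means "NOT 100..200" */
    printf("%d %d\n", connbytes_match(150, 200, 100),   /* 0 */
                      connbytes_match(250, 200, 100));  /* 1 */
    return 0;
}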
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 0da505c9ac23..e56ca75e3f43 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1631,8 +1631,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
if (snaplen > res)
snaplen = res;
- if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto drop_n_acct;
if (skb_shared(skb)) {
@@ -1763,8 +1762,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
- atomic_read(&sk->sk_rmem_alloc) + skb->truesize
- < (unsigned)sk->sk_rcvbuf) {
+ atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
} else {
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index f88256cbacbf..28de43092330 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
if (!netif_is_multiqueue(dev))
return -EOPNOTSUPP;
- if (nla_len(opt) < sizeof(*qopt))
+ if (!opt || nla_len(opt) < sizeof(*qopt))
return -EINVAL;
qopt = nla_data(opt);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 152b5b3c3fff..acd2edbc073e 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
- (unsigned long)sp->autoclose * HZ;
+ min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 08b3cead6503..817174eb5f41 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
/* Keep track of how many bytes are in flight to the receiver. */
asoc->outqueue.outstanding_bytes += datasize;
- /* Update our view of the receiver's rwnd. Include sk_buff overhead
- * while updating peer.rwnd so that it reduces the chances of a
- * receiver running out of receive buffer space even when receive
* window is still open. This can happen when a sender is
* sending small messages.
- */
- datasize += sizeof(struct sk_buff);
+ /* Update our view of the receiver's rwnd. */
if (datasize < rwnd)
rwnd -= datasize;
else
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 14c2b06028ff..cfeb1d4a1ee6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
chunk->transport->flight_size -=
sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
- sizeof(struct sk_buff));
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
}
continue;
}
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
* (Section 7.2.4)), add the data size of those
* chunks to the rwnd.
*/
- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
- sizeof(struct sk_buff));
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
if (chunk->transport)
transport->flight_size -= sctp_data_size(chunk);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 544a9b68eb53..5942d27b1444 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
+ /* Initialize maximum autoclose timeout. */
+ sctp_max_autoclose = INT_MAX / HZ;
+
/* Initialize handle used for association ids. */
idr_init(&sctp_assocs_id);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index db0308344d07..408ebd0e7330 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
return -EINVAL;
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
- /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
- sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
return 0;
}
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 6b3952961b85..60ffbd067ff7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
static int sack_timer_max = 500;
static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+ (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+ ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
.extra1 = &one,
.extra2 = &rwnd_scale_max,
},
+ {
+ .procname = "max_autoclose",
+ .data = &sctp_max_autoclose,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax,
+ .extra1 = &max_autoclose_min,
+ .extra2 = &max_autoclose_max,
+ },
{ /* sentinel */ }
};
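sp->autoclose is in seconds and is later multiplied by HZ (see the associola.c hunk above), so capping it at INT_MAX / HZ, the default for the new max_autoclose sysctl set in protocol.c, guarantees the conversion to jiffies cannot overflow. A worked sketch of the clamp, with the HZ value assumed for illustration:

#include <limits.h>
#include <stdio.h>

#define HZ 1000UL   /* assumed tick rate */

static unsigned long max_autoclose = INT_MAX / HZ;     /* default cap */

static unsigned long autoclose_jiffies(unsigned long autoclose_secs)
{
    unsigned long secs = autoclose_secs < max_autoclose
                       ? autoclose_secs : max_autoclose;  /* min_t clamp */
    return secs * HZ;             /* <= INT_MAX, so this cannot overflow */
}

int main(void)
{
    printf("cap   = %lu s\n", max_autoclose);   /* 2147483 with HZ=1000 */
    printf("300 s -> %lu jiffies\n", autoclose_jiffies(300));
    printf("huge  -> %lu jiffies (clamped)\n", autoclose_jiffies(ULONG_MAX));
    return 0;
}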
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index d12ffa545811..00a1a2acd587 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -590,6 +590,27 @@ void rpc_prepare_task(struct rpc_task *task)
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+ /* Initialize retry counters */
+ task->tk_garb_retry = 2;
+ task->tk_cred_retry = 2;
+ task->tk_rebind_retry = 2;
+
+ /* starting timestamp */
+ task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+ task->tk_timeouts = 0;
+ task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+ rpc_init_task_statistics(task);
+}
+
/*
* Helper that calls task->tk_ops->rpc_call_done if it exists
*/
@@ -602,6 +623,7 @@ void rpc_exit_task(struct rpc_task *task)
WARN_ON(RPC_ASSASSINATED(task));
/* Always release the RPC slot and buffer memory */
xprt_release(task);
+ rpc_reset_task_statistics(task);
}
}
}
@@ -804,11 +826,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
task->tk_calldata = task_setup_data->callback_data;
INIT_LIST_HEAD(&task->tk_task);
- /* Initialize retry counters */
- task->tk_garb_retry = 2;
- task->tk_cred_retry = 2;
- task->tk_rebind_retry = 2;
-
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
task->tk_owner = current->tgid;
@@ -818,8 +835,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
if (task->tk_ops->rpc_call_prepare != NULL)
task->tk_action = rpc_prepare_task;
- /* starting timestamp */
- task->tk_start = ktime_get();
+ rpc_init_task_statistics(task);
dprintk("RPC: new task initialized, procpid %u\n",
task_pid_nr(current));
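Moving the retry budgets and start timestamp into rpc_init_task_statistics() lets rpc_exit_task() call rpc_reset_task_statistics(), so a task that is restarted after completing begins with fresh counters rather than inheriting exhausted ones. A small sketch of this init/reset factoring (illustrative fields, not the RPC structs):

#include <stdio.h>
#include <time.h>

struct task {
    int garb_retry, cred_retry, rebind_retry;  /* retry budgets */
    int timeouts;
    time_t start;
};

static void init_task_statistics(struct task *t)
{
    t->garb_retry   = 2;
    t->cred_retry   = 2;
    t->rebind_retry = 2;
    t->start = time(NULL);               /* fresh start timestamp */
}

static void reset_task_statistics(struct task *t)
{
    t->timeouts = 0;                     /* clear accumulated state... */
    init_task_statistics(t);             /* ...then reuse the initializer */
}

int main(void)
{
    struct task t = { 0 };
    init_task_statistics(&t);
    t.cred_retry--;                      /* simulate a consumed retry */
    t.timeouts = 3;
    reset_task_statistics(&t);           /* task re-run: budgets restored */
    printf("cred_retry=%d timeouts=%d\n", t.cred_retry, t.timeouts);
    return 0;
}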
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f4385e45a5fc..c64c0ef519b5 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -995,13 +995,11 @@ out_init_req:
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
- if (xprt_dynamic_free_slot(xprt, req))
- return;
-
- memset(req, 0, sizeof(*req)); /* mark unused */
-
spin_lock(&xprt->reserve_lock);
- list_add(&req->rq_list, &xprt->free);
+ if (!xprt_dynamic_free_slot(xprt, req)) {
+ memset(req, 0, sizeof(*req)); /* mark unused */
+ list_add(&req->rq_list, &xprt->free);
+ }
rpc_wake_up_next(&xprt->backlog);
spin_unlock(&xprt->reserve_lock);
}
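The rework above pulls the dynamic-slot check and the "mark unused" reset inside xprt->reserve_lock, so deciding a request's fate and putting it back on xprt->free happen atomically with respect to other allocators. A minimal pthread sketch of keeping every free-list decision under the list's lock (illustrative, not the RPC code):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct req { struct req *next; char buf[32]; };

static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *free_list;

static int dynamic_free(struct req *r)  /* stand-in: true if slot is freed */
{
    (void)r;
    return 0;                            /* here, always recycle instead */
}

static void free_slot(struct req *r)
{
    pthread_mutex_lock(&reserve_lock);
    if (!dynamic_free(r)) {              /* decision made under the lock */
        memset(r, 0, sizeof(*r));        /* mark unused */
        r->next = free_list;             /* list edit also under the lock */
        free_list = r;
    }
    pthread_mutex_unlock(&reserve_lock);
}

int main(void)
{
    struct req r;
    free_slot(&r);
    printf("free list head: %p\n", (void *)free_list);
    return 0;
}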
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index eb6b0b7781a5..7661576b6f45 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
{
struct dst_entry *head, *next;
- flow_cache_flush();
-
spin_lock_bh(&xfrm_policy_sk_bundle_lock);
head = xfrm_policy_sk_bundles;
xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
}
}
+static void xfrm_garbage_collect(struct net *net)
+{
+ flow_cache_flush();
+ __xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+ flow_cache_flush_deferred();
+ __xfrm_garbage_collect(net);
+}
+
static void xfrm_init_pmtu(struct dst_entry *dst)
{
do {
@@ -2422,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
if (likely(dst_ops->neigh_lookup == NULL))
dst_ops->neigh_lookup = xfrm_neigh_lookup;
if (likely(afinfo->garbage_collect == NULL))
- afinfo->garbage_collect = __xfrm_garbage_collect;
+ afinfo->garbage_collect = xfrm_garbage_collect_deferred;
xfrm_policy_afinfo[afinfo->family] = afinfo;
}
write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2516,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
switch (event) {
case NETDEV_DOWN:
- __xfrm_garbage_collect(dev_net(dev));
+ xfrm_garbage_collect(dev_net(dev));
}
return NOTIFY_DONE;
}