author     Eric Dumazet <edumazet@google.com>      2015-05-19 13:26:55 -0700
committer  David S. Miller <davem@davemloft.net>   2015-05-21 16:56:40 -0400
commit     eb9344781a2f8381ed60cd9e662d9ced2d168ecb (patch)
tree       f1924cedc2aa178d4a1bc953204fce035a6a4233 /net/ipv4/tcp.c
parent     765c9c639fbb132af0cafc6e1da22fe6cea26bb8 (diff)
download   linux-eb9344781a2f8381ed60cd9e662d9ced2d168ecb.tar.bz2
tcp: add a force_schedule argument to sk_stream_alloc_skb()
In commit 8e4d980ac215 ("tcp: fix behavior for epoll edge trigger") we fixed a possible hang of TCP sockets under memory pressure, by allowing sk_stream_alloc_skb() to use sk_forced_mem_schedule() if no packet is in the socket write queue.

It turns out there are other cases where we want to force memory scheduling:

tcp_fragment() & tso_fragment() need to split a big TSO packet into two smaller ones. If we block here because of TCP memory pressure, we can effectively prevent the TCP socket from sending new data.

If no further ACK is coming, this hang would be definitive, and the socket would have no chance to effectively reduce its memory usage.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
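The fragmenting call sites that motivated the new argument live in net/ipv4/tcp_output.c and fall outside this page's diffstat, which is limited to net/ipv4/tcp.c. A minimal sketch of how those callers are expected to use it, reconstructed from the commit message above (variable names such as buff and nsize are assumptions, not hunks from this page):

    /* Sketch only, based on the commit message: tcp_fragment() and
     * tso_fragment() must not be blocked by TCP memory accounting while
     * splitting a TSO packet, so they force the schedule unconditionally.
     */
    buff = sk_stream_alloc_skb(sk, nsize, gfp, true);   /* tcp_fragment() */
    buff = sk_stream_alloc_skb(sk, 0, gfp, true);       /* tso_fragment() */

Passing true steers sk_stream_alloc_skb() into the sk_forced_mem_schedule() branch, so the split cannot fail on accounting and the socket keeps a path to shrink its write queue even under tcp_mem pressure.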
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--   net/ipv4/tcp.c   19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bb9bb844204f..ca1d476c80ef 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -808,7 +808,8 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 }
 EXPORT_SYMBOL(tcp_splice_read);
 
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+                                    bool force_schedule)
 {
         struct sk_buff *skb;
 
@@ -820,15 +821,15 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 
         skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
         if (likely(skb)) {
-                bool mem_schedule;
+                bool mem_scheduled;
 
-                if (skb_queue_len(&sk->sk_write_queue) == 0) {
-                        mem_schedule = true;
+                if (force_schedule) {
+                        mem_scheduled = true;
                         sk_forced_mem_schedule(sk, skb->truesize);
                 } else {
-                        mem_schedule = sk_wmem_schedule(sk, skb->truesize);
+                        mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
                 }
-                if (likely(mem_schedule)) {
+                if (likely(mem_scheduled)) {
                         skb_reserve(skb, sk->sk_prot->max_header);
                         /*
                          * Make sure that we have exactly size bytes
@@ -918,7 +919,8 @@ new_segment:
                         if (!sk_stream_memory_free(sk))
                                 goto wait_for_sndbuf;
 
-                        skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+                        skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
+                                                  skb_queue_empty(&sk->sk_write_queue));
                         if (!skb)
                                 goto wait_for_memory;
 
@@ -1154,7 +1156,8 @@ new_segment:
 
                         skb = sk_stream_alloc_skb(sk,
                                                   select_size(sk, sg),
-                                                  sk->sk_allocation);
+                                                  sk->sk_allocation,
+                                                  skb_queue_empty(&sk->sk_write_queue));
                         if (!skb)
                                 goto wait_for_memory;
 
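Note the design choice in the two call sites updated above (judging by the hunk offsets, the sendpage and sendmsg paths): they pass skb_queue_empty(&sk->sk_write_queue) rather than true, which preserves the behavior introduced by commit 8e4d980ac215, forcing memory scheduling only when the write queue is empty. The rename from mem_schedule to mem_scheduled also makes the flag read as what it records, namely whether accounting has already succeeded.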