diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2010-06-05 03:03:30 -0700 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-06-05 03:03:30 -0700 |
commit | 72e09ad107e78d69ff4d3b97a69f0aad2b77280f (patch) | |
tree | daa8a97c230802bcbb8149a21172eca2c6155d91 | |
parent | ca7335948e294faf8adf65f2c95ca18ea78540db (diff) | |
download | linux-72e09ad107e78d69ff4d3b97a69f0aad2b77280f.tar.bz2 |
ipv6: avoid high order allocations
With mtu=9000, mld_newpack() uses order-2 GFP_ATOMIC allocations, which
are very unreliable on machines where PAGE_SIZE=4K.
Limit allocated skbs to be at most one page. (order-0 allocations)
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | net/ipv6/mcast.c | 5 |
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 59f1881968c7..ab1622d7d409 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1356,7 +1356,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
 		     IPV6_TLV_PADN, 0 };

 	/* we assume size > sizeof(ra) here */
-	skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err);
+	size += LL_ALLOCATED_SPACE(dev);
+	/* limit our allocations to order-0 page */
+	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+	skb = sock_alloc_send_skb(sk, size, 1, &err);

 	if (!skb)
 		return NULL;