author		Jason Wang <jasowang@redhat.com>	2012-05-30 21:18:10 +0000
committer	David S. Miller <davem@davemloft.net>	2012-05-31 18:22:45 -0400
commit		cc9b17ad29ecaa20bfe426a8d4dbfb94b13ff1cc (patch)
tree		dbf402b10788ab22d1d9d8c14866a910be497c29 /net/core
parent		914bec1011a25f65cdc94988a6f974bfb9a3c10d (diff)
download	linux-cc9b17ad29ecaa20bfe426a8d4dbfb94b13ff1cc.tar.bz2
net: sock: validate data_len before allocating skb in sock_alloc_send_pskb()
We need to validate the number of pages consumed by data_len, otherwise the frags array could be overflowed by userspace. So this patch validates data_len and returns -EMSGSIZE when data_len would occupy more frags than MAX_SKB_FRAGS.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
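For illustration, a minimal userspace sketch of the bound check this patch adds. The PAGE_SHIFT/PAGE_SIZE/MAX_SKB_FRAGS/EMSGSIZE values below are assumptions (typical 4 KiB-page x86-64 values), not taken from this diff; only the rounding-and-compare logic mirrors the patch.

/*
 * Sketch of the data_len validation added by this patch, built with
 * assumed constants so it compiles and runs in userspace.
 */
#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)          /* 4096, assumed */
#define MAX_SKB_FRAGS (65536UL / PAGE_SIZE + 1)    /* 17 with 4 KiB pages, assumed */
#define EMSGSIZE      90                           /* assumed errno value */

/* Mirror of the new check: round data_len up to whole pages and
 * refuse anything that would need more than MAX_SKB_FRAGS frags. */
static int check_data_len(unsigned long data_len)
{
	unsigned long npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	if (npages > MAX_SKB_FRAGS)
		return -EMSGSIZE;
	return 0;
}

int main(void)
{
	/* 17 pages (69632 bytes) is still accepted ... */
	printf("69632 bytes -> %d\n", check_data_len(17 * PAGE_SIZE));
	/* ... one byte more needs an 18th frag and is rejected. */
	printf("69633 bytes -> %d\n", check_data_len(17 * PAGE_SIZE + 1));
	return 0;
}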
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/sock.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 653f8c0aedc5..9e5b71fda6ec 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 	gfp_t gfp_mask;
 	long timeo;
 	int err;
+	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	err = -EMSGSIZE;
+	if (npages > MAX_SKB_FRAGS)
+		goto failure;
 
 	gfp_mask = sk->sk_allocation;
 	if (gfp_mask & __GFP_WAIT)
@@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 			skb = alloc_skb(header_len, gfp_mask);
 			if (skb) {
-				int npages;
 				int i;
 
 				/* No pages, we're done... */
 				if (!data_len)
 					break;
 
-				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 				skb->truesize += data_len;
 				skb_shinfo(skb)->nr_frags = npages;
 				for (i = 0; i < npages; i++) {
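The hunk ends at the loop that populates one fragment descriptor per page, which is where an unvalidated npages becomes a problem: skb_shared_info keeps a fixed-size frags[] array, so indexing it past MAX_SKB_FRAGS corrupts adjacent memory. A small userspace illustration of that mismatch, with a simplified stand-in struct and assumed constants rather than the real kernel definitions:

/*
 * Illustration of the pre-patch hazard: npages derived from a
 * userspace-controlled data_len can exceed the number of slots in a
 * fixed-size frags[] array. Struct layout and constants are assumed.
 */
#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define MAX_SKB_FRAGS 17UL                 /* assumed, as above */

struct fake_shared_info {
	unsigned int nr_frags;
	void *frags[MAX_SKB_FRAGS];        /* fixed-size, like skb_shinfo()->frags */
};

int main(void)
{
	unsigned long data_len = 128UL * 1024;   /* userspace-chosen size */
	unsigned long npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	/* Before the patch, nothing capped npages against the array size. */
	printf("npages = %lu, frags[] slots = %lu%s\n",
	       npages, MAX_SKB_FRAGS,
	       npages > MAX_SKB_FRAGS ? "  -> loop would overrun frags[]" : "");
	return 0;
}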