path: root/net/core/skbuff.c
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c  307
1 file changed, 149 insertions(+), 158 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c2e4fb8f3546..1a94a3037370 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -210,7 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
shinfo->gso_type = 0;
shinfo->ip6_frag_id = 0;
shinfo->tx_flags.flags = 0;
- shinfo->frag_list = NULL;
+ skb_frag_list_init(skb);
memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
if (fclone) {
@@ -323,7 +323,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
{
struct sk_buff *list;
- for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
+ skb_walk_frags(skb, list)
skb_get(list);
}
@@ -338,7 +338,7 @@ static void skb_release_data(struct sk_buff *skb)
put_page(skb_shinfo(skb)->frags[i].page);
}
- if (skb_shinfo(skb)->frag_list)
+ if (skb_has_frags(skb))
skb_drop_fraglist(skb);
kfree(skb->head);
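The three helpers used above (skb_frag_list_init(), skb_walk_frags() and skb_has_frags()) come from include/linux/skbuff.h rather than from this file. A minimal sketch of what they are assumed to expand to, paraphrased for reference and not part of this diff:

/* Assumed definitions of the frag-list accessors adopted by this patch;
 * they hide direct use of skb_shinfo(skb)->frag_list.
 */
static inline bool skb_has_frags(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

/* Walk every sk_buff hanging off skb's frag_list. */
#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)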
@@ -381,7 +381,7 @@ static void kfree_skbmem(struct sk_buff *skb)
static void skb_release_head_state(struct sk_buff *skb)
{
- dst_release(skb->dst);
+ skb_dst_drop(skb);
#ifdef CONFIG_XFRM
secpath_put(skb->sp);
#endif
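skb_dst_drop() above, together with skb_dst() and skb_dst_set() used further down, wraps access to the skb's cached route instead of touching skb->dst directly. A rough sketch of the assumed behaviour, not taken from this diff (the real definitions live in the skbuff/dst headers of this tree):

/* Assumed behaviour of the dst accessors, sketched for reference only. */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	return skb->dst;		/* the route attached to this skb */
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->dst = dst;			/* caller passes a held reference */
}

static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->dst) {
		dst_release(skb->dst);	/* drop the reference */
		skb->dst = NULL;	/* and clear the pointer */
	}
}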
@@ -503,7 +503,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
shinfo->gso_type = 0;
shinfo->ip6_frag_id = 0;
shinfo->tx_flags.flags = 0;
- shinfo->frag_list = NULL;
+ skb_frag_list_init(skb);
memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
memset(skb, 0, offsetof(struct sk_buff, tail));
@@ -521,13 +521,12 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->transport_header = old->transport_header;
new->network_header = old->network_header;
new->mac_header = old->mac_header;
- new->dst = dst_clone(old->dst);
+ skb_dst_set(new, dst_clone(skb_dst(old)));
#ifdef CONFIG_XFRM
new->sp = secpath_get(old->sp);
#endif
memcpy(new->cb, old->cb, sizeof(old->cb));
- new->csum_start = old->csum_start;
- new->csum_offset = old->csum_offset;
+ new->csum = old->csum;
new->local_df = old->local_df;
new->pkt_type = old->pkt_type;
new->ip_summed = old->ip_summed;
@@ -538,6 +537,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#endif
new->protocol = old->protocol;
new->mark = old->mark;
+ new->iif = old->iif;
__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
@@ -550,10 +550,17 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#endif
#endif
new->vlan_tci = old->vlan_tci;
+#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
+ new->do_not_encrypt = old->do_not_encrypt;
+#endif
skb_copy_secmark(new, old);
}
+/*
+ * You should not add any new code to this function. Add it to
+ * __copy_skb_header above instead.
+ */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x
@@ -569,16 +576,11 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
n->cloned = 1;
n->nohdr = 0;
n->destructor = NULL;
- C(iif);
C(tail);
C(end);
C(head);
C(data);
C(truesize);
-#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
- C(do_not_encrypt);
- C(requeue);
-#endif
atomic_set(&n->users, 1);
atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -755,7 +757,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
skb_shinfo(n)->nr_frags = i;
}
- if (skb_shinfo(skb)->frag_list) {
+ if (skb_has_frags(skb)) {
skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
skb_clone_fraglist(n);
}
@@ -818,7 +820,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
get_page(skb_shinfo(skb)->frags[i].page);
- if (skb_shinfo(skb)->frag_list)
+ if (skb_has_frags(skb))
skb_clone_fraglist(skb);
skb_release_data(skb);
@@ -1090,7 +1092,7 @@ drop_pages:
for (; i < nfrags; i++)
put_page(skb_shinfo(skb)->frags[i].page);
- if (skb_shinfo(skb)->frag_list)
+ if (skb_has_frags(skb))
skb_drop_fraglist(skb);
goto done;
}
@@ -1185,7 +1187,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
/* Optimization: no fragments, no reasons to preestimate
* size of pulled pages. Superb.
*/
- if (!skb_shinfo(skb)->frag_list)
+ if (!skb_has_frags(skb))
goto pull_pages;
/* Estimate size of pulled pages. */
@@ -1282,8 +1284,9 @@ EXPORT_SYMBOL(__pskb_pull_tail);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
- int i, copy;
int start = skb_headlen(skb);
+ struct sk_buff *frag_iter;
+ int i, copy;
if (offset > (int)skb->len - len)
goto fault;
@@ -1325,28 +1328,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
start = end;
}
- if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list = skb_shinfo(skb)->frag_list;
+ skb_walk_frags(skb, frag_iter) {
+ int end;
- for (; list; list = list->next) {
- int end;
-
- WARN_ON(start > offset + len);
-
- end = start + list->len;
- if ((copy = end - offset) > 0) {
- if (copy > len)
- copy = len;
- if (skb_copy_bits(list, offset - start,
- to, copy))
- goto fault;
- if ((len -= copy) == 0)
- return 0;
- offset += copy;
- to += copy;
- }
- start = end;
+ WARN_ON(start > offset + len);
+
+ end = start + frag_iter->len;
+ if ((copy = end - offset) > 0) {
+ if (copy > len)
+ copy = len;
+ if (skb_copy_bits(frag_iter, offset - start, to, copy))
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ to += copy;
}
+ start = end;
}
if (!len)
return 0;
@@ -1531,6 +1529,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
.ops = &sock_pipe_buf_ops,
.spd_release = sock_spd_release,
};
+ struct sk_buff *frag_iter;
struct sock *sk = skb->sk;
/*
@@ -1545,13 +1544,11 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
/*
* now see if we have a frag_list to map
*/
- if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
- for (; list && tlen; list = list->next) {
- if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
- break;
- }
+ skb_walk_frags(skb, frag_iter) {
+ if (!tlen)
+ break;
+ if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
+ break;
}
done:
@@ -1590,8 +1587,9 @@ done:
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
- int i, copy;
int start = skb_headlen(skb);
+ struct sk_buff *frag_iter;
+ int i, copy;
if (offset > (int)skb->len - len)
goto fault;
@@ -1632,28 +1630,24 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
start = end;
}
- if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list = skb_shinfo(skb)->frag_list;
+ skb_walk_frags(skb, frag_iter) {
+ int end;
- for (; list; list = list->next) {
- int end;
-
- WARN_ON(start > offset + len);
-
- end = start + list->len;
- if ((copy = end - offset) > 0) {
- if (copy > len)
- copy = len;
- if (skb_store_bits(list, offset - start,
- from, copy))
- goto fault;
- if ((len -= copy) == 0)
- return 0;
- offset += copy;
- from += copy;
- }
- start = end;
+ WARN_ON(start > offset + len);
+
+ end = start + frag_iter->len;
+ if ((copy = end - offset) > 0) {
+ if (copy > len)
+ copy = len;
+ if (skb_store_bits(frag_iter, offset - start,
+ from, copy))
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ from += copy;
}
+ start = end;
}
if (!len)
return 0;
@@ -1670,6 +1664,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
{
int start = skb_headlen(skb);
int i, copy = start - offset;
+ struct sk_buff *frag_iter;
int pos = 0;
/* Checksum header. */
@@ -1709,29 +1704,25 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
start = end;
}
- if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list = skb_shinfo(skb)->frag_list;
+ skb_walk_frags(skb, frag_iter) {
+ int end;
- for (; list; list = list->next) {
- int end;
-
- WARN_ON(start > offset + len);
-
- end = start + list->len;
- if ((copy = end - offset) > 0) {
- __wsum csum2;
- if (copy > len)
- copy = len;
- csum2 = skb_checksum(list, offset - start,
- copy, 0);
- csum = csum_block_add(csum, csum2, pos);
- if ((len -= copy) == 0)
- return csum;
- offset += copy;
- pos += copy;
- }
- start = end;
+ WARN_ON(start > offset + len);
+
+ end = start + frag_iter->len;
+ if ((copy = end - offset) > 0) {
+ __wsum csum2;
+ if (copy > len)
+ copy = len;
+ csum2 = skb_checksum(frag_iter, offset - start,
+ copy, 0);
+ csum = csum_block_add(csum, csum2, pos);
+ if ((len -= copy) == 0)
+ return csum;
+ offset += copy;
+ pos += copy;
}
+ start = end;
}
BUG_ON(len);
@@ -1746,6 +1737,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
{
int start = skb_headlen(skb);
int i, copy = start - offset;
+ struct sk_buff *frag_iter;
int pos = 0;
/* Copy header. */
@@ -1790,31 +1782,27 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
start = end;
}
- if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list = skb_shinfo(skb)->frag_list;
+ skb_walk_frags(skb, frag_iter) {
+ __wsum csum2;
+ int end;
- for (; list; list = list->next) {
- __wsum csum2;
- int end;
-
- WARN_ON(start > offset + len);
-
- end = start + list->len;
- if ((copy = end - offset) > 0) {
- if (copy > len)
- copy = len;
- csum2 = skb_copy_and_csum_bits(list,
- offset - start,
- to, copy, 0);
- csum = csum_block_add(csum, csum2, pos);
- if ((len -= copy) == 0)
- return csum;
- offset += copy;
- to += copy;
- pos += copy;
- }
- start = end;
+ WARN_ON(start > offset + len);
+
+ end = start + frag_iter->len;
+ if ((copy = end - offset) > 0) {
+ if (copy > len)
+ copy = len;
+ csum2 = skb_copy_and_csum_bits(frag_iter,
+ offset - start,
+ to, copy, 0);
+ csum = csum_block_add(csum, csum2, pos);
+ if ((len -= copy) == 0)
+ return csum;
+ offset += copy;
+ to += copy;
+ pos += copy;
}
+ start = end;
}
BUG_ON(len);
return csum;
@@ -2324,8 +2312,7 @@ next_skb:
st->frag_data = NULL;
}
- if (st->root_skb == st->cur_skb &&
- skb_shinfo(st->root_skb)->frag_list) {
+ if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
st->frag_idx = 0;
goto next_skb;
@@ -2636,7 +2623,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
} else
skb_get(fskb2);
- BUG_ON(skb_shinfo(nskb)->frag_list);
+ SKB_FRAG_ASSERT(nskb);
skb_shinfo(nskb)->frag_list = fskb2;
}
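SKB_FRAG_ASSERT() replaces the open-coded BUG_ON(skb_shinfo(nskb)->frag_list); presumably it is nothing more than a named wrapper along these lines (assumed definition, not shown in this diff):

/* Assumed: assert that an skb does not already carry a frag list. */
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frags(skb))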
@@ -2661,30 +2648,40 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
struct sk_buff *p = *head;
struct sk_buff *nskb;
+ struct skb_shared_info *skbinfo = skb_shinfo(skb);
+ struct skb_shared_info *pinfo = skb_shinfo(p);
unsigned int headroom;
unsigned int len = skb_gro_len(skb);
+ unsigned int offset = skb_gro_offset(skb);
+ unsigned int headlen = skb_headlen(skb);
if (p->len + len >= 65536)
return -E2BIG;
- if (skb_shinfo(p)->frag_list)
+ if (pinfo->frag_list)
goto merge;
- else if (skb_headlen(skb) <= skb_gro_offset(skb)) {
- if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags >
- MAX_SKB_FRAGS)
+ else if (headlen <= offset) {
+ skb_frag_t *frag;
+ skb_frag_t *frag2;
+ int i = skbinfo->nr_frags;
+ int nr_frags = pinfo->nr_frags + i;
+
+ offset -= headlen;
+
+ if (nr_frags > MAX_SKB_FRAGS)
return -E2BIG;
- skb_shinfo(skb)->frags[0].page_offset +=
- skb_gro_offset(skb) - skb_headlen(skb);
- skb_shinfo(skb)->frags[0].size -=
- skb_gro_offset(skb) - skb_headlen(skb);
+ pinfo->nr_frags = nr_frags;
+ skbinfo->nr_frags = 0;
- memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
- skb_shinfo(skb)->frags,
- skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+ frag = pinfo->frags + nr_frags;
+ frag2 = skbinfo->frags + i;
+ do {
+ *--frag = *--frag2;
+ } while (--i);
- skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
- skb_shinfo(skb)->nr_frags = 0;
+ frag->page_offset += offset;
+ frag->size -= offset;
skb->truesize -= skb->data_len;
skb->len -= skb->data_len;
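The reworked merge path above copies skb's page frags into the tail of p's frag array back to front, so that when the do/while loop ends, frag is left on the first entry that was copied in and its page_offset/size can be trimmed in place by offset. A standalone illustration of the same idiom, as hypothetical user-space C with made-up values, not kernel code:

#include <stdio.h>

struct frag { unsigned int page_offset, size; };

int main(void)
{
	struct frag pfrags[8] = { {0, 100}, {0, 200} };        /* p already holds 2 frags */
	struct frag sfrags[3] = { {0, 50}, {0, 60}, {0, 70} }; /* skb's frags */
	unsigned int offset = 10;   /* bytes of the first frag already consumed */
	int i = 3;                  /* skbinfo->nr_frags */
	int nr_frags = 2 + i;       /* new pinfo->nr_frags */
	struct frag *frag = pfrags + nr_frags;
	struct frag *frag2 = sfrags + i;

	do {
		*--frag = *--frag2;         /* copy back to front */
	} while (--i);

	frag->page_offset += offset;        /* trim the first merged frag */
	frag->size -= offset;

	for (i = 0; i < nr_frags; i++)
		printf("frag[%d]: off=%u size=%u\n",
		       i, pfrags[i].page_offset, pfrags[i].size);
	return 0;
}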
@@ -2715,7 +2712,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
skb_shinfo(nskb)->frag_list = p;
- skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
+ skb_shinfo(nskb)->gso_size = pinfo->gso_size;
skb_header_release(p);
nskb->prev = p;
@@ -2730,16 +2727,13 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
p = nskb;
merge:
- if (skb_gro_offset(skb) > skb_headlen(skb)) {
- skb_shinfo(skb)->frags[0].page_offset +=
- skb_gro_offset(skb) - skb_headlen(skb);
- skb_shinfo(skb)->frags[0].size -=
- skb_gro_offset(skb) - skb_headlen(skb);
- skb_gro_reset_offset(skb);
- skb_gro_pull(skb, skb_headlen(skb));
+ if (offset > headlen) {
+ skbinfo->frags[0].page_offset += offset - headlen;
+ skbinfo->frags[0].size -= offset - headlen;
+ offset = headlen;
}
- __skb_pull(skb, skb_gro_offset(skb));
+ __skb_pull(skb, offset);
p->prev->next = skb;
p->prev = skb;
@@ -2786,6 +2780,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
+ struct sk_buff *frag_iter;
int elt = 0;
if (copy > 0) {
@@ -2819,26 +2814,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
start = end;
}
- if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
- for (; list; list = list->next) {
- int end;
+ skb_walk_frags(skb, frag_iter) {
+ int end;
- WARN_ON(start > offset + len);
+ WARN_ON(start > offset + len);
- end = start + list->len;
- if ((copy = end - offset) > 0) {
- if (copy > len)
- copy = len;
- elt += __skb_to_sgvec(list, sg+elt, offset - start,
- copy);
- if ((len -= copy) == 0)
- return elt;
- offset += copy;
- }
- start = end;
+ end = start + frag_iter->len;
+ if ((copy = end - offset) > 0) {
+ if (copy > len)
+ copy = len;
+ elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+ copy);
+ if ((len -= copy) == 0)
+ return elt;
+ offset += copy;
}
+ start = end;
}
BUG_ON(len);
return elt;
@@ -2886,7 +2877,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
return -ENOMEM;
/* Easy case. Most of packets will go this way. */
- if (!skb_shinfo(skb)->frag_list) {
+ if (!skb_has_frags(skb)) {
/* A little of trouble, not enough of space for trailer.
* This should not happen, when stack is tuned to generate
* good frames. OK, on miss we reallocate and reserve even more
@@ -2921,7 +2912,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
if (skb1->next == NULL && tailbits) {
if (skb_shinfo(skb1)->nr_frags ||
- skb_shinfo(skb1)->frag_list ||
+ skb_has_frags(skb1) ||
skb_tailroom(skb1) < tailbits)
ntail = tailbits + 128;
}
@@ -2930,7 +2921,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
skb_cloned(skb1) ||
ntail ||
skb_shinfo(skb1)->nr_frags ||
- skb_shinfo(skb1)->frag_list) {
+ skb_has_frags(skb1)) {
struct sk_buff *skb2;
/* Fuck, we are miserable poor guys... */
@@ -3016,12 +3007,12 @@ EXPORT_SYMBOL_GPL(skb_tstamp_tx);
*/
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
- if (unlikely(start > skb->len - 2) ||
- unlikely((int)start + off > skb->len - 2)) {
+ if (unlikely(start > skb_headlen(skb)) ||
+ unlikely((int)start + off > skb_headlen(skb) - 2)) {
if (net_ratelimit())
printk(KERN_WARNING
"bad partial csum: csum=%u/%u len=%u\n",
- start, off, skb->len);
+ start, off, skb_headlen(skb));
return false;
}
skb->ip_summed = CHECKSUM_PARTIAL;