author     Alexander Duyck <alexander.h.duyck@intel.com>  2018-05-07 11:08:40 -0700
committer  David S. Miller <davem@davemloft.net>  2018-05-08 22:30:06 -0400
commit     0ad6509571e06b302d519f2f05e616ac8c1a10d7 (patch)
tree       339ea1f0c15f38b9510b353b95da1c46939e381c /net/ipv4/udp_offload.c
parent     9a0d41b3598ff62ecb26661bbfb1d523586cdea3 (diff)
download   linux-0ad6509571e06b302d519f2f05e616ac8c1a10d7.tar.bz2
udp: Partially unroll handling of first segment and last segment
This patch unrolls the handling of the first segment and the last segment out of the loop that processes the segmented skb. Part of the motivation for this is that the first frame and all of the frames in between are mostly identical in terms of header data, while the last frame differs in its length and partial checksum. In addition I am dropping the header length calculation, since we only need it for the last frame and it can easily be obtained from the segment's data_len and the offset of the tail from the transport header.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
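To illustrate the control-flow change in isolation, here is a minimal userspace C sketch of the same partial-unroll pattern: the loop body stays identical for the first and all middle segments, and the (possibly shorter) last segment is fixed up after the loop breaks. The struct seg type, its fields, and the helper below are invented for illustration; they are not the kernel's sk_buff or udphdr.

#include <stdio.h>

/* Toy stand-in for a segment list; not the kernel's sk_buff. */
struct seg {
	struct seg *next;
	unsigned int payload;	/* bytes carried by this segment */
	unsigned int len_field;	/* length recorded in the segment's "header" */
};

/* Partial unroll: full-sized segments get the precomputed value inside the
 * loop, the last segment is patched from its own size after the break.
 */
static void fixup_lengths(struct seg *segs, unsigned int mss)
{
	struct seg *seg = segs;

	for (;;) {
		/* work common to every segment would go here */
		if (!seg->next)
			break;		/* last segment handled below */
		seg->len_field = mss;	/* all full-sized segments are identical */
		seg = seg->next;
	}

	/* only the last segment can be smaller than mss */
	seg->len_field = seg->payload;
}

int main(void)
{
	struct seg c = { NULL, 300, 0 };
	struct seg b = { &c, 1000, 0 };
	struct seg a = { &b, 1000, 0 };

	fixup_lengths(&a, 1000);
	for (struct seg *s = &a; s; s = s->next)
		printf("len_field = %u\n", s->len_field);
	return 0;
}

Compared with testing !seg->next on every iteration to special-case the tail inside the loop, this keeps the per-iteration work uniform for the full-sized segments.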
Diffstat (limited to 'net/ipv4/udp_offload.c')
-rw-r--r--  net/ipv4/udp_offload.c  33
1 file changed, 19 insertions(+), 14 deletions(-)
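The diff below patches the 16-bit one's-complement checksum with csum16_add()/csum16_sub() whenever the length word it covers changes. As a rough userspace sketch of that incremental update (the helper reimplementations and the toy word values are mine, not the kernel's include/net/checksum.h code), replacing one 16-bit word and adjusting the folded sum matches a full recomputation for these inputs:

#include <stdint.h>
#include <stdio.h>

/* One's-complement 16-bit add with end-around carry (csum16_add-style). */
static uint16_t csum16_add(uint16_t csum, uint16_t addend)
{
	uint32_t res = (uint32_t)csum + addend;

	return (uint16_t)(res + (res >> 16));
}

/* Subtract by adding the one's complement (csum16_sub-style). */
static uint16_t csum16_sub(uint16_t csum, uint16_t addend)
{
	return csum16_add(csum, (uint16_t)~addend);
}

/* Fold a list of 16-bit words into a one's-complement sum. */
static uint16_t csum16_words(const uint16_t *w, int n)
{
	uint16_t sum = 0;

	for (int i = 0; i < n; i++)
		sum = csum16_add(sum, w[i]);
	return sum;
}

int main(void)
{
	uint16_t words[4] = { 0x1234, 0x0042, 0xbeef, 0x0100 }; /* toy "header" */
	uint16_t old_sum = csum16_words(words, 4);
	uint16_t old_len = words[3], new_len = 0x0058;

	/* Incremental update: drop the old length word, add the new one. */
	uint16_t incremental = csum16_add(csum16_sub(old_sum, old_len), new_len);

	words[3] = new_len;
	printf("incremental=0x%04x recomputed=0x%04x\n",
	       incremental, csum16_words(words, 4));
	return 0;
}

This is why the fixup in __udp_gso_segment only needs the old and new length values to adjust uh->check, rather than walking the payload again.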
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 92c182e99ddc..b15c78ac3f23 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -193,7 +193,6 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 	struct sock *sk = gso_skb->sk;
 	unsigned int sum_truesize = 0;
 	struct sk_buff *segs, *seg;
-	unsigned int hdrlen;
 	struct udphdr *uh;
 	unsigned int mss;
 	__sum16 check;
@@ -203,7 +202,6 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 	if (gso_skb->len <= sizeof(*uh) + mss)
 		return ERR_PTR(-EINVAL);
 
-	hdrlen = gso_skb->data - skb_mac_header(gso_skb);
 	skb_pull(gso_skb, sizeof(*uh));
 
 	/* clear destructor to avoid skb_segment assigning it to tail */
@@ -216,30 +214,37 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 		return segs;
 	}
 
-	uh = udp_hdr(segs);
+	seg = segs;
+	uh = udp_hdr(seg);
 
 	/* compute checksum adjustment based on old length versus new */
 	newlen = htons(sizeof(*uh) + mss);
 	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);
 
-	for (seg = segs; seg; seg = seg->next) {
-		uh = udp_hdr(seg);
+	for (;;) {
+		seg->destructor = sock_wfree;
+		seg->sk = sk;
+		sum_truesize += seg->truesize;
 
-		/* last packet can be partial gso_size */
-		if (!seg->next) {
-			newlen = htons(seg->len - hdrlen);
-			check = csum16_add(csum16_sub(uh->check, uh->len),
-					   newlen);
-		}
+		if (!seg->next)
+			break;
 
 		uh->len = newlen;
 		uh->check = check;
 
-		seg->destructor = sock_wfree;
-		seg->sk = sk;
-		sum_truesize += seg->truesize;
+		seg = seg->next;
+		uh = udp_hdr(seg);
 	}
 
+	/* last packet can be partial gso_size, account for that in checksum */
+	newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
+		       seg->data_len);
+	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);
+
+	uh->len = newlen;
+	uh->check = check;
+
+	/* update refcount for the packet */
 	refcount_add(sum_truesize - gso_skb->truesize, &sk->sk_wmem_alloc);
 
 	return segs;
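For the new last-segment length above: skb_tail_pointer(seg) - skb_transport_header(seg) covers the linear bytes from the UDP header to the end of the linear area, and seg->data_len adds the paged (non-linear) bytes, so their sum is the same UDP length the old seg->len - hdrlen expression produced, just without carrying hdrlen across the function. A toy arithmetic check with invented byte counts (the numbers and variable names are illustrative only):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Invented layout for one segment; all numbers are illustrative. */
	unsigned int hdrlen     = 34;	/* header bytes in front of the UDP header */
	unsigned int linear_len = 142;	/* bytes in the linear area */
	unsigned int data_len   = 200;	/* bytes held in paged fragments */

	/* Old approach: total segment length minus the precomputed header length. */
	unsigned int seg_len = linear_len + data_len;	/* like seg->len */
	unsigned int old_way = seg_len - hdrlen;	/* seg->len - hdrlen */

	/* New approach: tail-to-transport-header span plus the paged bytes. */
	unsigned int new_way = (linear_len - hdrlen) + data_len;

	assert(old_way == new_way);
	printf("UDP length of last segment: %u\n", new_way);
	return 0;
}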