author    Paul Durrant <Paul.Durrant@citrix.com>    2014-03-28 11:39:06 +0000
committer David S. Miller <davem@davemloft.net>     2014-03-29 18:50:34 -0400
commit    a02eb4732cf975d7fc71b6d1a71c058c9988b949 (patch)
tree      5a8d6dca5909de547b7a4d10c65329752d15d7ac /drivers/net
parent    0576eddf24df716d8570ef8ca11452a9f98eaab2 (diff)
xen-netback: worse-case estimate in xenvif_rx_action is underestimating
The worse-case estimate for skb ring slot usage in xenvif_rx_action() fails
to take fragment page_offset into account. The page_offset does, however,
affect the number of times the fragmentation code calls
start_new_rx_buffer() (i.e. consume another slot) and the worse-case should
assume that will always return true. This patch adds the page_offset into
the DIV_ROUND_UP for each frag.

Unfortunately some frontends aggressively limit the number of requests they
post into the shared ring so to avoid an estimate that is 'too' pessimal it
is capped at MAX_SKB_FRAGS.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Signed-off-by: David S. Miller <davem@davemloft.net>
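To make the arithmetic concrete, here is a minimal, userspace-only sketch
(not part of the patch) of the estimate the commit message describes. The
PAGE_SIZE and MAX_SKB_FRAGS values and the example fragment sizes are
assumptions for a typical 4 KiB-page build, not taken from the patch.

/*
 * Illustrative sketch only: shows why DIV_ROUND_UP(size, PAGE_SIZE)
 * undercounts when a fragment starts near the end of a page, and how
 * the estimate is then capped at MAX_SKB_FRAGS.
 */
#include <stdio.h>

#define PAGE_SIZE	4096u			/* assumption: 4 KiB pages */
#define MAX_SKB_FRAGS	17u			/* assumption: 65536 / PAGE_SIZE + 1 */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical fragment: 200 bytes starting 4000 bytes into its page. */
	unsigned int size = 200, offset = 4000;

	/* Old estimate: ignores page_offset, so it sees a single page... */
	unsigned int old_slots = DIV_ROUND_UP(size, PAGE_SIZE);

	/* ...but the data actually straddles a page boundary, so the copy
	 * code can start a new rx buffer and consume a second slot.
	 */
	unsigned int new_slots = DIV_ROUND_UP(offset + size, PAGE_SIZE);

	printf("old estimate: %u slot(s), new estimate: %u slot(s)\n",
	       old_slots, new_slots);

	/* The per-skb total is then capped so a pessimistic sum cannot
	 * exceed what request-limited frontends will post.
	 */
	unsigned int max_slots_needed = 40;	/* hypothetical sum over frags */
	if (max_slots_needed > MAX_SKB_FRAGS)
		max_slots_needed = MAX_SKB_FRAGS;
	printf("capped estimate: %u\n", max_slots_needed);

	return 0;
}

With these numbers the old formula reports 1 slot even though the fragment
spans 2 pages, which is exactly the underestimate the patch corrects.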
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/xen-netback/netback.c  21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 72314c7998fc..573f3e81e5d2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -493,9 +493,28 @@ static void xenvif_rx_action(struct xenvif *vif)
 						PAGE_SIZE);
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			unsigned int size;
+			unsigned int offset;
+
 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+			offset = skb_shinfo(skb)->frags[i].page_offset;
+
+			/* For a worse-case estimate we need to factor in
+			 * the fragment page offset as this will affect the
+			 * number of times xenvif_gop_frag_copy() will
+			 * call start_new_rx_buffer().
+			 */
+			max_slots_needed += DIV_ROUND_UP(offset + size,
+							 PAGE_SIZE);
 		}
+
+		/* To avoid the estimate becoming too pessimal for some
+		 * frontends that limit posted rx requests, cap the estimate
+		 * at MAX_SKB_FRAGS.
+		 */
+		if (max_slots_needed > MAX_SKB_FRAGS)
+			max_slots_needed = MAX_SKB_FRAGS;
+
+		/* We may need one more slot for GSO metadata */
 		if (skb_is_gso(skb) &&
 		   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
 		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))