author    Joe Perches <joe@perches.com>  2010-05-13 15:26:17 +0000
committer David S. Miller <davem@davemloft.net>  2010-05-14 03:06:19 -0700
commit    57bf6eef2f43ae810504753208b3a2c0bb2e4765 (patch)
tree      54ea4e292e1a6279139580b7d3e9ea74f3d09c61 /drivers/net/ixgb/ixgb_main.c
parent    621b99b6f6a8ae69ca9b69dec0fec3a68f774bb7 (diff)
download  linux-57bf6eef2f43ae810504753208b3a2c0bb2e4765.tar.bz2
ixgb and e1000: Use new function for copybreak tests
There appears to be an off-by-1 defect in the maximum packet size copied when copybreak is specified in these modules.

The copybreak module params are specified as:
"Maximum size of packet that is copied to a new buffer on receive"

The tests are changed from "< copybreak" to "<= copybreak" and moved into new static functions for readability.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
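For illustration, here is a minimal stand-alone user-space sketch of why the boundary matters (this is not the driver code; the 256-byte default and the helper names are assumptions made up for this example): with "< copybreak" a packet of exactly copybreak bytes is never copied into a fresh small buffer, while "<= copybreak" matches the documented meaning of the parameter.

/*
 * Illustrative sketch only, not kernel code. The copybreak default of 256
 * and the old/new helper names are assumed for this example.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static unsigned int copybreak = 256;

/* old test: a packet of exactly copybreak bytes was not copied */
static bool old_should_copy(unsigned int length)
{
	return length < copybreak;
}

/* new test: the "maximum size of packet that is copied" is honored inclusively */
static bool new_should_copy(unsigned int length)
{
	return length <= copybreak;
}

int main(void)
{
	unsigned int lengths[] = { 255, 256, 257 };
	size_t i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
		printf("len=%u  old=%d  new=%d\n", lengths[i],
		       old_should_copy(lengths[i]),
		       new_should_copy(lengths[i]));
	return 0;
}

Compiled with any C compiler, this prints old=0 new=1 for the 256-byte case, which is exactly the boundary the patch moves.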
Diffstat (limited to 'drivers/net/ixgb/ixgb_main.c')
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 52
1 file changed, 31 insertions, 21 deletions
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index d58ca6b578cc..c6b75c83100c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1921,6 +1921,31 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
 	}
 }
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void ixgb_check_copybreak(struct net_device *netdev,
+				 struct ixgb_buffer *buffer_info,
+				 u32 length, struct sk_buff **skb)
+{
+	struct sk_buff *new_skb;
+
+	if (length > copybreak)
+		return;
+
+	new_skb = netdev_alloc_skb_ip_align(netdev, length);
+	if (!new_skb)
+		return;
+
+	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+				       (*skb)->data - NET_IP_ALIGN,
+				       length + NET_IP_ALIGN);
+	/* save the skb in buffer_info as good */
+	buffer_info->skb = *skb;
+	*skb = new_skb;
+}
+
 /**
  * ixgb_clean_rx_irq - Send received data up the network stack,
  * @adapter: board private structure
@@ -1957,11 +1982,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 		prefetch(skb->data - NET_IP_ALIGN);
-		if (++i == rx_ring->count) i = 0;
+		if (++i == rx_ring->count)
+			i = 0;
 		next_rxd = IXGB_RX_DESC(*rx_ring, i);
 		prefetch(next_rxd);
-		if ((j = i + 1) == rx_ring->count) j = 0;
+		j = i + 1;
+		if (j == rx_ring->count)
+			j = 0;
 		next2_buffer = &rx_ring->buffer_info[j];
 		prefetch(next2_buffer);
@@ -1997,25 +2025,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 			goto rxdesc_done;
 		}
-		/* code added for copybreak, this should improve
-		 * performance for small packets with large amounts
-		 * of reassembly being done in the stack */
-		if (length < copybreak) {
-			struct sk_buff *new_skb =
-				netdev_alloc_skb_ip_align(netdev, length);
-			if (new_skb) {
-				skb_copy_to_linear_data_offset(new_skb,
-							       -NET_IP_ALIGN,
-							       (skb->data -
-								NET_IP_ALIGN),
-							       (length +
-								NET_IP_ALIGN));
-				/* save the skb in buffer_info as good */
-				buffer_info->skb = skb;
-				skb = new_skb;
-			}
-		}
-		/* end copybreak code */
+		ixgb_check_copybreak(netdev, buffer_info, length, &skb);
 		/* Good Receive */
 		skb_put(skb, length);