author     Emil Tantilov <emil.s.tantilov@intel.com>      2014-11-21 02:57:15 +0000
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>     2014-11-20 14:30:46 -0800
commit     bad17234ba702a50aeec50ab04724ee58af89607 (patch)
tree       977a85faef9e5ea20ab6b505ff68e47d1892ca83 /drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
parent     4b95fe3de7f25402b22cd4d8b20b99cce1979309 (diff)
ixgbevf: Change receive model to use double buffered page based receives
This patch changes the basic receive path for ixgbevf so that instead of
receiving the data into an skb it is received into a double buffered page.
The main change is that receives are now done in pages only; the header is
then pulled out of the page and copied into the sk_buff data. This has the
advantages of reduced cache misses and improved performance on IOMMU
enabled systems.

v2:
- added pfmemalloc check to a new function for reusable page
- moved atomic_inc outside of #if/else in ixgbevf_add_rx_frag()
- reverted the removal of the api check in ixgbevf_change_mtu()

CC: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
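Editor's note: the header-pull step described above can be sketched as
follows. This is a minimal illustration, not the driver's actual code; the
function name rx_pull_header is hypothetical, and the 256/2048 constants
mirror IXGBEVF_RX_HDR_SIZE and IXGBEVF_RX_BUFSZ from the diff below.

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/mm.h>

/* Hypothetical sketch: copy only the header into the linear skb data
 * and leave the payload in the page as an skb fragment.
 */
static void rx_pull_header(struct sk_buff *skb, struct page *page,
			   unsigned int offset, unsigned int len)
{
	/* cap the copy at the header size (IXGBEVF_RX_HDR_SIZE) */
	unsigned int pull_len = min_t(unsigned int, len, 256);

	/* copy just the header into the skb's linear data area... */
	memcpy(__skb_put(skb, pull_len), page_address(page) + offset,
	       pull_len);

	/* ...and attach the remaining payload as a page fragment */
	if (len > pull_len)
		skb_add_rx_frag(skb, 0, page, offset + pull_len,
				len - pull_len, 2048 /* IXGBEVF_RX_BUFSZ */);
}

Because only the header bytes are touched by the CPU, the payload pages can
stay DMA-mapped, which is where the IOMMU win comes from.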
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf.h')
 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h (-rw-r--r--) | 24 +++++++++---------------
 1 file changed, 9 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 72a354b2128e..666049849d3f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -58,8 +58,9 @@ struct ixgbevf_tx_buffer {
};
struct ixgbevf_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
};
struct ixgbevf_stats {
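Editor's note: with page and page_offset replacing the per-buffer skb, each
4K page is split into two 2K receive buffers, and "double buffering" means
flipping the offset to the half the hardware is not currently writing. A
minimal sketch of that flip (the helper name is hypothetical):

/* Illustrative only: alternate between the two 2K halves of the page. */
static void flip_rx_buffer(struct ixgbevf_rx_buffer *rx_buffer)
{
	rx_buffer->page_offset ^= 2048;	/* 0 <-> 2048, IXGBEVF_RX_BUFSZ */
}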
@@ -91,9 +92,10 @@ struct ixgbevf_ring {
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
- unsigned int count; /* amount of descriptors */
- unsigned int next_to_use;
- unsigned int next_to_clean;
+ u16 count; /* amount of descriptors */
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_alloc;
union {
struct ixgbevf_tx_buffer *tx_buffer_info;
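Editor's note: the new next_to_alloc index tracks the slot where a recycled
page should be stashed, so the refill path knows that slot already holds a
DMA-mapped page. A hedged sketch of that bookkeeping, with hypothetical
names modeled on how such reuse paths typically look:

/* Hypothetical sketch, not the driver's code: move the still-mapped page
 * into the next_to_alloc slot and advance the index with wraparound. */
static void reuse_rx_page(struct ixgbevf_ring *rx_ring,
			  struct ixgbevf_rx_buffer *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ixgbevf_rx_buffer *new_buf = &rx_ring->rx_buffer_info[nta];

	/* wrap the index at the end of the ring */
	rx_ring->next_to_alloc = (nta + 1 < rx_ring->count) ? nta + 1 : 0;

	/* transfer the page and its DMA mapping to the new slot */
	new_buf->page = old_buf->page;
	new_buf->dma = old_buf->dma;
	new_buf->page_offset = old_buf->page_offset;
}

Narrowing count and the indices to u16 also keeps these four hot fields
packed together in the ring structure.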
@@ -109,12 +111,11 @@ struct ixgbevf_ring {
u64 hw_csum_rx_error;
u8 __iomem *tail;
+ struct sk_buff *skb;
u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
* for DCB and RSS modes */
-
- u16 rx_buf_len;
int queue_index; /* needed for multiqueue queue management */
};
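Editor's note: the new skb pointer in the ring replaces the
IXGBE_CB(skb)->prev chaining removed at the end of this diff. When a frame
spans multiple descriptors and the NAPI budget runs out mid-frame, the
in-progress skb is parked on the ring. Illustrative only (hypothetical
helper name):

/* Park a partially assembled frame until the next NAPI poll. */
static void park_partial_skb(struct ixgbevf_ring *rx_ring,
			     struct sk_buff *skb)
{
	rx_ring->skb = skb;	/* resumed on the next poll */
}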
@@ -133,12 +134,10 @@ struct ixgbevf_ring {
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_2K 2048
-#define IXGBEVF_RXBUFFER_4K 4096
-#define IXGBEVF_RXBUFFER_8K 8192
-#define IXGBEVF_RXBUFFER_10K 10240
+#define IXGBEVF_RXBUFFER_2048 2048
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
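Editor's note: the v2 item about a "pfmemalloc check" refers to deciding
whether a half-page buffer may be recycled into the ring. A hedged sketch,
assuming the modern page_is_pfmemalloc() accessor (the 2014 code read the
page flag directly) and a hypothetical helper name:

#include <linux/mm.h>
#include <linux/topology.h>

/* Hypothetical reuse test: only recycle local, non-emergency pages;
 * pfmemalloc pages come from reserves and must go back to the allocator. */
static bool rx_page_is_reusable(struct page *page)
{
	return likely(page_to_nid(page) == numa_node_id()) &&
	       likely(!page_is_pfmemalloc(page));
}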
@@ -430,11 +429,6 @@ enum ixbgevf_state_t {
__IXGBEVF_WORK_INIT,
};
-struct ixgbevf_cb {
- struct sk_buff *prev;
-};
-#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb)
-
enum ixgbevf_boards {
board_82599_vf,
board_X540_vf,