author    Emil Tantilov <emil.s.tantilov@intel.com>    2017-12-11 10:37:20 -0800
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>   2018-01-26 07:46:51 -0800
commit    40b8178bc97dfcc688eb42d04df45e2f3c905830 (patch)
tree      6b1a11436a703ff8689bfd4c8edd09997fd69f92
parent    2a35efe582116abc3135151bdb53221087ca43e3 (diff)
ixgbevf: clear rx_buffer_info in configure instead of clean
Based on commit d2bead576e67 ("igb: Clear Rx buffer_info in configure instead of clean").

This change makes it so that instead of going through the entire ring on Rx
cleanup, we only go through the region that was designated to be cleaned up
and stop when we reach the region where new allocations should start.

In addition, we can avoid having to perform a memset on the Rx buffer_info
structures until we are about to start using the ring again.

Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
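The bounded walk the message describes (start at next_to_clean, stop at next_to_alloc, and wrap at the end of the ring instead of sweeping all count entries) can be sketched as a small standalone C program. The demo_ring, demo_buffer and demo_clean_ring names below are invented for illustration and are not the ixgbevf structures; the real code is in the hunks that follow.

/* Standalone sketch of the bounded ring walk; hypothetical types, not driver code. */
#include <stdio.h>

struct demo_buffer {
	void *page;			/* stand-in for the mapped Rx page */
};

struct demo_ring {
	struct demo_buffer buf[8];	/* buffer_info array, count == 8 */
	unsigned short count;
	unsigned short next_to_clean;	/* first slot that still holds a buffer */
	unsigned short next_to_alloc;	/* first slot with nothing allocated yet */
};

/* Free only the occupied region and reset the indices; zeroing the whole
 * array is deferred until the ring is configured for use again.
 */
static void demo_clean_ring(struct demo_ring *ring)
{
	unsigned short i = ring->next_to_clean;

	while (i != ring->next_to_alloc) {
		/* a real driver would unmap and release ring->buf[i].page here */
		ring->buf[i].page = NULL;

		i++;
		if (i == ring->count)	/* wrap around the end of the ring */
			i = 0;
	}

	ring->next_to_alloc = 0;
	ring->next_to_clean = 0;
}

int main(void)
{
	struct demo_ring ring = { .count = 8, .next_to_clean = 6, .next_to_alloc = 2 };
	static int dummy;
	unsigned short i;

	/* occupied region wraps around: slots 6, 7, 0 and 1 hold buffers */
	for (i = ring.next_to_clean; i != ring.next_to_alloc;
	     i = (i + 1 == ring.count) ? 0 : i + 1)
		ring.buf[i].page = &dummy;

	demo_clean_ring(&ring);
	printf("slot 7 freed: %s\n", ring.buf[7].page ? "no" : "yes");
	return 0;
}

Because only slots that were actually given a page are walked, the patch can drop the !rx_buffer->page check, and since ixgbevf_configure_rx_ring() now zeroes the array right before the ring is used, the allocation in ixgbevf_setup_rx_resources() no longer needs vzalloc().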
-rw-r--r--   drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c   26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 350afec3dde8..a793f9ea05e7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1773,6 +1773,10 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
 	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
 
+	/* initialize rx_buffer_info */
+	memset(ring->rx_buffer_info, 0,
+	       sizeof(struct ixgbevf_rx_buffer) * ring->count);
+
 	/* initialize Rx descriptor 0 */
 	rx_desc = IXGBEVF_RX_DESC(ring, 0);
 	rx_desc->wb.upper.length = 0;
@@ -2131,8 +2135,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
  **/
 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 {
-	unsigned long size;
-	unsigned int i;
+	u16 i = rx_ring->next_to_clean;
 
 	/* Free Rx ring sk_buff */
 	if (rx_ring->skb) {
@@ -2140,17 +2143,11 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 		rx_ring->skb = NULL;
 	}
 
-	/* ring already cleared, nothing to do */
-	if (!rx_ring->rx_buffer_info)
-		return;
-
 	/* Free all the Rx ring pages */
-	for (i = 0; i < rx_ring->count; i++) {
+	while (i != rx_ring->next_to_alloc) {
 		struct ixgbevf_rx_buffer *rx_buffer;
 
 		rx_buffer = &rx_ring->rx_buffer_info[i];
-		if (!rx_buffer->page)
-			continue;
 
 		/* Invalidate cache lines that may have been written to by
 		 * device so that we avoid corrupting memory.
@@ -2171,11 +2168,14 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 		__page_frag_cache_drain(rx_buffer->page,
 					rx_buffer->pagecnt_bias);
 
-		rx_buffer->page = NULL;
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
 	}
 
-	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
-	memset(rx_ring->rx_buffer_info, 0, size);
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
 }
 
 /**
@@ -3090,7 +3090,7 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
 	int size;
 
 	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
-	rx_ring->rx_buffer_info = vzalloc(size);
+	rx_ring->rx_buffer_info = vmalloc(size);
 	if (!rx_ring->rx_buffer_info)
 		goto err;