Diffstat (limited to 'drivers/net/sfc/rx.c')
-rw-r--r--	drivers/net/sfc/rx.c	144
1 file changed, 72 insertions(+), 72 deletions(-)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 3925fd621177..c0fdb59030fb 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -89,24 +89,37 @@ static unsigned int rx_refill_limit = 95;
*/
#define EFX_RXD_HEAD_ROOM 2
-static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+/* Offset of ethernet header within page */
+static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
+ struct efx_rx_buffer *buf)
{
/* Offset is always within one page, so we don't need to consider
* the page order.
*/
- return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+ return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
+ efx->type->rx_buffer_hash_size);
}
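The masking above can be exercised on its own. A standalone user-space sketch (PAGE_SIZE and the 4-byte hash prefix are assumptions for the demo; in the driver the prefix comes from efx->type->rx_buffer_hash_size):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size for the demo */

/* Model of efx_rx_buf_offset(): a buffer never spans a page, so
 * dma_addr & (PAGE_SIZE - 1) is its offset within the page, and the
 * hash prefix (4 bytes assumed here) is skipped to reach the
 * Ethernet header. */
int main(void)
{
	unsigned long dma_addr = 0x3a000UL + 66;   /* 66 bytes into a page */
	unsigned int rx_buffer_hash_size = 4;      /* assumption */

	printf("eh offset in page = %lu\n",
	       (dma_addr & (PAGE_SIZE - 1)) + rx_buffer_hash_size);  /* 70 */
	return 0;
}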
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
return PAGE_SIZE << efx->rx_buffer_order;
}
-static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
+static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
+ if (buf->is_page)
+ return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
+ else
+ return ((u8 *)buf->u.skb->data +
+ efx->type->rx_buffer_hash_size);
+}
+
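efx_rx_buf_eh() dereferences whichever union member is_page selects. The descriptor itself is defined in net_driver.h rather than in this file; a minimal model consistent with the accessors above (field names taken from this diff, layout otherwise assumed):

#include <stdbool.h>

struct sk_buff;		/* opaque stand-ins for the kernel types */
struct page;

struct efx_rx_buffer {
	unsigned long dma_addr;		/* dma_addr_t in the kernel */
	union {
		struct sk_buff *skb;	/* valid when !is_page */
		struct page *page;	/* valid when is_page */
	} u;
	unsigned int len;
	bool is_page;			/* selects the live union member */
};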
+static inline u32 efx_rx_buf_hash(const u8 *eh)
+{
+ /* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
- return __le32_to_cpup((const __le32 *)(buf->data - 4));
+ return __le32_to_cpup((const __le32 *)(eh - 4));
#else
- const u8 *data = (const u8 *)(buf->data - 4);
+ const u8 *data = eh - 4;
return ((u32)data[0] |
	(u32)data[1] << 8 |
	(u32)data[2] << 16 |
	(u32)data[3] << 24);
#endif
}
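The #else branch reassembles the little-endian hash a byte at a time when unaligned 32-bit loads are unsafe. A runnable model of that path (rx_buf_hash and the sample frame are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* The NIC writes a 4-byte little-endian hash immediately before the
 * Ethernet header, so the hash lives at eh - 4 and is assembled byte
 * by byte. (Standalone demo, not driver code.) */
static uint32_t rx_buf_hash(const uint8_t *eh)
{
	const uint8_t *data = eh - 4;
	return ((uint32_t)data[0] |
		(uint32_t)data[1] << 8 |
		(uint32_t)data[2] << 16 |
		(uint32_t)data[3] << 24);
}

int main(void)
{
	uint8_t frame[8] = { 0x78, 0x56, 0x34, 0x12,	/* hash 0x12345678 LE */
			     0xff, 0xff, 0xff, 0xff };	/* start of eth header */
	printf("hash = 0x%08x\n", rx_buf_hash(&frame[4]));	/* 0x12345678 */
	return 0;
}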
@@ -129,6 +142,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
struct net_device *net_dev = efx->net_dev;
struct efx_rx_buffer *rx_buf;
+ struct sk_buff *skb;
int skb_len = efx->rx_buffer_len;
unsigned index, count;
@@ -136,24 +150,23 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
- if (unlikely(!rx_buf->skb))
+ rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
+ if (unlikely(!skb))
return -ENOMEM;
- rx_buf->page = NULL;
/* Adjust the SKB for padding and checksum */
- skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+ skb_reserve(skb, NET_IP_ALIGN);
rx_buf->len = skb_len - NET_IP_ALIGN;
- rx_buf->data = (char *)rx_buf->skb->data;
- rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rx_buf->is_page = false;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
rx_buf->dma_addr = pci_map_single(efx->pci_dev,
- rx_buf->data, rx_buf->len,
+ skb->data, rx_buf->len,
PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(efx->pci_dev,
rx_buf->dma_addr))) {
- dev_kfree_skb_any(rx_buf->skb);
- rx_buf->skb = NULL;
+ dev_kfree_skb_any(skb);
+ rx_buf->u.skb = NULL;
return -EIO;
}
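Both refill paths place the new buffer with added_count & rx_queue->ptr_mask, which assumes the ring size is a power of two so the mask is size - 1. A standalone model of that indexing:

#include <stdio.h>

/* Ring of 2^n entries: a free-running counter masked by (size - 1)
 * wraps correctly, so added_count never needs an explicit modulo.
 * (Names mirror the driver's rx_queue fields; sizes are invented.) */
int main(void)
{
	const unsigned size = 8;		/* must be a power of two */
	const unsigned ptr_mask = size - 1;
	unsigned added_count;

	for (added_count = 5; added_count < 13; added_count++)
		printf("added_count=%2u -> index %u\n",
		       added_count, added_count & ptr_mask);
	return 0;
}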
@@ -211,10 +224,9 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
- rx_buf->skb = NULL;
- rx_buf->page = page;
- rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->u.page = page;
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+ rx_buf->is_page = true;
++rx_queue->added_count;
++rx_queue->alloc_page_count;
++state->refcnt;
@@ -235,19 +247,17 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
static void efx_unmap_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
- if (rx_buf->page) {
+ if (rx_buf->is_page && rx_buf->u.page) {
struct efx_rx_page_state *state;
- EFX_BUG_ON_PARANOID(rx_buf->skb);
-
- state = page_address(rx_buf->page);
+ state = page_address(rx_buf->u.page);
if (--state->refcnt == 0) {
pci_unmap_page(efx->pci_dev,
state->dma_addr,
efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
}
- } else if (likely(rx_buf->skb)) {
+ } else if (!rx_buf->is_page && rx_buf->u.skb) {
pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
rx_buf->len, PCI_DMA_FROMDEVICE);
}
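The unmap path relies on a small header at the start of each DMA page, which is why page_address() recovers it. struct efx_rx_page_state is defined elsewhere in the driver, so its shape here is an assumption inferred from the fields used above; a user-space model of the drop-last-reference pattern:

#include <stdio.h>

typedef unsigned long long dma_addr_t;	/* stand-in for the kernel type */

/* Assumed shape: refcnt counts the rx buffers carved out of the page,
 * dma_addr is the mapping for the whole page. */
struct efx_rx_page_state {
	unsigned int refcnt;
	dma_addr_t dma_addr;
};

static void unmap_page_model(struct efx_rx_page_state *state)
{
	/* Only the last buffer sharing the page tears down the mapping,
	 * mirroring the --state->refcnt == 0 test in the driver. */
	if (--state->refcnt == 0)
		printf("unmap whole page at dma 0x%llx\n", state->dma_addr);
	else
		printf("page still has %u user(s)\n", state->refcnt);
}

int main(void)
{
	struct efx_rx_page_state state = { .refcnt = 2, .dma_addr = 0x3a000 };

	unmap_page_model(&state);	/* first buffer: page kept */
	unmap_page_model(&state);	/* second buffer: page unmapped */
	return 0;
}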
@@ -256,12 +266,12 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
static void efx_free_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
- if (rx_buf->page) {
- __free_pages(rx_buf->page, efx->rx_buffer_order);
- rx_buf->page = NULL;
- } else if (likely(rx_buf->skb)) {
- dev_kfree_skb_any(rx_buf->skb);
- rx_buf->skb = NULL;
+ if (rx_buf->is_page && rx_buf->u.page) {
+ __free_pages(rx_buf->u.page, efx->rx_buffer_order);
+ rx_buf->u.page = NULL;
+ } else if (!rx_buf->is_page && rx_buf->u.skb) {
+ dev_kfree_skb_any(rx_buf->u.skb);
+ rx_buf->u.skb = NULL;
}
}
@@ -277,7 +287,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf)
{
- struct efx_rx_page_state *state = page_address(rx_buf->page);
+ struct efx_rx_page_state *state = page_address(rx_buf->u.page);
struct efx_rx_buffer *new_buf;
unsigned fill_level, index;
@@ -292,16 +302,14 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
}
++state->refcnt;
- get_page(rx_buf->page);
+ get_page(rx_buf->u.page);
index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
- new_buf->skb = NULL;
- new_buf->page = rx_buf->page;
- new_buf->data = (void *)
- ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
+ new_buf->u.page = rx_buf->u.page;
new_buf->len = rx_buf->len;
+ new_buf->is_page = true;
++rx_queue->added_count;
}
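A page that holds two half-page buffers can hop between them by toggling one address bit, which is what dma_addr ^ (PAGE_SIZE >> 1) does above. A runnable model (addresses invented for the demo):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long first  = 0x10000UL + 64;		/* low half of page  */
	unsigned long second = first ^ (PAGE_SIZE >> 1);	/* high half */

	printf("first  buffer at 0x%lx\n", first);
	printf("second buffer at 0x%lx\n", second);
	printf("back to first: 0x%lx\n", second ^ (PAGE_SIZE >> 1));
	return 0;
}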
@@ -315,16 +323,15 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
struct efx_rx_buffer *new_buf;
unsigned index;
- if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
- page_count(rx_buf->page) == 1)
+ if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+ page_count(rx_buf->u.page) == 1)
efx_resurrect_rx_buffer(rx_queue, rx_buf);
index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
memcpy(new_buf, rx_buf, sizeof(*new_buf));
- rx_buf->page = NULL;
- rx_buf->skb = NULL;
+ rx_buf->u.page = NULL;
++rx_queue->added_count;
}
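The recycle path transfers ownership by copying the descriptor and then clearing the source's page pointer, so the page cannot be freed twice when both descriptors are torn down. A minimal user-space model of the idiom (struct buf is invented for the demo):

#include <stdlib.h>
#include <string.h>

struct buf { void *page; };

int main(void)
{
	struct buf old_buf = { malloc(64) }, new_buf;

	memcpy(&new_buf, &old_buf, sizeof(new_buf));
	old_buf.page = NULL;		/* ownership moved to new_buf */

	free(new_buf.page);		/* single owner frees once */
	free(old_buf.page);		/* free(NULL) is a no-op */
	return 0;
}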
@@ -428,7 +435,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
* data at the end of the skb will be trashed. So
* we have no choice but to leak the fragment.
*/
- *leak_packet = (rx_buf->skb != NULL);
+ *leak_packet = !rx_buf->is_page;
efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
if (net_ratelimit())
@@ -448,19 +455,18 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
*/
static void efx_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
- bool checksummed)
+ const u8 *eh, bool checksummed)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
/* Pass the skb/page into the GRO engine */
- if (rx_buf->page) {
+ if (rx_buf->is_page) {
struct efx_nic *efx = channel->efx;
- struct page *page = rx_buf->page;
+ struct page *page = rx_buf->u.page;
struct sk_buff *skb;
- EFX_BUG_ON_PARANOID(rx_buf->skb);
- rx_buf->page = NULL;
+ rx_buf->u.page = NULL;
skb = napi_get_frags(napi);
if (!skb) {
@@ -469,11 +475,11 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(rx_buf);
+ skb->rxhash = efx_rx_buf_hash(eh);
skb_shinfo(skb)->frags[0].page = page;
skb_shinfo(skb)->frags[0].page_offset =
- efx_rx_buf_offset(rx_buf);
+ efx_rx_buf_offset(efx, rx_buf);
skb_shinfo(skb)->frags[0].size = rx_buf->len;
skb_shinfo(skb)->nr_frags = 1;
@@ -487,11 +493,10 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
gro_result = napi_gro_frags(napi);
} else {
- struct sk_buff *skb = rx_buf->skb;
+ struct sk_buff *skb = rx_buf->u.skb;
- EFX_BUG_ON_PARANOID(!skb);
EFX_BUG_ON_PARANOID(!checksummed);
- rx_buf->skb = NULL;
+ rx_buf->u.skb = NULL;
gro_result = napi_gro_receive(napi, skb);
}
@@ -513,9 +518,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
bool leak_packet = false;
rx_buf = efx_rx_buffer(rx_queue, index);
- EFX_BUG_ON_PARANOID(!rx_buf->data);
- EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
- EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
/* This allows the refill path to post another buffer.
* EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -554,12 +556,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
*/
- prefetch(rx_buf->data);
+ prefetch(efx_rx_buf_eh(efx, rx_buf));
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
*/
- rx_buf->len = len;
+ rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
if (channel->rx_pkt)
__efx_rx_packet(channel,
		channel->rx_pkt, channel->rx_pkt_csummed);
@@ -574,45 +576,43 @@ void __efx_rx_packet(struct efx_channel *channel,
{
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
-
- rx_buf->data += efx->type->rx_buffer_hash_size;
- rx_buf->len -= efx->type->rx_buffer_hash_size;
+ u8 *eh = efx_rx_buf_eh(efx, rx_buf);
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
- efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
+ efx_loopback_rx_packet(efx, eh, rx_buf->len);
efx_free_rx_buffer(efx, rx_buf);
return;
}
- if (rx_buf->skb) {
- prefetch(skb_shinfo(rx_buf->skb));
+ if (!rx_buf->is_page) {
+ skb = rx_buf->u.skb;
- skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
- skb_put(rx_buf->skb, rx_buf->len);
+ prefetch(skb_shinfo(skb));
+
+ skb_reserve(skb, efx->type->rx_buffer_hash_size);
+ skb_put(skb, rx_buf->len);
if (efx->net_dev->features & NETIF_F_RXHASH)
- rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);
+ skb->rxhash = efx_rx_buf_hash(eh);
/* Move past the ethernet header; eh still points
 * at the ethernet header */
- rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
- efx->net_dev);
+ skb->protocol = eth_type_trans(skb, efx->net_dev);
- skb_record_rx_queue(rx_buf->skb, channel->channel);
+ skb_record_rx_queue(skb, channel->channel);
}
- if (likely(checksummed || rx_buf->page)) {
- efx_rx_packet_gro(channel, rx_buf, checksummed);
+ if (likely(checksummed || rx_buf->is_page)) {
+ efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
return;
}
/* We now own the SKB */
- skb = rx_buf->skb;
- rx_buf->skb = NULL;
- EFX_BUG_ON_PARANOID(!skb);
+ skb = rx_buf->u.skb;
+ rx_buf->u.skb = NULL;
/* Set the SKB flags */
skb_checksum_none_assert(skb);