author     David S. Miller <davem@davemloft.net>  2020-01-08 13:28:03 -0800
committer  David S. Miller <davem@davemloft.net>  2020-01-08 13:28:28 -0800
commit     daea5b4dc16c3edc90392a512492dae504f1a37a
tree       3e146ba607277dc4921b5f77225085c7fd1c2ae6
parent     b9ae51273655a72a12fba730843fd72fb132735a
parent     17d3b21c7ba82cd88855a798b931bb3ac1388cba
Merge branch 'sfc-code-refactoring'
Alex Maftei says:

====================
sfc: code refactoring

Splitting some of the driver code into different files, which will
later be used in another driver for a new product.
====================

Reviewed-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/sfc/Makefile            |    7
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c              |    3
-rw-r--r--  drivers/net/ethernet/sfc/efx.c               | 2123
-rw-r--r--  drivers/net/ethernet/sfc/efx.h               |   39
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c      | 1232
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.h      |   55
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c        |  999
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.h        |   61
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c           |    3
-rw-r--r--  drivers/net/ethernet/sfc/farch.c             |    1
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h              |    1
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_functions.h    |   30
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c         |   50
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port_common.h  |   53
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h        |   13
-rw-r--r--  drivers/net/ethernet/sfc/nic.h               |    6
-rw-r--r--  drivers/net/ethernet/sfc/rx.c                |  376
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.c         |  375
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.h         |   42
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c          |    2
-rw-r--r--  drivers/net/ethernet/sfc/siena.c             |    1
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c       |    1
-rw-r--r--  drivers/net/ethernet/sfc/tx.c                |  296
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.c         |  310
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.h         |   31
25 files changed, 3262 insertions(+), 2848 deletions(-)
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index c5c297e78d06..40a54df34647 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,7 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-sfc-y += efx.o nic.o farch.o siena.o ef10.o tx.o rx.o \
+sfc-y += efx.o efx_common.o efx_channels.o nic.o \
+ farch.o siena.o ef10.o \
+ tx.o tx_common.o rx.o rx_common.o \
selftest.o ethtool.o ptp.o tx_tso.o \
- mcdi.o mcdi_port.o mcdi_mon.o
+ mcdi.o mcdi_port.o \
+ mcdi_mon.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 4d9bbccc6f89..d752ed34672d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -5,14 +5,17 @@
*/
#include "net_driver.h"
+#include "rx_common.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
+#include "mcdi_port_common.h"
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
+#include "rx_common.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 033907e6fdb0..655424e83d97 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -23,6 +23,10 @@
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
@@ -39,56 +43,6 @@
**************************************************************************
*/
-/* Loopback mode names (see LOOPBACK_MODE()) */
-const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
-const char *const efx_loopback_mode_names[] = {
- [LOOPBACK_NONE] = "NONE",
- [LOOPBACK_DATA] = "DATAPATH",
- [LOOPBACK_GMAC] = "GMAC",
- [LOOPBACK_XGMII] = "XGMII",
- [LOOPBACK_XGXS] = "XGXS",
- [LOOPBACK_XAUI] = "XAUI",
- [LOOPBACK_GMII] = "GMII",
- [LOOPBACK_SGMII] = "SGMII",
- [LOOPBACK_XGBR] = "XGBR",
- [LOOPBACK_XFI] = "XFI",
- [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
- [LOOPBACK_GMII_FAR] = "GMII_FAR",
- [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
- [LOOPBACK_XFI_FAR] = "XFI_FAR",
- [LOOPBACK_GPHY] = "GPHY",
- [LOOPBACK_PHYXS] = "PHYXS",
- [LOOPBACK_PCS] = "PCS",
- [LOOPBACK_PMAPMD] = "PMA/PMD",
- [LOOPBACK_XPORT] = "XPORT",
- [LOOPBACK_XGMII_WS] = "XGMII_WS",
- [LOOPBACK_XAUI_WS] = "XAUI_WS",
- [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
- [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
- [LOOPBACK_GMII_WS] = "GMII_WS",
- [LOOPBACK_XFI_WS] = "XFI_WS",
- [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
- [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
-};
-
-const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
-const char *const efx_reset_type_names[] = {
- [RESET_TYPE_INVISIBLE] = "INVISIBLE",
- [RESET_TYPE_ALL] = "ALL",
- [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
- [RESET_TYPE_WORLD] = "WORLD",
- [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
- [RESET_TYPE_DATAPATH] = "DATAPATH",
- [RESET_TYPE_MC_BIST] = "MC_BIST",
- [RESET_TYPE_DISABLE] = "DISABLE",
- [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
- [RESET_TYPE_INT_ERROR] = "INT_ERROR",
- [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
- [RESET_TYPE_TX_SKIP] = "TX_SKIP",
- [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
- [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
-};
-
/* UDP tunnel type names */
static const char *const efx_udp_tunnel_type_names[] = {
[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
@@ -104,18 +58,6 @@ void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
snprintf(buf, buflen, "type %d", type);
}
-/* Reset workqueue. If any NIC has a hardware failure then a reset will be
- * queued onto this work queue. This is not a per-nic work queue, because
- * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
- */
-static struct workqueue_struct *reset_workqueue;
-
-/* How often and how many times to poll for a reset while waiting for a
- * BIST that another function started to complete.
- */
-#define BIST_WAIT_DELAY_MS 100
-#define BIST_WAIT_DELAY_COUNT 100
-
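These two constants bound the wait implemented in efx_wait_for_bist_end() later in this patch: the driver polls efx_mcdi_poll_reboot() up to BIST_WAIT_DELAY_COUNT times, sleeping BIST_WAIT_DELAY_MS between polls. A quick worked bound (illustration only, not part of the patch):

    max_wait = BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS;  /* 100 * 100 ms = 10 s */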
/**************************************************************************
*
* Configurable values
@@ -135,21 +77,6 @@ module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
"Use separate channels for TX and RX");
-/* This is the weight assigned to each of the (per-channel) virtual
- * NAPI devices.
- */
-static int napi_weight = 64;
-
-/* This is the time (in jiffies) between invocations of the hardware
- * monitor.
- * On Falcon-based NICs, this will:
- * - Check the on-board hardware monitor;
- * - Poll the link state and reconfigure the hardware as necessary.
- * On Siena-based NICs for power systems with EEH support, this will give EEH a
- * chance to start.
- */
-static unsigned int efx_monitor_interval = 1 * HZ;
-
/* Initial interrupt moderation settings. They can be modified after
* module load with ethtool.
*
@@ -169,38 +96,10 @@ static unsigned int rx_irq_mod_usec = 60;
*/
static unsigned int tx_irq_mod_usec = 150;
-/* This is the first interrupt mode to try out of:
- * 0 => MSI-X
- * 1 => MSI
- * 2 => legacy
- */
-static unsigned int interrupt_mode;
-
-/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
- * i.e. the number of CPUs among which we may distribute simultaneous
- * interrupt handling.
- *
- * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
- * The default (0) means to assign an interrupt to each core.
- */
-static unsigned int rss_cpus;
-module_param(rss_cpus, uint, 0444);
-MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
-
static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
-static unsigned irq_adapt_low_thresh = 8000;
-module_param(irq_adapt_low_thresh, uint, 0644);
-MODULE_PARM_DESC(irq_adapt_low_thresh,
- "Threshold score for reducing IRQ moderation");
-
-static unsigned irq_adapt_high_thresh = 16000;
-module_param(irq_adapt_high_thresh, uint, 0644);
-MODULE_PARM_DESC(irq_adapt_high_thresh,
- "Threshold score for increasing IRQ moderation");
-
static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
@@ -214,18 +113,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
-static int efx_soft_enable_interrupts(struct efx_nic *efx);
-static void efx_soft_disable_interrupts(struct efx_nic *efx);
-static void efx_remove_channel(struct efx_channel *channel);
-static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
-static void efx_init_napi_channel(struct efx_channel *channel);
-static void efx_fini_napi(struct efx_nic *efx);
-static void efx_fini_napi_channel(struct efx_channel *channel);
-static void efx_fini_struct(struct efx_nic *efx);
-static void efx_start_all(struct efx_nic *efx);
-static void efx_stop_all(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
@@ -239,761 +128,12 @@ static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
ASSERT_RTNL(); \
} while (0)
-static int efx_check_disabled(struct efx_nic *efx)
-{
- if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
- netif_err(efx, drv, efx->net_dev,
- "device is disabled due to earlier errors\n");
- return -EIO;
- }
- return 0;
-}
-
-/**************************************************************************
- *
- * Event queue processing
- *
- *************************************************************************/
-
-/* Process channel's event queue
- *
- * This function is responsible for processing the event queue of a
- * single channel. The caller must guarantee that this function will
- * never be concurrently called more than once on the same channel,
- * though different channels may be being processed concurrently.
- */
-static int efx_process_channel(struct efx_channel *channel, int budget)
-{
- struct efx_tx_queue *tx_queue;
- struct list_head rx_list;
- int spent;
-
- if (unlikely(!channel->enabled))
- return 0;
-
- /* Prepare the batch receive list */
- EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
- INIT_LIST_HEAD(&rx_list);
- channel->rx_list = &rx_list;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->pkts_compl = 0;
- tx_queue->bytes_compl = 0;
- }
-
- spent = efx_nic_process_eventq(channel, budget);
- if (spent && efx_channel_has_rx_queue(channel)) {
- struct efx_rx_queue *rx_queue =
- efx_channel_get_rx_queue(channel);
-
- efx_rx_flush_packet(channel);
- efx_fast_push_rx_descriptors(rx_queue, true);
- }
-
- /* Update BQL */
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->bytes_compl) {
- netdev_tx_completed_queue(tx_queue->core_txq,
- tx_queue->pkts_compl, tx_queue->bytes_compl);
- }
- }
-
- /* Receive any packets we queued up */
- netif_receive_skb_list(channel->rx_list);
- channel->rx_list = NULL;
-
- return spent;
-}
-
-/* NAPI poll handler
- *
- * NAPI guarantees serialisation of polls of the same device, which
- * provides the guarantee required by efx_process_channel().
- */
-static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
-{
- int step = efx->irq_mod_step_us;
-
- if (channel->irq_mod_score < irq_adapt_low_thresh) {
- if (channel->irq_moderation_us > step) {
- channel->irq_moderation_us -= step;
- efx->type->push_irq_moderation(channel);
- }
- } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
- if (channel->irq_moderation_us <
- efx->irq_rx_moderation_us) {
- channel->irq_moderation_us += step;
- efx->type->push_irq_moderation(channel);
- }
- }
-
- channel->irq_count = 0;
- channel->irq_mod_score = 0;
-}
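efx_poll() below invokes this adaptation roughly once per 1000 interrupts (the ++channel->irq_count == 1000 test). A sketch of the effect, using the module-parameter defaults removed earlier in this hunk (irq_adapt_low_thresh = 8000, irq_adapt_high_thresh = 16000); the score value is hypothetical:

    channel->irq_mod_score = 20000;  /* > irq_adapt_high_thresh (16000) */
    /* irq_moderation_us grows by efx->irq_mod_step_us, bounded by
     * efx->irq_rx_moderation_us, and the new value is pushed to the
     * hardware via efx->type->push_irq_moderation(channel).
     */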
-
-static int efx_poll(struct napi_struct *napi, int budget)
-{
- struct efx_channel *channel =
- container_of(napi, struct efx_channel, napi_str);
- struct efx_nic *efx = channel->efx;
- int spent;
-
- netif_vdbg(efx, intr, efx->net_dev,
- "channel %d NAPI poll executing on CPU %d\n",
- channel->channel, raw_smp_processor_id());
-
- spent = efx_process_channel(channel, budget);
-
- xdp_do_flush_map();
-
- if (spent < budget) {
- if (efx_channel_has_rx_queue(channel) &&
- efx->irq_rx_adaptive &&
- unlikely(++channel->irq_count == 1000)) {
- efx_update_irq_mod(efx, channel);
- }
-
-#ifdef CONFIG_RFS_ACCEL
- /* Perhaps expire some ARFS filters */
- mod_delayed_work(system_wq, &channel->filter_work, 0);
-#endif
-
- /* There is no race here; although napi_disable() will
- * only wait for napi_complete(), this isn't a problem
- * since efx_nic_eventq_read_ack() will have no effect if
- * interrupts have already been disabled.
- */
- if (napi_complete_done(napi, spent))
- efx_nic_eventq_read_ack(channel);
- }
-
- return spent;
-}
-
-/* Create event queue
- * Event queue memory allocations are done only once. If the channel
- * is reset, the memory buffer will be reused; this guards against
- * errors during channel reset and also simplifies interrupt handling.
- */
-static int efx_probe_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- unsigned long entries;
-
- netif_dbg(efx, probe, efx->net_dev,
- "chan %d create event queue\n", channel->channel);
-
- /* Build an event queue with room for one event per tx and rx buffer,
- * plus some extra for link state events and MCDI completions. */
- entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
- channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
-
- return efx_nic_probe_eventq(channel);
-}
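A worked instance of the sizing above, assuming the default ring size of 1024 entries for both RX and TX (an assumption for illustration; the real values come from efx->rxq_entries and efx->txq_entries):

    entries = roundup_pow_of_two(1024 + 1024 + 128);           /* 2176 -> 4096 */
    channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; /* 4095 */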
-
-/* Prepare channel's event queue */
-static int efx_init_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- int rc;
-
- EFX_WARN_ON_PARANOID(channel->eventq_init);
-
- netif_dbg(efx, drv, efx->net_dev,
- "chan %d init event queue\n", channel->channel);
-
- rc = efx_nic_init_eventq(channel);
- if (rc == 0) {
- efx->type->push_irq_moderation(channel);
- channel->eventq_read_ptr = 0;
- channel->eventq_init = true;
- }
- return rc;
-}
-
-/* Enable event queue processing and NAPI */
-void efx_start_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, ifup, channel->efx->net_dev,
- "chan %d start event queue\n", channel->channel);
-
- /* Make sure the NAPI handler sees the enabled flag set */
- channel->enabled = true;
- smp_wmb();
-
- napi_enable(&channel->napi_str);
- efx_nic_eventq_read_ack(channel);
-}
-
-/* Disable event queue processing and NAPI */
-void efx_stop_eventq(struct efx_channel *channel)
-{
- if (!channel->enabled)
- return;
-
- napi_disable(&channel->napi_str);
- channel->enabled = false;
-}
-
-static void efx_fini_eventq(struct efx_channel *channel)
-{
- if (!channel->eventq_init)
- return;
-
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d fini event queue\n", channel->channel);
-
- efx_nic_fini_eventq(channel);
- channel->eventq_init = false;
-}
-
-static void efx_remove_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d remove event queue\n", channel->channel);
-
- efx_nic_remove_eventq(channel);
-}
-
-/**************************************************************************
- *
- * Channel handling
- *
- *************************************************************************/
-
-/* Allocate and initialise a channel structure. */
-static struct efx_channel *
-efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int j;
-
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
-
- channel->efx = efx;
- channel->channel = i;
- channel->type = &efx_default_channel_type;
-
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- tx_queue->efx = efx;
- tx_queue->queue = i * EFX_TXQ_TYPES + j;
- tx_queue->channel = channel;
- }
-
-#ifdef CONFIG_RFS_ACCEL
- INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
-#endif
-
- rx_queue = &channel->rx_queue;
- rx_queue->efx = efx;
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
-
- return channel;
-}
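The tx_queue->queue assignment above gives each channel a contiguous block of EFX_TXQ_TYPES hardware queue numbers. For example, assuming EFX_TXQ_TYPES is 4 (its value in this era of the driver):

    /* channel i = 2 owns queues 2 * 4 + j for j = 0..3, i.e. 8..11 */
    tx_queue->queue = i * EFX_TXQ_TYPES + j;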
-
-/* Allocate and initialise a channel structure, copying parameters
- * (but not resources) from an old channel structure.
- */
-static struct efx_channel *
-efx_copy_channel(const struct efx_channel *old_channel)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int j;
-
- channel = kmalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
-
- *channel = *old_channel;
-
- channel->napi_dev = NULL;
- INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
- channel->napi_str.napi_id = 0;
- channel->napi_str.state = 0;
- memset(&channel->eventq, 0, sizeof(channel->eventq));
-
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- if (tx_queue->channel)
- tx_queue->channel = channel;
- tx_queue->buffer = NULL;
- memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
- }
-
- rx_queue = &channel->rx_queue;
- rx_queue->buffer = NULL;
- memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
-#ifdef CONFIG_RFS_ACCEL
- INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
-#endif
-
- return channel;
-}
-
-static int efx_probe_channel(struct efx_channel *channel)
-{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int rc;
-
- netif_dbg(channel->efx, probe, channel->efx->net_dev,
- "creating channel %d\n", channel->channel);
-
- rc = channel->type->pre_probe(channel);
- if (rc)
- goto fail;
-
- rc = efx_probe_eventq(channel);
- if (rc)
- goto fail;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- rc = efx_probe_tx_queue(tx_queue);
- if (rc)
- goto fail;
- }
-
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- rc = efx_probe_rx_queue(rx_queue);
- if (rc)
- goto fail;
- }
-
- channel->rx_list = NULL;
-
- return 0;
-
-fail:
- efx_remove_channel(channel);
- return rc;
-}
-
-static void
-efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
-{
- struct efx_nic *efx = channel->efx;
- const char *type;
- int number;
-
- number = channel->channel;
-
- if (number >= efx->xdp_channel_offset &&
- !WARN_ON_ONCE(!efx->n_xdp_channels)) {
- type = "-xdp";
- number -= efx->xdp_channel_offset;
- } else if (efx->tx_channel_offset == 0) {
- type = "";
- } else if (number < efx->tx_channel_offset) {
- type = "-rx";
- } else {
- type = "-tx";
- number -= efx->tx_channel_offset;
- }
- snprintf(buf, len, "%s%s-%d", efx->name, type, number);
-}
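This scheme produces the per-channel IRQ names that end up in /proc/interrupts via efx_set_channel_names() below. Some illustrative outputs (interface name and offsets assumed):

    /* efx->name = "eth0", combined RX+TX (tx_channel_offset = 0),
     * channel 1:                                        "eth0-1"     */
    /* separate TX channels, tx_channel_offset = 4,
     * channel 5:                                        "eth0-tx-1"  */
    /* XDP channel, xdp_channel_offset = 8, channel 9:   "eth0-xdp-1" */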
-
-static void efx_set_channel_names(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- channel->type->get_name(channel,
- efx->msi_context[channel->channel].name,
- sizeof(efx->msi_context[0].name));
-}
-
-static int efx_probe_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- int rc;
-
- /* Restart special buffer allocation */
- efx->next_buffer_table = 0;
-
- /* Probe channels in reverse, so that any 'extra' channels
- * use the start of the buffer table. This allows the traffic
- * channels to be resized without moving them or wasting the
- * entries before them.
- */
- efx_for_each_channel_rev(channel, efx) {
- rc = efx_probe_channel(channel);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to create channel %d\n",
- channel->channel);
- goto fail;
- }
- }
- efx_set_channel_names(efx);
-
- return 0;
-
-fail:
- efx_remove_channels(efx);
- return rc;
-}
-
-/* Channels are shutdown and reinitialised whilst the NIC is running
- * to propagate configuration changes (mtu, checksum offload), or
- * to clear hardware error conditions
- */
-static void efx_start_datapath(struct efx_nic *efx)
-{
- netdev_features_t old_features = efx->net_dev->features;
- bool old_rx_scatter = efx->rx_scatter;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- struct efx_channel *channel;
- size_t rx_buf_len;
-
- /* Calculate the rx buffer allocation parameters required to
- * support the current MTU, including padding for header
- * alignment and overruns.
- */
- efx->rx_dma_len = (efx->rx_prefix_size +
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
- efx->type->rx_buffer_padding);
- rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
- efx->rx_ip_align + efx->rx_dma_len);
- if (rx_buf_len <= PAGE_SIZE) {
- efx->rx_scatter = efx->type->always_rx_scatter;
- efx->rx_buffer_order = 0;
- } else if (efx->type->can_rx_scatter) {
- BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
- BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
- 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
- EFX_RX_BUF_ALIGNMENT) >
- PAGE_SIZE);
- efx->rx_scatter = true;
- efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
- efx->rx_buffer_order = 0;
- } else {
- efx->rx_scatter = false;
- efx->rx_buffer_order = get_order(rx_buf_len);
- }
-
- efx_rx_config_page_split(efx);
- if (efx->rx_buffer_order)
- netif_dbg(efx, drv, efx->net_dev,
- "RX buf len=%u; page order=%u batch=%u\n",
- efx->rx_dma_len, efx->rx_buffer_order,
- efx->rx_pages_per_batch);
- else
- netif_dbg(efx, drv, efx->net_dev,
- "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
- efx->rx_dma_len, efx->rx_page_buf_step,
- efx->rx_bufs_per_page, efx->rx_pages_per_batch);
-
- /* Restore previously fixed features in hw_features and remove
- * features which are fixed now
- */
- efx->net_dev->hw_features |= efx->net_dev->features;
- efx->net_dev->hw_features &= ~efx->fixed_features;
- efx->net_dev->features |= efx->fixed_features;
- if (efx->net_dev->features != old_features)
- netdev_features_change(efx->net_dev);
-
- /* RX filters may also have scatter-enabled flags */
- if (efx->rx_scatter != old_rx_scatter)
- efx->type->filter_update_rx_scatter(efx);
-
- /* We must keep at least one descriptor in a TX ring empty.
- * We could avoid this when the queue size does not exactly
- * match the hardware ring size, but it's not that important.
- * Therefore we stop the queue when one more skb might fill
- * the ring completely. We wake it when half way back to
- * empty.
- */
- efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
- efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
-
- /* Initialise the channels */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_init_tx_queue(tx_queue);
- atomic_inc(&efx->active_queues);
- }
-
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- efx_init_rx_queue(rx_queue);
- atomic_inc(&efx->active_queues);
- efx_stop_eventq(channel);
- efx_fast_push_rx_descriptors(rx_queue, false);
- efx_start_eventq(channel);
- }
-
- WARN_ON(channel->rx_pkt_n_frags);
- }
-
- efx_ptp_start_datapath(efx);
-
- if (netif_device_present(efx->net_dev))
- netif_tx_wake_all_queues(efx->net_dev);
-}
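In concrete numbers, with a 1024-entry TX ring and efx_tx_max_skb_descs() evaluating to 18 (both values assumed purely for illustration; the latter depends on the NIC type), the thresholds above become:

    efx->txq_stop_thresh = 1024 - 18;  /* stop at 1006: one more worst-case skb might not fit */
    efx->txq_wake_thresh = 1006 / 2;   /* wake again at 503, half way back to empty */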
-
-static void efx_stop_datapath(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
- BUG_ON(efx->port_enabled);
-
- efx_ptp_stop_datapath(efx);
-
- /* Stop RX refill */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel)
- rx_queue->refill_enabled = false;
- }
-
- efx_for_each_channel(channel, efx) {
- /* RX packet processing is pipelined, so wait for the
- * NAPI handler to complete. At least event queue 0
- * might be kept active by non-data events, so don't
- * use napi_synchronize() but actually disable NAPI
- * temporarily.
- */
- if (efx_channel_has_rx_queue(channel)) {
- efx_stop_eventq(channel);
- efx_start_eventq(channel);
- }
- }
-
- rc = efx->type->fini_dmaq(efx);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
- } else {
- netif_dbg(efx, drv, efx->net_dev,
- "successfully flushed all queues\n");
- }
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_fini_rx_queue(rx_queue);
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
- efx_fini_tx_queue(tx_queue);
- }
- efx->xdp_rxq_info_failed = false;
-}
-
-static void efx_remove_channel(struct efx_channel *channel)
-{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
-
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "destroy chan %d\n", channel->channel);
-
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_remove_rx_queue(rx_queue);
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
- efx_remove_tx_queue(tx_queue);
- efx_remove_eventq(channel);
- channel->type->post_remove(channel);
-}
-
-static void efx_remove_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_remove_channel(channel);
-
- kfree(efx->xdp_tx_queues);
-}
-
-int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
-{
- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
- u32 old_rxq_entries, old_txq_entries;
- unsigned i, next_buffer_table = 0;
- int rc, rc2;
-
- rc = efx_check_disabled(efx);
- if (rc)
- return rc;
-
- /* Not all channels should be reallocated. We must avoid
- * reallocating their buffer table entries.
- */
- efx_for_each_channel(channel, efx) {
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
-
- if (channel->type->copy)
- continue;
- next_buffer_table = max(next_buffer_table,
- channel->eventq.index +
- channel->eventq.entries);
- efx_for_each_channel_rx_queue(rx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- rx_queue->rxd.index +
- rx_queue->rxd.entries);
- efx_for_each_channel_tx_queue(tx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- tx_queue->txd.index +
- tx_queue->txd.entries);
- }
-
- efx_device_detach_sync(efx);
- efx_stop_all(efx);
- efx_soft_disable_interrupts(efx);
-
- /* Clone channels (where possible) */
- memset(other_channel, 0, sizeof(other_channel));
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- if (channel->type->copy)
- channel = channel->type->copy(channel);
- if (!channel) {
- rc = -ENOMEM;
- goto out;
- }
- other_channel[i] = channel;
- }
-
- /* Swap entry counts and channel pointers */
- old_rxq_entries = efx->rxq_entries;
- old_txq_entries = efx->txq_entries;
- efx->rxq_entries = rxq_entries;
- efx->txq_entries = txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
-
- /* Restart buffer table allocation */
- efx->next_buffer_table = next_buffer_table;
-
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- if (!channel->type->copy)
- continue;
- rc = efx_probe_channel(channel);
- if (rc)
- goto rollback;
- efx_init_napi_channel(efx->channel[i]);
- }
-
-out:
- /* Destroy unused channel structures */
- for (i = 0; i < efx->n_channels; i++) {
- channel = other_channel[i];
- if (channel && channel->type->copy) {
- efx_fini_napi_channel(channel);
- efx_remove_channel(channel);
- kfree(channel);
- }
- }
-
- rc2 = efx_soft_enable_interrupts(efx);
- if (rc2) {
- rc = rc ? rc : rc2;
- netif_err(efx, drv, efx->net_dev,
- "unable to restart interrupts on channel reallocation\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- } else {
- efx_start_all(efx);
- efx_device_attach_if_not_resetting(efx);
- }
- return rc;
-
-rollback:
- /* Swap back */
- efx->rxq_entries = old_rxq_entries;
- efx->txq_entries = old_txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
- goto out;
-}
-
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
-{
- mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
-}
-
-static bool efx_default_channel_want_txqs(struct efx_channel *channel)
-{
- return channel->channel - channel->efx->tx_channel_offset <
- channel->efx->n_tx_channels;
-}
-
-static const struct efx_channel_type efx_default_channel_type = {
- .pre_probe = efx_channel_dummy_op_int,
- .post_remove = efx_channel_dummy_op_void,
- .get_name = efx_get_channel_name,
- .copy = efx_copy_channel,
- .want_txqs = efx_default_channel_want_txqs,
- .keep_eventq = false,
- .want_pio = true,
-};
-
-int efx_channel_dummy_op_int(struct efx_channel *channel)
-{
- return 0;
-}
-
-void efx_channel_dummy_op_void(struct efx_channel *channel)
-{
-}
-
/**************************************************************************
*
* Port handling
*
**************************************************************************/
-/* This ensures that the kernel is kept informed (via
- * netif_carrier_on/off) of the link status, and also maintains the
- * link status's stop on the port's TX queue.
- */
-void efx_link_status_changed(struct efx_nic *efx)
-{
- struct efx_link_state *link_state = &efx->link_state;
-
- /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
- * that no events are triggered between unregister_netdev() and the
- * driver unloading. A more general condition is that NETDEV_CHANGE
- * can only be generated between NETDEV_UP and NETDEV_DOWN */
- if (!netif_running(efx->net_dev))
- return;
-
- if (link_state->up != netif_carrier_ok(efx->net_dev)) {
- efx->n_link_state_changes++;
-
- if (link_state->up)
- netif_carrier_on(efx->net_dev);
- else
- netif_carrier_off(efx->net_dev);
- }
-
- /* Status message for kernel log */
- if (link_state->up)
- netif_info(efx, link, efx->net_dev,
- "link up at %uMbps %s-duplex (MTU %d)\n",
- link_state->speed, link_state->fd ? "full" : "half",
- efx->net_dev->mtu);
- else
- netif_info(efx, link, efx->net_dev, "link down\n");
-}
-
void efx_link_set_advertising(struct efx_nic *efx,
const unsigned long *advertising)
{
@@ -1035,73 +175,6 @@ void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
static void efx_fini_port(struct efx_nic *efx);
-/* We assume that efx->type->reconfigure_mac will always try to sync RX
- * filters and therefore needs to read-lock the filter table against freeing
- */
-void efx_mac_reconfigure(struct efx_nic *efx)
-{
- down_read(&efx->filter_sem);
- efx->type->reconfigure_mac(efx);
- up_read(&efx->filter_sem);
-}
-
-/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
- * the MAC appropriately. All other PHY configuration changes are pushed
- * through phy_op->set_settings(), and pushed asynchronously to the MAC
- * through efx_monitor().
- *
- * Callers must hold the mac_lock
- */
-int __efx_reconfigure_port(struct efx_nic *efx)
-{
- enum efx_phy_mode phy_mode;
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- /* Disable PHY transmit in mac level loopbacks */
- phy_mode = efx->phy_mode;
- if (LOOPBACK_INTERNAL(efx))
- efx->phy_mode |= PHY_MODE_TX_DISABLED;
- else
- efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
-
- rc = efx->type->reconfigure_port(efx);
-
- if (rc)
- efx->phy_mode = phy_mode;
-
- return rc;
-}
-
-/* Reinitialise the MAC to pick up new PHY settings, even if the port is
- * disabled. */
-int efx_reconfigure_port(struct efx_nic *efx)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- rc = __efx_reconfigure_port(efx);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
-/* Asynchronous work item for changing MAC promiscuity and multicast
- * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
- * MAC directly. */
-static void efx_mac_work(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
-
- mutex_lock(&efx->mac_lock);
- if (efx->port_enabled)
- efx_mac_reconfigure(efx);
- mutex_unlock(&efx->mac_lock);
-}
-
static int efx_probe_port(struct efx_nic *efx)
{
int rc;
@@ -1155,44 +228,6 @@ fail1:
return rc;
}
-static void efx_start_port(struct efx_nic *efx)
-{
- netif_dbg(efx, ifup, efx->net_dev, "start port\n");
- BUG_ON(efx->port_enabled);
-
- mutex_lock(&efx->mac_lock);
- efx->port_enabled = true;
-
- /* Ensure MAC ingress/egress is enabled */
- efx_mac_reconfigure(efx);
-
- mutex_unlock(&efx->mac_lock);
-}
-
-/* Cancel work for MAC reconfiguration, periodic hardware monitoring
- * and the async self-test, wait for them to finish and prevent them
- * being scheduled again. This doesn't cover online resets, which
- * should only be cancelled when removing the device.
- */
-static void efx_stop_port(struct efx_nic *efx)
-{
- netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- efx->port_enabled = false;
- mutex_unlock(&efx->mac_lock);
-
- /* Serialise against efx_set_multicast_list() */
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
-
- cancel_delayed_work_sync(&efx->monitor_work);
- efx_selftest_async_cancel(efx);
- cancel_work_sync(&efx->mac_work);
-}
-
static void efx_fini_port(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
@@ -1291,100 +326,6 @@ static void efx_dissociate(struct efx_nic *efx)
}
}
-/* This configures the PCI device to enable I/O and DMA. */
-static int efx_init_io(struct efx_nic *efx)
-{
- struct pci_dev *pci_dev = efx->pci_dev;
- dma_addr_t dma_mask = efx->type->max_dma_mask;
- unsigned int mem_map_size = efx->type->mem_map_size(efx);
- int rc, bar;
-
- netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
-
- bar = efx->type->mem_bar(efx);
-
- rc = pci_enable_device(pci_dev);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to enable PCI device\n");
- goto fail1;
- }
-
- pci_set_master(pci_dev);
-
- /* Set the PCI DMA mask. Try all possibilities from our genuine mask
- * down to 32 bits, because some architectures will allow 40 bit
- * masks even though they reject 46 bit masks.
- */
- while (dma_mask > 0x7fffffffUL) {
- rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
- if (rc == 0)
- break;
- dma_mask >>= 1;
- }
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "could not find a suitable DMA mask\n");
- goto fail2;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "using DMA mask %llx\n", (unsigned long long) dma_mask);
-
- efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
- rc = pci_request_region(pci_dev, bar, "sfc");
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "request for memory BAR failed\n");
- rc = -EIO;
- goto fail3;
- }
- efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
- if (!efx->membase) {
- netif_err(efx, probe, efx->net_dev,
- "could not map memory BAR at %llx+%x\n",
- (unsigned long long)efx->membase_phys, mem_map_size);
- rc = -ENOMEM;
- goto fail4;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "memory BAR at %llx+%x (virtual %p)\n",
- (unsigned long long)efx->membase_phys, mem_map_size,
- efx->membase);
-
- return 0;
-
- fail4:
- pci_release_region(efx->pci_dev, bar);
- fail3:
- efx->membase_phys = 0;
- fail2:
- pci_disable_device(efx->pci_dev);
- fail1:
- return rc;
-}
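To make the mask walk above concrete: each dma_mask >>= 1 drops one address bit, and the loop condition stops before a 31-bit mask would be attempted. Assuming max_dma_mask is DMA_BIT_MASK(46), the usual sfc value:

    dma_addr_t dma_mask = DMA_BIT_MASK(46);  /* 0x3fffffffffff */
    while (dma_mask > 0x7fffffffUL) {        /* i.e. while still >= 32 bits */
        if (dma_set_mask_and_coherent(&pci_dev->dev, dma_mask) == 0)
            break;                           /* first mask the platform accepts wins */
        dma_mask >>= 1;                      /* 46 -> 45 -> ... -> 32 bits */
    }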
-
-static void efx_fini_io(struct efx_nic *efx)
-{
- int bar;
-
- netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
-
- if (efx->membase) {
- iounmap(efx->membase);
- efx->membase = NULL;
- }
-
- if (efx->membase_phys) {
- bar = efx->type->mem_bar(efx);
- pci_release_region(efx->pci_dev, bar);
- efx->membase_phys = 0;
- }
-
- /* Don't disable bus-mastering if VFs are assigned */
- if (!pci_vfs_assigned(efx->pci_dev))
- pci_disable_device(efx->pci_dev);
-}
-
void efx_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx)
{
@@ -1395,478 +336,6 @@ void efx_set_default_rx_indir_table(struct efx_nic *efx,
ethtool_rxfh_indir_default(i, efx->rss_spread);
}
-static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
-{
- cpumask_var_t thread_mask;
- unsigned int count;
- int cpu;
-
- if (rss_cpus) {
- count = rss_cpus;
- } else {
- if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
- netif_warn(efx, probe, efx->net_dev,
- "RSS disabled due to allocation failure\n");
- return 1;
- }
-
- count = 0;
- for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, thread_mask)) {
- ++count;
- cpumask_or(thread_mask, thread_mask,
- topology_sibling_cpumask(cpu));
- }
- }
-
- free_cpumask_var(thread_mask);
- }
-
- if (count > EFX_MAX_RX_QUEUES) {
- netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
- "Reducing number of rx queues from %u to %u.\n",
- count, EFX_MAX_RX_QUEUES);
- count = EFX_MAX_RX_QUEUES;
- }
-
- /* If RSS is requested for the PF *and* VFs then we can't write RSS
- * table entries that are inaccessible to VFs
- */
-#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted) {
- if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
- count > efx_vf_size(efx)) {
- netif_warn(efx, probe, efx->net_dev,
- "Reducing number of RSS channels from %u to %u for "
- "VF support. Increase vf-msix-limit to use more "
- "channels on the PF.\n",
- count, efx_vf_size(efx));
- count = efx_vf_size(efx);
- }
- }
-#endif
-
- return count;
-}
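The sibling-mask walk above counts physical cores, not logical CPUs. Worked numbers for an assumed topology:

    /* 8 cores x 2-way hyperthreading = 16 online CPUs.
     * cpumask_or() folds both siblings of a core into thread_mask the
     * first time either is visited, so ++count fires once per core:
     * count = 8 RSS channels, not 16 (before the EFX_MAX_RX_QUEUES clamp).
     */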
-
-static int efx_allocate_msix_channels(struct efx_nic *efx,
- unsigned int max_channels,
- unsigned int extra_channels,
- unsigned int parallelism)
-{
- unsigned int n_channels = parallelism;
- int vec_count;
- int n_xdp_tx;
- int n_xdp_ev;
-
- if (efx_separate_tx_channels)
- n_channels *= 2;
- n_channels += extra_channels;
-
- /* To allow XDP transmit to happen from arbitrary NAPI contexts
- * we allocate a TX queue per CPU. We share event queues across
- * multiple tx queues, assuming tx and ev queues are both
- * maximum size.
- */
-
- n_xdp_tx = num_possible_cpus();
- n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
-
- vec_count = pci_msix_vec_count(efx->pci_dev);
- if (vec_count < 0)
- return vec_count;
-
- max_channels = min_t(unsigned int, vec_count, max_channels);
-
- /* Check resources.
- * We need a channel per event queue, plus a VI per tx queue.
- * This may be more pessimistic than it needs to be.
- */
- if (n_channels + n_xdp_ev > max_channels) {
- netif_err(efx, drv, efx->net_dev,
- "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
- n_xdp_ev, n_channels, max_channels);
- efx->n_xdp_channels = 0;
- efx->xdp_tx_per_channel = 0;
- efx->xdp_tx_queue_count = 0;
- } else {
- efx->n_xdp_channels = n_xdp_ev;
- efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
- efx->xdp_tx_queue_count = n_xdp_tx;
- n_channels += n_xdp_ev;
- netif_dbg(efx, drv, efx->net_dev,
- "Allocating %d TX and %d event queues for XDP\n",
- n_xdp_tx, n_xdp_ev);
- }
-
- if (vec_count < n_channels) {
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
- vec_count, n_channels);
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Performance may be reduced.\n");
- n_channels = vec_count;
- }
-
- n_channels = min(n_channels, max_channels);
-
- efx->n_channels = n_channels;
-
- /* Ignore XDP tx channels when creating rx channels. */
- n_channels -= efx->n_xdp_channels;
-
- if (efx_separate_tx_channels) {
- efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
- efx->tx_channel_offset =
- n_channels - efx->n_tx_channels;
- efx->n_rx_channels =
- max(n_channels -
- efx->n_tx_channels, 1U);
- } else {
- efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
- efx->tx_channel_offset = 0;
- efx->n_rx_channels = n_channels;
- }
-
- efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
- efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
-
- efx->xdp_channel_offset = n_channels;
-
- netif_dbg(efx, drv, efx->net_dev,
- "Allocating %u RX channels\n",
- efx->n_rx_channels);
-
- return efx->n_channels;
-}
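Worked numbers for the XDP sizing above (the CPU count and EFX_TXQ_TYPES = 4 are assumptions for illustration):

    n_xdp_tx = num_possible_cpus();                    /* e.g. 16 */
    n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);  /* DIV_ROUND_UP(16, 4) = 4 */
    /* 16 XDP TX queues shared across 4 extra event channels,
     * each channel carrying EFX_TXQ_TYPES TX queues. */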
-
-/* Probe the number and type of interrupts we are able to obtain, and
- * the resulting numbers of channels and RX queues.
- */
-static int efx_probe_interrupts(struct efx_nic *efx)
-{
- unsigned int extra_channels = 0;
- unsigned int rss_spread;
- unsigned int i, j;
- int rc;
-
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
- if (efx->extra_channel_type[i])
- ++extra_channels;
-
- if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
- unsigned int parallelism = efx_wanted_parallelism(efx);
- struct msix_entry xentries[EFX_MAX_CHANNELS];
- unsigned int n_channels;
-
- rc = efx_allocate_msix_channels(efx, efx->max_channels,
- extra_channels, parallelism);
- if (rc >= 0) {
- n_channels = rc;
- for (i = 0; i < n_channels; i++)
- xentries[i].entry = i;
- rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
- n_channels);
- }
- if (rc < 0) {
- /* Fall back to single channel MSI */
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI-X\n");
- if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
- efx->interrupt_mode = EFX_INT_MODE_MSI;
- else
- return rc;
- } else if (rc < n_channels) {
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Insufficient MSI-X vectors"
- " available (%d < %u).\n", rc, n_channels);
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Performance may be reduced.\n");
- n_channels = rc;
- }
-
- if (rc > 0) {
- for (i = 0; i < efx->n_channels; i++)
- efx_get_channel(efx, i)->irq =
- xentries[i].vector;
- }
- }
-
- /* Try single interrupt MSI */
- if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
- efx->n_channels = 1;
- efx->n_rx_channels = 1;
- efx->n_tx_channels = 1;
- efx->n_xdp_channels = 0;
- efx->xdp_channel_offset = efx->n_channels;
- rc = pci_enable_msi(efx->pci_dev);
- if (rc == 0) {
- efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
- } else {
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI\n");
- if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
- efx->interrupt_mode = EFX_INT_MODE_LEGACY;
- else
- return rc;
- }
- }
-
- /* Assume legacy interrupts */
- if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
- efx->n_rx_channels = 1;
- efx->n_tx_channels = 1;
- efx->n_xdp_channels = 0;
- efx->xdp_channel_offset = efx->n_channels;
- efx->legacy_irq = efx->pci_dev->irq;
- }
-
- /* Assign extra channels if possible, before XDP channels */
- efx->n_extra_tx_channels = 0;
- j = efx->xdp_channel_offset;
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
- if (!efx->extra_channel_type[i])
- continue;
- if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
- efx->extra_channel_type[i]->handle_no_channel(efx);
- } else {
- --j;
- efx_get_channel(efx, j)->type =
- efx->extra_channel_type[i];
- if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
- efx->n_extra_tx_channels++;
- }
- }
-
- rss_spread = efx->n_rx_channels;
- /* RSS might be usable on VFs even if it is disabled on the PF */
-#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted) {
- efx->rss_spread = ((rss_spread > 1 ||
- !efx->type->sriov_wanted(efx)) ?
- rss_spread : efx_vf_size(efx));
- return 0;
- }
-#endif
- efx->rss_spread = rss_spread;
-
- return 0;
-}
-
-#if defined(CONFIG_SMP)
-static void efx_set_interrupt_affinity(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- unsigned int cpu;
-
- efx_for_each_channel(channel, efx) {
- cpu = cpumask_local_spread(channel->channel,
- pcibus_to_node(efx->pci_dev->bus));
- irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
- }
-}
-
-static void efx_clear_interrupt_affinity(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- irq_set_affinity_hint(channel->irq, NULL);
-}
-#else
-static void
-efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
-{
-}
-
-static void
-efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
-{
-}
-#endif /* CONFIG_SMP */
-
-static int efx_soft_enable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel, *end_channel;
- int rc;
-
- BUG_ON(efx->state == STATE_DISABLED);
-
- efx->irq_soft_enabled = true;
- smp_wmb();
-
- efx_for_each_channel(channel, efx) {
- if (!channel->type->keep_eventq) {
- rc = efx_init_eventq(channel);
- if (rc)
- goto fail;
- }
- efx_start_eventq(channel);
- }
-
- efx_mcdi_mode_event(efx);
-
- return 0;
-fail:
- end_channel = channel;
- efx_for_each_channel(channel, efx) {
- if (channel == end_channel)
- break;
- efx_stop_eventq(channel);
- if (!channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- return rc;
-}
-
-static void efx_soft_disable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- if (efx->state == STATE_DISABLED)
- return;
-
- efx_mcdi_mode_poll(efx);
-
- efx->irq_soft_enabled = false;
- smp_wmb();
-
- if (efx->legacy_irq)
- synchronize_irq(efx->legacy_irq);
-
- efx_for_each_channel(channel, efx) {
- if (channel->irq)
- synchronize_irq(channel->irq);
-
- efx_stop_eventq(channel);
- if (!channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- /* Flush the asynchronous MCDI request queue */
- efx_mcdi_flush_async(efx);
-}
-
-static int efx_enable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel, *end_channel;
- int rc;
-
- BUG_ON(efx->state == STATE_DISABLED);
-
- if (efx->eeh_disabled_legacy_irq) {
- enable_irq(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = false;
- }
-
- efx->type->irq_enable_master(efx);
-
- efx_for_each_channel(channel, efx) {
- if (channel->type->keep_eventq) {
- rc = efx_init_eventq(channel);
- if (rc)
- goto fail;
- }
- }
-
- rc = efx_soft_enable_interrupts(efx);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- end_channel = channel;
- efx_for_each_channel(channel, efx) {
- if (channel == end_channel)
- break;
- if (channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- efx->type->irq_disable_non_ev(efx);
-
- return rc;
-}
-
-static void efx_disable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_soft_disable_interrupts(efx);
-
- efx_for_each_channel(channel, efx) {
- if (channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- efx->type->irq_disable_non_ev(efx);
-}
-
-static void efx_remove_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- /* Remove MSI/MSI-X interrupts */
- efx_for_each_channel(channel, efx)
- channel->irq = 0;
- pci_disable_msi(efx->pci_dev);
- pci_disable_msix(efx->pci_dev);
-
- /* Remove legacy interrupt */
- efx->legacy_irq = 0;
-}
-
-static int efx_set_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- int xdp_queue_number;
-
- efx->tx_channel_offset =
- efx_separate_tx_channels ?
- efx->n_channels - efx->n_tx_channels : 0;
-
- if (efx->xdp_tx_queue_count) {
- EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
-
- /* Allocate array for XDP TX queue lookup. */
- efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
- sizeof(*efx->xdp_tx_queues),
- GFP_KERNEL);
- if (!efx->xdp_tx_queues)
- return -ENOMEM;
- }
-
- /* We need to mark which channels really have RX and TX
- * queues, and adjust the TX queue numbers if we have separate
- * RX-only and TX-only channels.
- */
- xdp_queue_number = 0;
- efx_for_each_channel(channel, efx) {
- if (channel->channel < efx->n_rx_channels)
- channel->rx_queue.core_index = channel->channel;
- else
- channel->rx_queue.core_index = -1;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->queue -= (efx->tx_channel_offset *
- EFX_TXQ_TYPES);
-
- if (efx_channel_is_xdp_tx(channel) &&
- xdp_queue_number < efx->xdp_tx_queue_count) {
- efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
- xdp_queue_number++;
- }
- }
- }
- return 0;
-}
-
static int efx_probe_nic(struct efx_nic *efx)
{
int rc;
@@ -2067,81 +536,6 @@ static int efx_probe_all(struct efx_nic *efx)
return rc;
}
-/* If the interface is supposed to be running but is not, start
- * the hardware and software data path, regular activity for the port
- * (MAC statistics, link polling, etc.) and schedule the port to be
- * reconfigured. Interrupts must already be enabled. This function
- * is safe to call multiple times, so long as the NIC is not disabled.
- * Requires the RTNL lock.
- */
-static void efx_start_all(struct efx_nic *efx)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
- BUG_ON(efx->state == STATE_DISABLED);
-
- /* Check that it is appropriate to restart the interface. All
- * of these flags are safe to read under just the rtnl lock */
- if (efx->port_enabled || !netif_running(efx->net_dev) ||
- efx->reset_pending)
- return;
-
- efx_start_port(efx);
- efx_start_datapath(efx);
-
- /* Start the hardware monitor if there is one */
- if (efx->type->monitor != NULL)
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
-
- /* Link state detection is normally event-driven; we have
- * to poll now because we could have missed a change
- */
- mutex_lock(&efx->mac_lock);
- if (efx->phy_op->poll(efx))
- efx_link_status_changed(efx);
- mutex_unlock(&efx->mac_lock);
-
- efx->type->start_stats(efx);
- efx->type->pull_stats(efx);
- spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, NULL);
- spin_unlock_bh(&efx->stats_lock);
-}
-
-/* Quiesce the hardware and software data path, and regular activity
- * for the port without bringing the link down. Safe to call multiple
- * times with the NIC in almost any state, but interrupts should be
- * enabled. Requires the RTNL lock.
- */
-static void efx_stop_all(struct efx_nic *efx)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- /* port_enabled can be read safely under the rtnl lock */
- if (!efx->port_enabled)
- return;
-
- /* update stats before we go down so we can accurately count
- * rx_nodesc_drops
- */
- efx->type->pull_stats(efx);
- spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, NULL);
- spin_unlock_bh(&efx->stats_lock);
- efx->type->stop_stats(efx);
- efx_stop_port(efx);
-
- /* Stop the kernel transmit interface. This is only valid if
- * the device is stopped or detached; otherwise the watchdog
- * may fire immediately.
- */
- WARN_ON(netif_running(efx->net_dev) &&
- netif_device_present(efx->net_dev));
- netif_tx_disable(efx->net_dev);
-
- efx_stop_datapath(efx);
-}
-
static void efx_remove_all(struct efx_nic *efx)
{
rtnl_lock();
@@ -2237,36 +631,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
/**************************************************************************
*
- * Hardware monitor
- *
- **************************************************************************/
-
-/* Run periodically off the general workqueue */
-static void efx_monitor(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic,
- monitor_work.work);
-
- netif_vdbg(efx, timer, efx->net_dev,
- "hardware monitor executing on CPU %d\n",
- raw_smp_processor_id());
- BUG_ON(efx->type->monitor == NULL);
-
- /* If the mac_lock is already held then it is likely a port
- * reconfiguration is already in place, which will likely do
- * most of the work of monitor() anyway. */
- if (mutex_trylock(&efx->mac_lock)) {
- if (efx->port_enabled)
- efx->type->monitor(efx);
- mutex_unlock(&efx->mac_lock);
- }
-
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
-}
-
-/**************************************************************************
- *
* ioctls
*
*************************************************************************/
@@ -2294,45 +658,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
/**************************************************************************
*
- * NAPI interface
- *
- **************************************************************************/
-
-static void efx_init_napi_channel(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str,
- efx_poll, napi_weight);
-}
-
-static void efx_init_napi(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_init_napi_channel(channel);
-}
-
-static void efx_fini_napi_channel(struct efx_channel *channel)
-{
- if (channel->napi_dev)
- netif_napi_del(&channel->napi_str);
-
- channel->napi_dev = NULL;
-}
-
-static void efx_fini_napi(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_fini_napi_channel(channel);
-}
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2852,292 +1177,6 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/**************************************************************************
*
- * Device reset and suspend
- *
- **************************************************************************/
-
-/* Tears down the entire software state and most of the hardware state
- * before reset. */
-void efx_reset_down(struct efx_nic *efx, enum reset_type method)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- if (method == RESET_TYPE_MCDI_TIMEOUT)
- efx->type->prepare_flr(efx);
-
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
-
- mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
- mutex_lock(&efx->rss_lock);
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
- method != RESET_TYPE_DATAPATH)
- efx->phy_op->fini(efx);
- efx->type->fini(efx);
-}
-
-/* This function will always ensure that the locks acquired in
- * efx_reset_down() are released. A failure return code indicates
- * that we were unable to reinitialise the hardware, and the
- * driver should be disabled. If ok is false, then the rx and tx
- * engines are not restarted, pending a RESET_DISABLE. */
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- if (method == RESET_TYPE_MCDI_TIMEOUT)
- efx->type->finish_flr(efx);
-
- /* Ensure that SRAM is initialised even if we're disabling the device */
- rc = efx->type->init(efx);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
- goto fail;
- }
-
- if (!ok)
- goto fail;
-
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
- method != RESET_TYPE_DATAPATH) {
- rc = efx->phy_op->init(efx);
- if (rc)
- goto fail;
- rc = efx->phy_op->reconfigure(efx);
- if (rc && rc != -EPERM)
- netif_err(efx, drv, efx->net_dev,
- "could not restore PHY settings\n");
- }
-
- rc = efx_enable_interrupts(efx);
- if (rc)
- goto fail;
-
-#ifdef CONFIG_SFC_SRIOV
- rc = efx->type->vswitching_restore(efx);
- if (rc) /* not fatal; the PF will still work fine */
- netif_warn(efx, probe, efx->net_dev,
- "failed to restore vswitching rc=%d;"
- " VFs may not function\n", rc);
-#endif
-
- if (efx->type->rx_restore_rss_contexts)
- efx->type->rx_restore_rss_contexts(efx);
- mutex_unlock(&efx->rss_lock);
- efx->type->filter_table_restore(efx);
- up_write(&efx->filter_sem);
- if (efx->type->sriov_reset)
- efx->type->sriov_reset(efx);
-
- mutex_unlock(&efx->mac_lock);
-
- efx_start_all(efx);
-
- if (efx->type->udp_tnl_push_ports)
- efx->type->udp_tnl_push_ports(efx);
-
- return 0;
-
-fail:
- efx->port_initialized = false;
-
- mutex_unlock(&efx->rss_lock);
- up_write(&efx->filter_sem);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
-/* Reset the NIC using the specified method. Note that the reset may
- * fail, in which case the card will be left in an unusable state.
- *
- * Caller must hold the rtnl_lock.
- */
-int efx_reset(struct efx_nic *efx, enum reset_type method)
-{
- int rc, rc2;
- bool disabled;
-
- netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
- RESET_TYPE(method));
-
- efx_device_detach_sync(efx);
- efx_reset_down(efx, method);
-
- rc = efx->type->reset(efx, method);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
- goto out;
- }
-
- /* Clear flags for the scopes we covered. We assume the NIC and
- * driver are now quiescent so that there is no race here.
- */
- if (method < RESET_TYPE_MAX_METHOD)
- efx->reset_pending &= -(1 << (method + 1));
- else /* it doesn't fit into the well-ordered scope hierarchy */
- __clear_bit(method, &efx->reset_pending);
-
- /* Reinitialise bus-mastering, which may have been turned off before
- * the reset was scheduled. This is still appropriate, even in the
- * RESET_TYPE_DISABLE since this driver generally assumes the hardware
- * can respond to requests. */
- pci_set_master(efx->pci_dev);
-
-out:
- /* Leave device stopped if necessary */
- disabled = rc ||
- method == RESET_TYPE_DISABLE ||
- method == RESET_TYPE_RECOVER_OR_DISABLE;
- rc2 = efx_reset_up(efx, method, !disabled);
- if (rc2) {
- disabled = true;
- if (!rc)
- rc = rc2;
- }
-
- if (disabled) {
- dev_close(efx->net_dev);
- netif_err(efx, drv, efx->net_dev, "has been disabled\n");
- efx->state = STATE_DISABLED;
- } else {
- netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
- efx_device_attach_if_not_resetting(efx);
- }
- return rc;
-}
-
-/* Try recovery mechanisms.
- * For now only EEH is supported.
- * Returns 0 if the recovery mechanisms are unsuccessful.
- * Returns a non-zero value otherwise.
- */
-int efx_try_recovery(struct efx_nic *efx)
-{
-#ifdef CONFIG_EEH
- /* A PCI error can occur and not be seen by EEH because nothing
- * happens on the PCI bus. In this case the driver may fail and
- * schedule a 'recover or reset', leading to this recovery handler.
- * Manually call the eeh failure check function.
- */
- struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
- if (eeh_dev_check_failure(eehdev)) {
- /* The EEH mechanisms will handle the error and reset the
- * device if necessary.
- */
- return 1;
- }
-#endif
- return 0;
-}
-
-static void efx_wait_for_bist_end(struct efx_nic *efx)
-{
- int i;
-
- for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
- if (efx_mcdi_poll_reboot(efx))
- goto out;
- msleep(BIST_WAIT_DELAY_MS);
- }
-
- netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
-out:
- /* Either way unset the BIST flag. If we found no reboot we probably
- * won't recover, but we should try.
- */
- efx->mc_bist_for_other_fn = false;
-}
-
-/* The worker thread exists so that code that cannot sleep can
- * schedule a reset for later.
- */
-static void efx_reset_work(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
- unsigned long pending;
- enum reset_type method;
-
- pending = READ_ONCE(efx->reset_pending);
- method = fls(pending) - 1;
-
- if (method == RESET_TYPE_MC_BIST)
- efx_wait_for_bist_end(efx);
-
- if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
- method == RESET_TYPE_RECOVER_OR_ALL) &&
- efx_try_recovery(efx))
- return;
-
- if (!pending)
- return;
-
- rtnl_lock();
-
- /* We checked the state in efx_schedule_reset() but it may
- * have changed by now. Now that we have the RTNL lock,
- * it cannot change again.
- */
- if (efx->state == STATE_READY)
- (void)efx_reset(efx, method);
-
- rtnl_unlock();
-}
-
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
-{
- enum reset_type method;
-
- if (efx->state == STATE_RECOVERY) {
- netif_dbg(efx, drv, efx->net_dev,
- "recovering: skip scheduling %s reset\n",
- RESET_TYPE(type));
- return;
- }
-
- switch (type) {
- case RESET_TYPE_INVISIBLE:
- case RESET_TYPE_ALL:
- case RESET_TYPE_RECOVER_OR_ALL:
- case RESET_TYPE_WORLD:
- case RESET_TYPE_DISABLE:
- case RESET_TYPE_RECOVER_OR_DISABLE:
- case RESET_TYPE_DATAPATH:
- case RESET_TYPE_MC_BIST:
- case RESET_TYPE_MCDI_TIMEOUT:
- method = type;
- netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
- RESET_TYPE(method));
- break;
- default:
- method = efx->type->map_reset_reason(type);
- netif_dbg(efx, drv, efx->net_dev,
- "scheduling %s reset for %s\n",
- RESET_TYPE(method), RESET_TYPE(type));
- break;
- }
-
- set_bit(method, &efx->reset_pending);
- smp_mb(); /* ensure we change reset_pending before checking state */
-
- /* If we're not READY then just leave the flags set as the cue
- * to abort probing or reschedule the reset later.
- */
- if (READ_ONCE(efx->state) != STATE_READY)
- return;
-
- /* efx_process_channel() will no longer read events once a
- * reset is scheduled. So switch back to poll'd MCDI completions. */
- efx_mcdi_mode_poll(efx);
-
- queue_work(reset_workqueue, &efx->reset_work);
-}
-
-/**************************************************************************
- *
* List of NICs we support
*
**************************************************************************/
@@ -3169,139 +1208,10 @@ static const struct pci_device_id efx_pci_table[] = {
/**************************************************************************
*
- * Dummy PHY/MAC operations
- *
- * Can be used for some unimplemented operations
- * Needed so all function pointers are valid and do not have to be tested
- * before use
- *
- **************************************************************************/
-int efx_port_dummy_op_int(struct efx_nic *efx)
-{
- return 0;
-}
-void efx_port_dummy_op_void(struct efx_nic *efx) {}
-
-static bool efx_port_dummy_op_poll(struct efx_nic *efx)
-{
- return false;
-}
-
-static const struct efx_phy_operations efx_dummy_phy_operations = {
- .init = efx_port_dummy_op_int,
- .reconfigure = efx_port_dummy_op_int,
- .poll = efx_port_dummy_op_poll,
- .fini = efx_port_dummy_op_void,
-};
-
-/**************************************************************************
- *
* Data housekeeping
*
**************************************************************************/
-/* This zeroes out and then fills in the invariants in a struct
- * efx_nic (including all sub-structures).
- */
-static int efx_init_struct(struct efx_nic *efx,
- struct pci_dev *pci_dev, struct net_device *net_dev)
-{
- int rc = -ENOMEM, i;
-
- /* Initialise common structures */
- INIT_LIST_HEAD(&efx->node);
- INIT_LIST_HEAD(&efx->secondary_list);
- spin_lock_init(&efx->biu_lock);
-#ifdef CONFIG_SFC_MTD
- INIT_LIST_HEAD(&efx->mtd_list);
-#endif
- INIT_WORK(&efx->reset_work, efx_reset_work);
- INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
- INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
- efx->pci_dev = pci_dev;
- efx->msg_enable = debug;
- efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
-
- efx->net_dev = net_dev;
- efx->rx_prefix_size = efx->type->rx_prefix_size;
- efx->rx_ip_align =
- NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
- efx->rx_packet_hash_offset =
- efx->type->rx_hash_offset - efx->type->rx_prefix_size;
- efx->rx_packet_ts_offset =
- efx->type->rx_ts_offset - efx->type->rx_prefix_size;
- INIT_LIST_HEAD(&efx->rss_context.list);
- mutex_init(&efx->rss_lock);
- spin_lock_init(&efx->stats_lock);
- efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
- efx->num_mac_stats = MC_CMD_MAC_NSTATS;
- BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
- mutex_init(&efx->mac_lock);
-#ifdef CONFIG_RFS_ACCEL
- mutex_init(&efx->rps_mutex);
- spin_lock_init(&efx->rps_hash_lock);
- /* Failure to allocate is not fatal, but may degrade ARFS performance */
- efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
- sizeof(*efx->rps_hash_table), GFP_KERNEL);
-#endif
- efx->phy_op = &efx_dummy_phy_operations;
- efx->mdio.dev = net_dev;
- INIT_WORK(&efx->mac_work, efx_mac_work);
- init_waitqueue_head(&efx->flush_wq);
-
- for (i = 0; i < EFX_MAX_CHANNELS; i++) {
- efx->channel[i] = efx_alloc_channel(efx, i, NULL);
- if (!efx->channel[i])
- goto fail;
- efx->msi_context[i].efx = efx;
- efx->msi_context[i].index = i;
- }
-
- /* Higher numbered interrupt modes are less capable! */
- if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
- efx->type->min_interrupt_mode)) {
- rc = -EIO;
- goto fail;
- }
- efx->interrupt_mode = max(efx->type->max_interrupt_mode,
- interrupt_mode);
- efx->interrupt_mode = min(efx->type->min_interrupt_mode,
- interrupt_mode);
-
- /* Would be good to use the net_dev name, but we're too early */
- snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
- pci_name(pci_dev));
- efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
- if (!efx->workqueue)
- goto fail;
-
- return 0;
-
-fail:
- efx_fini_struct(efx);
- return rc;
-}
-
-static void efx_fini_struct(struct efx_nic *efx)
-{
- int i;
-
-#ifdef CONFIG_RFS_ACCEL
- kfree(efx->rps_hash_table);
-#endif
-
- for (i = 0; i < EFX_MAX_CHANNELS; i++)
- kfree(efx->channel[i]);
-
- kfree(efx->vpd_sn);
-
- if (efx->workqueue) {
- destroy_workqueue(efx->workqueue);
- efx->workqueue = NULL;
- }
-}
-
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
u64 n_rx_nodesc_trunc = 0;
@@ -3519,7 +1429,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
* are not READY.
*/
BUG_ON(efx->state == STATE_READY);
- cancel_work_sync(&efx->reset_work);
+ efx_flush_reset_workqueue(efx);
efx_disable_interrupts(efx);
efx_clear_interrupt_affinity(efx);
@@ -3559,7 +1469,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_pci_remove_main(efx);
- efx_fini_io(efx);
+ efx_fini_io(efx, efx->type->mem_bar(efx));
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
efx_fini_struct(efx);
@@ -3782,7 +1692,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
- rc = efx_init_io(efx);
+ rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
+ efx->type->mem_map_size(efx));
if (rc)
goto fail2;
@@ -3826,7 +1737,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return 0;
fail3:
- efx_fini_io(efx);
+ efx_fini_io(efx, efx->type->mem_bar(efx));
fail2:
efx_fini_struct(efx);
fail1:
@@ -3904,7 +1815,7 @@ static int efx_pm_thaw(struct device *dev)
rtnl_unlock();
/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
- queue_work(reset_workqueue, &efx->reset_work);
+ efx_queue_reset_work(efx);
return 0;
@@ -4083,10 +1994,6 @@ static struct pci_driver efx_pci_driver = {
*
*************************************************************************/
-module_param(interrupt_mode, uint, 0444);
-MODULE_PARM_DESC(interrupt_mode,
- "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
-
static int __init efx_init_module(void)
{
int rc;
@@ -4103,11 +2010,9 @@ static int __init efx_init_module(void)
goto err_sriov;
#endif
- reset_workqueue = create_singlethread_workqueue("sfc_reset");
- if (!reset_workqueue) {
- rc = -ENOMEM;
+ rc = efx_create_reset_workqueue();
+ if (rc)
goto err_reset;
- }
rc = pci_register_driver(&efx_pci_driver);
if (rc < 0)
@@ -4116,7 +2021,7 @@ static int __init efx_init_module(void)
return 0;
err_pci:
- destroy_workqueue(reset_workqueue);
+ efx_destroy_reset_workqueue();
err_reset:
#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
@@ -4132,7 +2037,7 @@ static void __exit efx_exit_module(void)
printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&efx_pci_driver);
- destroy_workqueue(reset_workqueue);
+ efx_destroy_reset_workqueue();
#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
#endif
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 2dd8d5002315..3920f29b2fed 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -15,31 +15,19 @@ int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);
/* TX */
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;
/* RX */
void efx_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx);
-void efx_rx_config_page_split(struct efx_nic *efx);
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
-void efx_rx_slow_fill(struct timer_list *t);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags);
@@ -48,7 +36,9 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
if (channel->rx_pkt_n_frags)
__efx_rx_packet(channel);
}
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+
+void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue);
+struct page *efx_reuse_page(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -80,8 +70,6 @@ static inline bool efx_rss_enabled(struct efx_nic *efx)
/* Filters */
-void efx_mac_reconfigure(struct efx_nic *efx);
-
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
@@ -218,26 +206,10 @@ static inline bool efx_rss_active(struct efx_rss_context *ctx)
return ctx->context_id != EFX_EF10_RSS_CONTEXT_INVALID;
}
-/* Channels */
-int efx_channel_dummy_op_int(struct efx_channel *channel);
-void efx_channel_dummy_op_void(struct efx_channel *channel);
-int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
-
-/* Ports */
-int efx_reconfigure_port(struct efx_nic *efx);
-int __efx_reconfigure_port(struct efx_nic *efx);
-
/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;
-/* Reset handling */
-int efx_reset(struct efx_nic *efx, enum reset_type method);
-void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-int efx_try_recovery(struct efx_nic *efx);
-
/* Global */
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
@@ -245,8 +217,6 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive);
-void efx_stop_eventq(struct efx_channel *channel);
-void efx_start_eventq(struct efx_channel *channel);
/* Dummy PHY ops for PHY drivers */
int efx_port_dummy_op_int(struct efx_nic *efx);
@@ -293,9 +263,6 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
efx_schedule_channel(channel);
}
-void efx_link_status_changed(struct efx_nic *efx);
-void efx_link_set_advertising(struct efx_nic *efx,
- const unsigned long *advertising);
void efx_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
new file mode 100644
index 000000000000..21bd71daf5a0
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -0,0 +1,1232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include "efx_channels.h"
+#include "efx.h"
+#include "efx_common.h"
+#include "tx_common.h"
+#include "rx_common.h"
+#include "nic.h"
+#include "sriov.h"
+
+/* This is the first interrupt mode to try out of:
+ * 0 => MSI-X
+ * 1 => MSI
+ * 2 => legacy
+ */
+static unsigned int interrupt_mode;
+module_param(interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
+ * i.e. the number of CPUs among which we may distribute simultaneous
+ * interrupt handling.
+ *
+ * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each core.
+ */
+static unsigned int rss_cpus;
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
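+/* Usage sketch (not part of this patch): both knobs are module parameters,
+ * e.g. "modprobe sfc rss_cpus=4 interrupt_mode=1" caps RSS at four CPUs and
+ * requests MSI rather than MSI-X.
+ */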
+
+static unsigned int irq_adapt_low_thresh = 8000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+ "Threshold score for reducing IRQ moderation");
+
+static unsigned int irq_adapt_high_thresh = 16000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+ "Threshold score for increasing IRQ moderation");
+
+/* This is the weight assigned to each of the (per-channel) virtual
+ * NAPI devices.
+ */
+static int napi_weight = 64;
+
+/***************
+ * Housekeeping
+ ***************/
+
+int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_channel_dummy_op_void,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
+ .keep_eventq = false,
+ .want_pio = true,
+};
+
+/*************
+ * INTERRUPTS
+ *************/
+
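+/* Estimate how many RX channels are worth having: by default one per
+ * physical core (hyperthread siblings are counted once via
+ * topology_sibling_cpumask()), unless overridden by rss_cpus.
+ */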
+static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+{
+ cpumask_var_t thread_mask;
+ unsigned int count;
+ int cpu;
+
+ if (rss_cpus) {
+ count = rss_cpus;
+ } else {
+ if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
+ netif_warn(efx, probe, efx->net_dev,
+ "RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ count = 0;
+ for_each_online_cpu(cpu) {
+ if (!cpumask_test_cpu(cpu, thread_mask)) {
+ ++count;
+ cpumask_or(thread_mask, thread_mask,
+ topology_sibling_cpumask(cpu));
+ }
+ }
+
+ free_cpumask_var(thread_mask);
+ }
+
+ if (count > EFX_MAX_RX_QUEUES) {
+ netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+ "Reducing number of rx queues from %u to %u.\n",
+ count, EFX_MAX_RX_QUEUES);
+ count = EFX_MAX_RX_QUEUES;
+ }
+
+ /* If RSS is requested for the PF *and* VFs then we can't write RSS
+ * table entries that are inaccessible to VFs
+ */
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ count > efx_vf_size(efx)) {
+ netif_warn(efx, probe, efx->net_dev,
+ "Reducing number of RSS channels from %u to %u for "
+ "VF support. Increase vf-msix-limit to use more "
+ "channels on the PF.\n",
+ count, efx_vf_size(efx));
+ count = efx_vf_size(efx);
+ }
+ }
+#endif
+
+ return count;
+}
+
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+ unsigned int max_channels,
+ unsigned int extra_channels,
+ unsigned int parallelism)
+{
+ unsigned int n_channels = parallelism;
+ int vec_count;
+ int n_xdp_tx;
+ int n_xdp_ev;
+
+ if (efx_separate_tx_channels)
+ n_channels *= 2;
+ n_channels += extra_channels;
+
+ /* To allow XDP transmit to happen from arbitrary NAPI contexts
+ * we allocate a TX queue per CPU. We share event queues across
+ * multiple tx queues, assuming tx and ev queues are both
+ * maximum size.
+ */
+
+ n_xdp_tx = num_possible_cpus();
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
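+ /* Worked example (assuming EFX_TXQ_TYPES is 4): a 32-CPU system asks
+ * for 32 XDP TX queues, which pack into DIV_ROUND_UP(32, 4) = 8 extra
+ * event queues/channels.
+ */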
+
+ vec_count = pci_msix_vec_count(efx->pci_dev);
+ if (vec_count < 0)
+ return vec_count;
+
+ max_channels = min_t(unsigned int, vec_count, max_channels);
+
+ /* Check resources.
+ * We need a channel per event queue, plus a VI per tx queue.
+ * This may be more pessimistic than it needs to be.
+ */
+ if (n_channels + n_xdp_ev > max_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+ efx->n_xdp_channels = 0;
+ efx->xdp_tx_per_channel = 0;
+ efx->xdp_tx_queue_count = 0;
+ } else {
+ efx->n_xdp_channels = n_xdp_ev;
+ efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+ efx->xdp_tx_queue_count = n_xdp_tx;
+ n_channels += n_xdp_ev;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %d TX and %d event queues for XDP\n",
+ n_xdp_tx, n_xdp_ev);
+ }
+
+ if (vec_count < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+ vec_count, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = vec_count;
+ }
+
+ n_channels = min(n_channels, max_channels);
+
+ efx->n_channels = n_channels;
+
+ /* Ignore XDP tx channels when creating rx channels. */
+ n_channels -= efx->n_xdp_channels;
+
+ if (efx_separate_tx_channels) {
+ efx->n_tx_channels =
+ min(max(n_channels / 2, 1U),
+ efx->max_tx_channels);
+ efx->tx_channel_offset =
+ n_channels - efx->n_tx_channels;
+ efx->n_rx_channels =
+ max(n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+ efx->tx_channel_offset = 0;
+ efx->n_rx_channels = n_channels;
+ }
+
+ efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
+ efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
+
+ efx->xdp_channel_offset = n_channels;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %u RX channels\n",
+ efx->n_rx_channels);
+
+ return efx->n_channels;
+}
+
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
+int efx_probe_interrupts(struct efx_nic *efx)
+{
+ unsigned int extra_channels = 0;
+ unsigned int rss_spread;
+ unsigned int i, j;
+ int rc;
+
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
+ if (efx->extra_channel_type[i])
+ ++extra_channels;
+
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ unsigned int parallelism = efx_wanted_parallelism(efx);
+ struct msix_entry xentries[EFX_MAX_CHANNELS];
+ unsigned int n_channels;
+
+ rc = efx_allocate_msix_channels(efx, efx->max_channels,
+ extra_channels, parallelism);
+ if (rc >= 0) {
+ n_channels = rc;
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+ n_channels);
+ }
+ if (rc < 0) {
+ /* Fall back to single channel MSI */
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ else
+ return rc;
+ } else if (rc < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %u).\n", rc, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = rc;
+ }
+
+ if (rc > 0) {
+ for (i = 0; i < efx->n_channels; i++)
+ efx_get_channel(efx, i)->irq =
+ xentries[i].vector;
+ }
+ }
+
+ /* Try single interrupt MSI */
+ if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
+ efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ rc = pci_enable_msi(efx->pci_dev);
+ if (rc == 0) {
+ efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI\n");
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
+ efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+ else
+ return rc;
+ }
+ }
+
+ /* Assume legacy interrupts */
+ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
+ efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ efx->legacy_irq = efx->pci_dev->irq;
+ }
+
+ /* Assign extra channels if possible, before XDP channels */
+ efx->n_extra_tx_channels = 0;
+ j = efx->xdp_channel_offset;
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
+ if (!efx->extra_channel_type[i])
+ continue;
+ if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
+ efx->extra_channel_type[i]->handle_no_channel(efx);
+ } else {
+ --j;
+ efx_get_channel(efx, j)->type =
+ efx->extra_channel_type[i];
+ if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
+ efx->n_extra_tx_channels++;
+ }
+ }
+
+ rss_spread = efx->n_rx_channels;
+ /* RSS might be usable on VFs even if it is disabled on the PF */
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ efx->rss_spread = ((rss_spread > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
+ rss_spread : efx_vf_size(efx));
+ return 0;
+ }
+#endif
+ efx->rss_spread = rss_spread;
+
+ return 0;
+}
+
+#if defined(CONFIG_SMP)
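+/* Spread IRQ affinity hints across CPUs, preferring CPUs local to the
+ * NIC's NUMA node (cpumask_local_spread() with the PCI bus node).
+ */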
+void efx_set_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ unsigned int cpu;
+
+ efx_for_each_channel(channel, efx) {
+ cpu = cpumask_local_spread(channel->channel,
+ pcibus_to_node(efx->pci_dev->bus));
+ irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
+ }
+}
+
+void efx_clear_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ irq_set_affinity_hint(channel->irq, NULL);
+}
+#else
+void
+efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+
+void
+efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+#endif /* CONFIG_SMP */
+
+void efx_remove_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ /* Remove MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ channel->irq = 0;
+ pci_disable_msi(efx->pci_dev);
+ pci_disable_msix(efx->pci_dev);
+
+ /* Remove legacy interrupt */
+ efx->legacy_irq = 0;
+}
+
+/***************
+ * EVENT QUEUES
+ ***************/
+
+/* Create event queue
+ * Event queue memory allocations are done only once. If the channel
+ * is reset, the memory buffer will be reused; this guards against
+ * errors during channel reset and also simplifies interrupt handling.
+ */
+int efx_probe_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned long entries;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "chan %d create event queue\n", channel->channel);
+
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions.
+ */
+ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
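+ /* With the default 1024-entry RX and TX rings this is
+ * roundup_pow_of_two(1024 + 1024 + 128) = 4096 events.
+ */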
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+ channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
+ return efx_nic_probe_eventq(channel);
+}
+
+/* Prepare channel's event queue */
+int efx_init_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ int rc;
+
+ EFX_WARN_ON_PARANOID(channel->eventq_init);
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ rc = efx_nic_init_eventq(channel);
+ if (rc == 0) {
+ efx->type->push_irq_moderation(channel);
+ channel->eventq_read_ptr = 0;
+ channel->eventq_init = true;
+ }
+ return rc;
+}
+
+/* Enable event queue processing and NAPI */
+void efx_start_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "chan %d start event queue\n", channel->channel);
+
+ /* Make sure the NAPI handler sees the enabled flag set */
+ channel->enabled = true;
+ smp_wmb();
+
+ napi_enable(&channel->napi_str);
+ efx_nic_eventq_read_ack(channel);
+}
+
+/* Disable event queue processing and NAPI */
+void efx_stop_eventq(struct efx_channel *channel)
+{
+ if (!channel->enabled)
+ return;
+
+ napi_disable(&channel->napi_str);
+ channel->enabled = false;
+}
+
+void efx_fini_eventq(struct efx_channel *channel)
+{
+ if (!channel->eventq_init)
+ return;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d fini event queue\n", channel->channel);
+
+ efx_nic_fini_eventq(channel);
+ channel->eventq_init = false;
+}
+
+void efx_remove_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d remove event queue\n", channel->channel);
+
+ efx_nic_remove_eventq(channel);
+}
+
+/**************************************************************************
+ *
+ * Channel handling
+ *
+ *************************************************************************/
+
+/* Allocate and initialise a channel structure. */
+struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+{
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int j;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->efx = efx;
+ channel->channel = i;
+ channel->type = &efx_default_channel_type;
+
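+ /* Each channel owns EFX_TXQ_TYPES consecutive hardware TX queue
+ * numbers; e.g. with EFX_TXQ_TYPES == 4, channel 2 owns queues 8..11.
+ */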
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = i * EFX_TXQ_TYPES + j;
+ tx_queue->channel = channel;
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+
+ return channel;
+}
+
+int efx_init_channels(struct efx_nic *efx)
+{
+ unsigned int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++) {
+ efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+ if (!efx->channel[i])
+ return -ENOMEM;
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
+ }
+
+ /* Higher numbered interrupt modes are less capable! */
+ if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
+ efx->type->min_interrupt_mode)) {
+ return -EIO;
+ }
+ efx->interrupt_mode = max(efx->type->max_interrupt_mode,
+ interrupt_mode);
+ efx->interrupt_mode = min(efx->type->min_interrupt_mode,
+ interrupt_mode);
+
+ return 0;
+}
+
+void efx_fini_channels(struct efx_nic *efx)
+{
+ unsigned int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++)
+ if (efx->channel[i]) {
+ kfree(efx->channel[i]);
+ efx->channel[i] = NULL;
+ }
+}
+
+/* Allocate and initialise a channel structure, copying parameters
+ * (but not resources) from an old channel structure.
+ */
+struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
+{
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int j;
+
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ *channel = *old_channel;
+
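+ /* Shallow-copy the old channel, then clear everything that must not be
+ * shared: the NAPI context and the event/descriptor ring buffers are
+ * re-created for the copy by efx_probe_channel().
+ */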
+ channel->napi_dev = NULL;
+ INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
+ channel->napi_str.napi_id = 0;
+ channel->napi_str.state = 0;
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
+ tx_queue->channel = channel;
+ tx_queue->buffer = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+ }
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+ timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+#ifdef CONFIG_RFS_ACCEL
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+ return channel;
+}
+
+static int efx_probe_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc;
+
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "creating channel %d\n", channel->channel);
+
+ rc = channel->type->pre_probe(channel);
+ if (rc)
+ goto fail;
+
+ rc = efx_probe_eventq(channel);
+ if (rc)
+ goto fail;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rc = efx_probe_rx_queue(rx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ channel->rx_list = NULL;
+
+ return 0;
+
+fail:
+ efx_remove_channel(channel);
+ return rc;
+}
+
+void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+{
+ struct efx_nic *efx = channel->efx;
+ const char *type;
+ int number;
+
+ number = channel->channel;
+
+ if (number >= efx->xdp_channel_offset &&
+ !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+ type = "-xdp";
+ number -= efx->xdp_channel_offset;
+ } else if (efx->tx_channel_offset == 0) {
+ type = "";
+ } else if (number < efx->tx_channel_offset) {
+ type = "-rx";
+ } else {
+ type = "-tx";
+ number -= efx->tx_channel_offset;
+ }
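+ /* Produces names such as "eth0-rx-0", "eth0-tx-1" or "eth0-xdp-0" when
+ * channel types are separated, or just "eth0-0" otherwise (the device
+ * name here is illustrative).
+ */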
+ snprintf(buf, len, "%s%s-%d", efx->name, type, number);
+}
+
+void efx_set_channel_names(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ channel->type->get_name(channel,
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
+}
+
+int efx_probe_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ /* Restart special buffer allocation */
+ efx->next_buffer_table = 0;
+
+ /* Probe channels in reverse, so that any 'extra' channels
+ * use the start of the buffer table. This allows the traffic
+ * channels to be resized without moving them or wasting the
+ * entries before them.
+ */
+ efx_for_each_channel_rev(channel, efx) {
+ rc = efx_probe_channel(channel);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
+ goto fail;
+ }
+ }
+ efx_set_channel_names(efx);
+
+ return 0;
+
+fail:
+ efx_remove_channels(efx);
+ return rc;
+}
+
+void efx_remove_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "destroy chan %d\n", channel->channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_remove_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_remove_tx_queue(tx_queue);
+ efx_remove_eventq(channel);
+ channel->type->post_remove(channel);
+}
+
+void efx_remove_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_remove_channel(channel);
+
+ kfree(efx->xdp_tx_queues);
+}
+
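+/* Re-create all channels with new ring sizes; this is, for example, the path
+ * behind changing ring sizes with ethtool. The device is detached and
+ * interrupts are disabled while rings are reallocated, rolling back to the
+ * old entry counts on failure.
+ */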
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ unsigned int i, next_buffer_table = 0;
+ u32 old_rxq_entries, old_txq_entries;
+ int rc, rc2;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+
+ /* Not all channels should be reallocated. We must avoid
+ * reallocating their buffer table entries.
+ */
+ efx_for_each_channel(channel, efx) {
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+
+ if (channel->type->copy)
+ continue;
+ next_buffer_table = max(next_buffer_table,
+ channel->eventq.index +
+ channel->eventq.entries);
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ rx_queue->rxd.index +
+ rx_queue->rxd.entries);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ tx_queue->txd.index +
+ tx_queue->txd.entries);
+ }
+
+ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+ efx_soft_disable_interrupts(efx);
+
+ /* Clone channels (where possible) */
+ memset(other_channel, 0, sizeof(other_channel));
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (channel->type->copy)
+ channel = channel->type->copy(channel);
+ if (!channel) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ other_channel[i] = channel;
+ }
+
+ /* Swap entry counts and channel pointers */
+ old_rxq_entries = efx->rxq_entries;
+ old_txq_entries = efx->txq_entries;
+ efx->rxq_entries = rxq_entries;
+ efx->txq_entries = txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+
+ /* Restart buffer table allocation */
+ efx->next_buffer_table = next_buffer_table;
+
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (!channel->type->copy)
+ continue;
+ rc = efx_probe_channel(channel);
+ if (rc)
+ goto rollback;
+ efx_init_napi_channel(efx->channel[i]);
+ }
+
+out:
+ /* Destroy unused channel structures */
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = other_channel[i];
+ if (channel && channel->type->copy) {
+ efx_fini_napi_channel(channel);
+ efx_remove_channel(channel);
+ kfree(channel);
+ }
+ }
+
+ rc2 = efx_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+ netif_err(efx, drv, efx->net_dev,
+ "unable to restart interrupts on channel reallocation\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ } else {
+ efx_start_all(efx);
+ efx_device_attach_if_not_resetting(efx);
+ }
+ return rc;
+
+rollback:
+ /* Swap back */
+ efx->rxq_entries = old_rxq_entries;
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+ goto out;
+}
+
+int efx_set_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ int xdp_queue_number;
+
+ efx->tx_channel_offset =
+ efx_separate_tx_channels ?
+ efx->n_channels - efx->n_tx_channels : 0;
+
+ if (efx->xdp_tx_queue_count) {
+ EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+ /* Allocate array for XDP TX queue lookup. */
+ efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+ sizeof(*efx->xdp_tx_queues),
+ GFP_KERNEL);
+ if (!efx->xdp_tx_queues)
+ return -ENOMEM;
+ }
+
+ /* We need to mark which channels really have RX and TX
+ * queues, and adjust the TX queue numbers if we have separate
+ * RX-only and TX-only channels.
+ */
+ xdp_queue_number = 0;
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->n_rx_channels)
+ channel->rx_queue.core_index = channel->channel;
+ else
+ channel->rx_queue.core_index = -1;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue -= (efx->tx_channel_offset *
+ EFX_TXQ_TYPES);
+
+ if (efx_channel_is_xdp_tx(channel) &&
+ xdp_queue_number < efx->xdp_tx_queue_count) {
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ xdp_queue_number++;
+ }
+ }
+ }
+ return 0;
+}
+
+bool efx_default_channel_want_txqs(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
+/*************
+ * START/STOP
+ *************/
+
+int efx_soft_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
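+ /* The write barrier pairs with the irq_soft_enabled checks in the
+ * interrupt handlers: the flag must be visible before any channel's
+ * events are processed.
+ */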
+ efx->irq_soft_enabled = true;
+ smp_wmb();
+
+ efx_for_each_channel(channel, efx) {
+ if (!channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ efx_start_eventq(channel);
+ }
+
+ efx_mcdi_mode_event(efx);
+
+ return 0;
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ return rc;
+}
+
+void efx_soft_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ if (efx->state == STATE_DISABLED)
+ return;
+
+ efx_mcdi_mode_poll(efx);
+
+ efx->irq_soft_enabled = false;
+ smp_wmb();
+
+ if (efx->legacy_irq)
+ synchronize_irq(efx->legacy_irq);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ /* Flush the asynchronous MCDI request queue */
+ efx_mcdi_flush_async(efx);
+}
+
+int efx_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ /* TODO: Is this really a bug? */
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx->type->irq_enable_master(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ rc = efx_soft_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+
+ return rc;
+}
+
+void efx_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_soft_disable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+}
+
+void efx_start_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ efx_init_rx_queue(rx_queue);
+ atomic_inc(&efx->active_queues);
+ efx_stop_eventq(channel);
+ efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_start_eventq(channel);
+ }
+
+ WARN_ON(channel->rx_pkt_n_frags);
+ }
+}
+
+void efx_stop_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+ int rc;
+
+ /* Stop RX refill */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ rx_queue->refill_enabled = false;
+ }
+
+ efx_for_each_channel(channel, efx) {
+ /* RX packet processing is pipelined, so wait for the
+ * NAPI handler to complete. At least event queue 0
+ * might be kept active by non-data events, so don't
+ * use napi_synchronize() but actually disable NAPI
+ * temporarily.
+ */
+ if (efx_channel_has_rx_queue(channel)) {
+ efx_stop_eventq(channel);
+ efx_start_eventq(channel);
+ }
+ }
+
+ rc = efx->type->fini_dmaq(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_fini_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_fini_tx_queue(tx_queue);
+ }
+}
+
+/**************************************************************************
+ *
+ * NAPI interface
+ *
+ *************************************************************************/
+
+/* Process channel's event queue
+ *
+ * This function is responsible for processing the event queue of a
+ * single channel. The caller must guarantee that this function will
+ * never be concurrently called more than once on the same channel,
+ * though different channels may be being processed concurrently.
+ */
+static int efx_process_channel(struct efx_channel *channel, int budget)
+{
+ struct efx_tx_queue *tx_queue;
+ struct list_head rx_list;
+ int spent;
+
+ if (unlikely(!channel->enabled))
+ return 0;
+
+ /* Prepare the batch receive list */
+ EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+ INIT_LIST_HEAD(&rx_list);
+ channel->rx_list = &rx_list;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->pkts_compl = 0;
+ tx_queue->bytes_compl = 0;
+ }
+
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent && efx_channel_has_rx_queue(channel)) {
+ struct efx_rx_queue *rx_queue =
+ efx_channel_get_rx_queue(channel);
+
+ efx_rx_flush_packet(channel);
+ efx_fast_push_rx_descriptors(rx_queue, true);
+ }
+
+ /* Update BQL */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->bytes_compl) {
+ netdev_tx_completed_queue(tx_queue->core_txq,
+ tx_queue->pkts_compl,
+ tx_queue->bytes_compl);
+ }
+ }
+
+ /* Receive any packets we queued up */
+ netif_receive_skb_list(channel->rx_list);
+ channel->rx_list = NULL;
+
+ return spent;
+}
+
+static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
+{
+ int step = efx->irq_mod_step_us;
+
+ if (channel->irq_mod_score < irq_adapt_low_thresh) {
+ if (channel->irq_moderation_us > step) {
+ channel->irq_moderation_us -= step;
+ efx->type->push_irq_moderation(channel);
+ }
+ } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
+ if (channel->irq_moderation_us <
+ efx->irq_rx_moderation_us) {
+ channel->irq_moderation_us += step;
+ efx->type->push_irq_moderation(channel);
+ }
+ }
+
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+}
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by efx_process_channel().
+ */
+static int efx_poll(struct napi_struct *napi, int budget)
+{
+ struct efx_channel *channel =
+ container_of(napi, struct efx_channel, napi_str);
+ struct efx_nic *efx = channel->efx;
+ int spent;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "channel %d NAPI poll executing on CPU %d\n",
+ channel->channel, raw_smp_processor_id());
+
+ spent = efx_process_channel(channel, budget);
+
+ xdp_do_flush_map();
+
+ if (spent < budget) {
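+ /* Adapt IRQ moderation about once every 1000 under-budget polls,
+ * steered by the irq_adapt_low_thresh and irq_adapt_high_thresh
+ * module parameters.
+ */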
+ if (efx_channel_has_rx_queue(channel) &&
+ efx->irq_rx_adaptive &&
+ unlikely(++channel->irq_count == 1000)) {
+ efx_update_irq_mod(efx, channel);
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ /* Perhaps expire some ARFS filters */
+ mod_delayed_work(system_wq, &channel->filter_work, 0);
+#endif
+
+ /* There is no race here; although napi_disable() will
+ * only wait for napi_complete(), this isn't a problem
+ * since efx_nic_eventq_read_ack() will have no effect if
+ * interrupts have already been disabled.
+ */
+ if (napi_complete_done(napi, spent))
+ efx_nic_eventq_read_ack(channel);
+ }
+
+ return spent;
+}
+
+void efx_init_napi_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ channel->napi_dev = efx->net_dev;
+ netif_napi_add(channel->napi_dev, &channel->napi_str,
+ efx_poll, napi_weight);
+}
+
+void efx_init_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_init_napi_channel(channel);
+}
+
+void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+
+ channel->napi_dev = NULL;
+}
+
+void efx_fini_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
+}
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
new file mode 100644
index 000000000000..8d7b8c4142d7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_CHANNELS_H
+#define EFX_CHANNELS_H
+
+int efx_probe_interrupts(struct efx_nic *efx);
+void efx_remove_interrupts(struct efx_nic *efx);
+int efx_soft_enable_interrupts(struct efx_nic *efx);
+void efx_soft_disable_interrupts(struct efx_nic *efx);
+int efx_enable_interrupts(struct efx_nic *efx);
+void efx_disable_interrupts(struct efx_nic *efx);
+
+void efx_set_interrupt_affinity(struct efx_nic *efx);
+void efx_clear_interrupt_affinity(struct efx_nic *efx);
+
+int efx_probe_eventq(struct efx_channel *channel);
+int efx_init_eventq(struct efx_channel *channel);
+void efx_start_eventq(struct efx_channel *channel);
+void efx_stop_eventq(struct efx_channel *channel);
+void efx_fini_eventq(struct efx_channel *channel);
+void efx_remove_eventq(struct efx_channel *channel);
+
+struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
+void efx_set_channel_names(struct efx_nic *efx);
+int efx_init_channels(struct efx_nic *efx);
+int efx_probe_channels(struct efx_nic *efx);
+int efx_set_channels(struct efx_nic *efx);
+bool efx_default_channel_want_txqs(struct efx_channel *channel);
+void efx_remove_channel(struct efx_channel *channel);
+void efx_remove_channels(struct efx_nic *efx);
+void efx_fini_channels(struct efx_nic *efx);
+struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
+void efx_start_channels(struct efx_nic *efx);
+void efx_stop_channels(struct efx_nic *efx);
+
+void efx_init_napi_channel(struct efx_channel *channel);
+void efx_init_napi(struct efx_nic *efx);
+void efx_fini_napi_channel(struct efx_channel *channel);
+void efx_fini_napi(struct efx_nic *efx);
+
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
new file mode 100644
index 000000000000..fe74c66c8ec6
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -0,0 +1,999 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "selftest.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "nic.h"
+#include "io.h"
+#include "mcdi_pcol.h"
+
+static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
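+/* Hypothetical example: "modprobe sfc debug=0x0007" would enable only the
+ * DRV, PROBE and LINK message classes.
+ */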
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor.
+ * On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs for power systems with EEH support, this will give EEH a
+ * chance to start.
+ */
+static unsigned int efx_monitor_interval = 1 * HZ;
+
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
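+/* i.e. poll every 100ms for up to 10 seconds in total */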
+
+/* Default stats update time */
+#define STATS_PERIOD_MS_DEFAULT 1000
+
+const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+const char *const efx_reset_type_names[] = {
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DATAPATH] = "DATAPATH",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+ [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
+};
+
+#define RESET_TYPE(type) \
+ STRING_TABLE_LOOKUP(type, efx_reset_type)
+
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
+const char *const efx_loopback_mode_names[] = {
+ [LOOPBACK_NONE] = "NONE",
+ [LOOPBACK_DATA] = "DATAPATH",
+ [LOOPBACK_GMAC] = "GMAC",
+ [LOOPBACK_XGMII] = "XGMII",
+ [LOOPBACK_XGXS] = "XGXS",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_GMII] = "GMII",
+ [LOOPBACK_SGMII] = "SGMII",
+ [LOOPBACK_XGBR] = "XGBR",
+ [LOOPBACK_XFI] = "XFI",
+ [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
+ [LOOPBACK_GMII_FAR] = "GMII_FAR",
+ [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
+ [LOOPBACK_XFI_FAR] = "XFI_FAR",
+ [LOOPBACK_GPHY] = "GPHY",
+ [LOOPBACK_PHYXS] = "PHYXS",
+ [LOOPBACK_PCS] = "PCS",
+ [LOOPBACK_PMAPMD] = "PMA/PMD",
+ [LOOPBACK_XPORT] = "XPORT",
+ [LOOPBACK_XGMII_WS] = "XGMII_WS",
+ [LOOPBACK_XAUI_WS] = "XAUI_WS",
+ [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
+ [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
+ [LOOPBACK_GMII_WS] = "GMII_WS",
+ [LOOPBACK_XFI_WS] = "XFI_WS",
+ [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
+ [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
+};
+
+/* Reset workqueue. If any NIC has a hardware failure then a reset will be
+ * queued onto this work queue. This is not a per-nic work queue, because
+ * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
+ */
+static struct workqueue_struct *reset_workqueue;
+
+int efx_create_reset_workqueue(void)
+{
+ reset_workqueue = create_singlethread_workqueue("sfc_reset");
+ if (!reset_workqueue) {
+ printk(KERN_ERR "Failed to create reset workqueue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void efx_queue_reset_work(struct efx_nic *efx)
+{
+ queue_work(reset_workqueue, &efx->reset_work);
+}
+
+void efx_flush_reset_workqueue(struct efx_nic *efx)
+{
+ cancel_work_sync(&efx->reset_work);
+}
+
+void efx_destroy_reset_workqueue(void)
+{
+ if (reset_workqueue) {
+ destroy_workqueue(reset_workqueue);
+ reset_workqueue = NULL;
+ }
+}
+
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void efx_mac_reconfigure(struct efx_nic *efx)
+{
+ down_read(&efx->filter_sem);
+ efx->type->reconfigure_mac(efx);
+ up_read(&efx->filter_sem);
+}
+
+/* Asynchronous work item for changing MAC promiscuity and multicast
+ * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
+ * MAC directly.
+ */
+static void efx_mac_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_enabled)
+ efx_mac_reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status, and also keeps the port's
+ * TX queue stopped while the link is down.
+ */
+void efx_link_status_changed(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+
+ /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+ * that no events are triggered between unregister_netdev() and the
+ * driver unloading. A more general condition is that NETDEV_CHANGE
+ * can only be generated between NETDEV_UP and NETDEV_DOWN
+ */
+ if (!netif_running(efx->net_dev))
+ return;
+
+ if (link_state->up != netif_carrier_ok(efx->net_dev)) {
+ efx->n_link_state_changes++;
+
+ if (link_state->up)
+ netif_carrier_on(efx->net_dev);
+ else
+ netif_carrier_off(efx->net_dev);
+ }
+
+ /* Status message for kernel log */
+ if (link_state->up)
+ netif_info(efx, link, efx->net_dev,
+ "link up at %uMbps %s-duplex (MTU %d)\n",
+ link_state->speed, link_state->fd ? "full" : "half",
+ efx->net_dev->mtu);
+ else
+ netif_info(efx, link, efx->net_dev, "link down\n");
+}
+
+/**************************************************************************
+ *
+ * Hardware monitor
+ *
+ **************************************************************************/
+
+/* Run periodically off the general workqueue */
+static void efx_monitor(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ monitor_work.work);
+
+ netif_vdbg(efx, timer, efx->net_dev,
+ "hardware monitor executing on CPU %d\n",
+ raw_smp_processor_id());
+ BUG_ON(efx->type->monitor == NULL);
+
+ /* If the mac_lock is already held then it is likely a port
+ * reconfiguration is already in place, which will likely do
+ * most of the work of monitor() anyway.
+ */
+ if (mutex_trylock(&efx->mac_lock)) {
+ if (efx->port_enabled && efx->type->monitor)
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ efx_start_monitor(efx);
+}
+
+void efx_start_monitor(struct efx_nic *efx)
+{
+ if (efx->type->monitor)
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ efx_monitor_interval);
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ *
+ *************************************************************************/
+
+/* Channels are shut down and reinitialised whilst the NIC is running
+ * to propagate configuration changes (MTU, checksum offload), or
+ * to clear hardware error conditions
+ */
+static void efx_start_datapath(struct efx_nic *efx)
+{
+ netdev_features_t old_features = efx->net_dev->features;
+ bool old_rx_scatter = efx->rx_scatter;
+ size_t rx_buf_len;
+
+ /* Calculate the rx buffer allocation parameters required to
+ * support the current MTU, including padding for header
+ * alignment and overruns.
+ */
+ efx->rx_dma_len = (efx->rx_prefix_size +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
+ efx->rx_ip_align + efx->rx_dma_len);
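+ /* Decide how RX buffers are laid out: one buffer per page where it
+ * fits, hardware scatter where supported, else high-order pages.
+ */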
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = efx->type->always_rx_scatter;
+ efx->rx_buffer_order = 0;
+ } else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
+ BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
+ 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
+ EFX_RX_BUF_ALIGNMENT) >
+ PAGE_SIZE);
+ efx->rx_scatter = true;
+ efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
+ efx->rx_buffer_order = 0;
+ } else {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = get_order(rx_buf_len);
+ }
+
+ efx_rx_config_page_split(efx);
+ if (efx->rx_buffer_order)
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u; page order=%u batch=%u\n",
+ efx->rx_dma_len, efx->rx_buffer_order,
+ efx->rx_pages_per_batch);
+ else
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+ efx->rx_dma_len, efx->rx_page_buf_step,
+ efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+
+ /* Restore previously fixed features in hw_features and remove
+ * features which are fixed now
+ */
+ efx->net_dev->hw_features |= efx->net_dev->features;
+ efx->net_dev->hw_features &= ~efx->fixed_features;
+ efx->net_dev->features |= efx->fixed_features;
+ if (efx->net_dev->features != old_features)
+ netdev_features_change(efx->net_dev);
+
+ /* RX filters may also have scatter-enabled flags */
+ if (efx->rx_scatter != old_rx_scatter)
+ efx->type->filter_update_rx_scatter(efx);
+
+ /* We must keep at least one descriptor in a TX ring empty.
+ * We could avoid this when the queue size does not exactly
+ * match the hardware ring size, but it's not that important.
+ * Therefore we stop the queue when one more skb might fill
+ * the ring completely. We wake it when half way back to
+ * empty.
+ */
+ efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+ efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
+ /* Initialise the channels */
+ efx_start_channels(efx);
+
+ efx_ptp_start_datapath(efx);
+
+ if (netif_device_present(efx->net_dev))
+ netif_tx_wake_all_queues(efx->net_dev);
+}
+
+static void efx_stop_datapath(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+ efx_ptp_stop_datapath(efx);
+
+ efx_stop_channels(efx);
+}
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+static void efx_start_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifup, efx->net_dev, "start port\n");
+ BUG_ON(efx->port_enabled);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = true;
+
+ /* Ensure MAC ingress/egress is enabled */
+ efx_mac_reconfigure(efx);
+
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
+static void efx_stop_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = false;
+ mutex_unlock(&efx->mac_lock);
+
+ /* Serialise against efx_set_multicast_list() */
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
+}
+
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured. Interrupts must already be enabled. This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
+ */
+void efx_start_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ /* Check that it is appropriate to restart the interface. All
+ * of these flags are safe to read under just the rtnl lock
+ */
+ if (efx->port_enabled || !netif_running(efx->net_dev) ||
+ efx->reset_pending)
+ return;
+
+ efx_start_port(efx);
+ efx_start_datapath(efx);
+
+ /* Start the hardware monitor if there is one */
+ efx_start_monitor(efx);
+
+ /* Link state detection is normally event-driven; we have
+ * to poll now because we could have missed a change
+ */
+ mutex_lock(&efx->mac_lock);
+ if (efx->phy_op->poll(efx))
+ efx_link_status_changed(efx);
+ mutex_unlock(&efx->mac_lock);
+
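+ /* Start stats collection and take an initial snapshot */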
+ efx->type->start_stats(efx);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down. Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled. Requires the RTNL lock.
+ */
+void efx_stop_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* port_enabled can be read safely under the rtnl lock */
+ if (!efx->port_enabled)
+ return;
+
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+ efx->type->stop_stats(efx);
+ efx_stop_port(efx);
+
+ /* Stop the kernel transmit interface. This is only valid if
+ * the device is stopped or detached; otherwise the watchdog
+ * may fire immediately.
+ */
+ WARN_ON(netif_running(efx->net_dev) &&
+ netif_device_present(efx->net_dev));
+ netif_tx_disable(efx->net_dev);
+
+ efx_stop_datapath(efx);
+}
+
+/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
+ * the MAC appropriately. All other PHY configuration changes are pushed
+ * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through efx_monitor().
+ *
+ * Callers must hold the mac_lock
+ */
+int __efx_reconfigure_port(struct efx_nic *efx)
+{
+ enum efx_phy_mode phy_mode;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ /* Disable PHY transmit in mac level loopbacks */
+ phy_mode = efx->phy_mode;
+ if (LOOPBACK_INTERNAL(efx))
+ efx->phy_mode |= PHY_MODE_TX_DISABLED;
+ else
+ efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
+
+ rc = efx->type->reconfigure_port(efx);
+
+ if (rc)
+ efx->phy_mode = phy_mode;
+
+ return rc;
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled.
+ */
+int efx_reconfigure_port(struct efx_nic *efx)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ rc = __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Device reset and suspend
+ *
+ **************************************************************************/
+
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
+
+/* Try recovery mechanisms.
+ * For now only EEH is supported.
+ * Returns 0 if the recovery mechanisms are unsuccessful.
+ * Returns a non-zero value otherwise.
+ */
+int efx_try_recovery(struct efx_nic *efx)
+{
+#ifdef CONFIG_EEH
+ /* A PCI error can occur and not be seen by EEH because nothing
+ * happens on the PCI bus. In this case the driver may fail and
+ * schedule a 'recover or reset', leading to this recovery handler.
+ * Manually call the eeh failure check function.
+ */
+ struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
+ if (eeh_dev_check_failure(eehdev)) {
+ /* The EEH mechanisms will handle the error and reset the
+ * device if necessary.
+ */
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/* Tears down the entire software state and most of the hardware state
+ * before reset.
+ */
+void efx_reset_down(struct efx_nic *efx, enum reset_type method)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->prepare_flr(efx);
+
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+
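+ /* The lock order here must mirror the unlock order in efx_reset_up() */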
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ mutex_lock(&efx->rss_lock);
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH)
+ efx->phy_op->fini(efx);
+ efx->type->fini(efx);
+}
+
+/* This function will always ensure that the locks acquired in
+ * efx_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_DISABLE.
+ */
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->finish_flr(efx);
+
+ /* Ensure that SRAM is initialised even if we're disabling the device */
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
+ goto fail;
+ }
+
+ if (!ok)
+ goto fail;
+
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH) {
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail;
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc && rc != -EPERM)
+ netif_err(efx, drv, efx->net_dev,
+ "could not restore PHY settings\n");
+ }
+
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_restore(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
+ if (efx->type->rx_restore_rss_contexts)
+ efx->type->rx_restore_rss_contexts(efx);
+ mutex_unlock(&efx->rss_lock);
+ efx->type->filter_table_restore(efx);
+ up_write(&efx->filter_sem);
+ if (efx->type->sriov_reset)
+ efx->type->sriov_reset(efx);
+
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+
+ if (efx->type->udp_tnl_push_ports)
+ efx->type->udp_tnl_push_ports(efx);
+
+ return 0;
+
+fail:
+ efx->port_initialized = false;
+
+ mutex_unlock(&efx->rss_lock);
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Reset the NIC using the specified method. Note that the reset may
+ * fail, in which case the card will be left in an unusable state.
+ *
+ * Caller must hold the rtnl_lock.
+ */
+int efx_reset(struct efx_nic *efx, enum reset_type method)
+{
+ bool disabled;
+ int rc, rc2;
+
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+ efx_device_detach_sync(efx);
+ efx_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
+ goto out;
+ }
+
+ /* Clear flags for the scopes we covered. We assume the NIC and
+ * driver are now quiescent so that there is no race here.
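+ * Note that -(1 << (method + 1)) is the mask that clears all scope
+ * bits up to and including @method.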
+ */
+ if (method < RESET_TYPE_MAX_METHOD)
+ efx->reset_pending &= -(1 << (method + 1));
+ else /* it doesn't fit into the well-ordered scope hierarchy */
+ __clear_bit(method, &efx->reset_pending);
+
+ /* Reinitialise bus-mastering, which may have been turned off before
+ * the reset was scheduled. This is still appropriate, even in the
+ * RESET_TYPE_DISABLE since this driver generally assumes the hardware
+ * can respond to requests.
+ */
+ pci_set_master(efx->pci_dev);
+
+out:
+ /* Leave device stopped if necessary */
+ disabled = rc ||
+ method == RESET_TYPE_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_DISABLE;
+ rc2 = efx_reset_up(efx, method, !disabled);
+ if (rc2) {
+ disabled = true;
+ if (!rc)
+ rc = rc2;
+ }
+
+ if (disabled) {
+ dev_close(efx->net_dev);
+ netif_err(efx, drv, efx->net_dev, "has been disabled\n");
+ efx->state = STATE_DISABLED;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+ efx_device_attach_if_not_resetting(efx);
+ }
+ return rc;
+}
+
+/* The worker thread exists so that code that cannot sleep can
+ * schedule a reset for later.
+ */
+static void efx_reset_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ unsigned long pending;
+ enum reset_type method;
+
+ pending = READ_ONCE(efx->reset_pending);
+ method = fls(pending) - 1;
+
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
+ if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_ALL) &&
+ efx_try_recovery(efx))
+ return;
+
+ if (!pending)
+ return;
+
+ rtnl_lock();
+
+ /* We checked the state in efx_schedule_reset() but it may
+ * have changed by now. Now that we have the RTNL lock,
+ * it cannot change again.
+ */
+ if (efx->state == STATE_READY)
+ (void)efx_reset(efx, method);
+
+ rtnl_unlock();
+}
+
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
+{
+ enum reset_type method;
+
+ if (efx->state == STATE_RECOVERY) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "recovering: skip scheduling %s reset\n",
+ RESET_TYPE(type));
+ return;
+ }
+
+ switch (type) {
+ case RESET_TYPE_INVISIBLE:
+ case RESET_TYPE_ALL:
+ case RESET_TYPE_RECOVER_OR_ALL:
+ case RESET_TYPE_WORLD:
+ case RESET_TYPE_DISABLE:
+ case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_DATAPATH:
+ case RESET_TYPE_MC_BIST:
+ case RESET_TYPE_MCDI_TIMEOUT:
+ method = type;
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
+ break;
+ default:
+ method = efx->type->map_reset_reason(type);
+ netif_dbg(efx, drv, efx->net_dev,
+ "scheduling %s reset for %s\n",
+ RESET_TYPE(method), RESET_TYPE(type));
+ break;
+ }
+
+ set_bit(method, &efx->reset_pending);
+ smp_mb(); /* ensure we change reset_pending before checking state */
+
+ /* If we're not READY then just leave the flags set as the cue
+ * to abort probing or reschedule the reset later.
+ */
+ if (READ_ONCE(efx->state) != STATE_READY)
+ return;
+
+ /* efx_process_channel() will no longer read events once a
+ * reset is scheduled. So switch back to polled MCDI completions.
+ */
+ efx_mcdi_mode_poll(efx);
+
+ efx_queue_reset_work(efx);
+}
+
+/**************************************************************************
+ *
+ * Dummy PHY/MAC operations
+ *
+ * Can be used for some unimplemented operations
+ * Needed so all function pointers are valid and do not have to be tested
+ * before use
+ *
+ **************************************************************************/
+int efx_port_dummy_op_int(struct efx_nic *efx)
+{
+ return 0;
+}
+void efx_port_dummy_op_void(struct efx_nic *efx) {}
+
+static bool efx_port_dummy_op_poll(struct efx_nic *efx)
+{
+ return false;
+}
+
+static const struct efx_phy_operations efx_dummy_phy_operations = {
+ .init = efx_port_dummy_op_int,
+ .reconfigure = efx_port_dummy_op_int,
+ .poll = efx_port_dummy_op_poll,
+ .fini = efx_port_dummy_op_void,
+};
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+/* This zeroes out and then fills in the invariants in a struct
+ * efx_nic (including all sub-structures).
+ */
+int efx_init_struct(struct efx_nic *efx,
+ struct pci_dev *pci_dev, struct net_device *net_dev)
+{
+ int rc = -ENOMEM;
+
+ /* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
+ spin_lock_init(&efx->biu_lock);
+#ifdef CONFIG_SFC_MTD
+ INIT_LIST_HEAD(&efx->mtd_list);
+#endif
+ INIT_WORK(&efx->reset_work, efx_reset_work);
+ INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+ INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
+ efx->pci_dev = pci_dev;
+ efx->msg_enable = debug;
+ efx->state = STATE_UNINIT;
+ strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+
+ efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
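+ /* Pad RX buffers so that, after the hardware prefix, the IP header
+ * ends up 4-byte aligned on architectures where NET_IP_ALIGN is
+ * non-zero.
+ */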
+ efx->rx_ip_align =
+ NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
+ INIT_LIST_HEAD(&efx->rss_context.list);
+ mutex_init(&efx->rss_lock);
+ spin_lock_init(&efx->stats_lock);
+ efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+ efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+ BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
+ mutex_init(&efx->mac_lock);
+#ifdef CONFIG_RFS_ACCEL
+ mutex_init(&efx->rps_mutex);
+ spin_lock_init(&efx->rps_hash_lock);
+ /* Failure to allocate is not fatal, but may degrade ARFS performance */
+ efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
+ sizeof(*efx->rps_hash_table), GFP_KERNEL);
+#endif
+ efx->phy_op = &efx_dummy_phy_operations;
+ efx->mdio.dev = net_dev;
+ INIT_WORK(&efx->mac_work, efx_mac_work);
+ init_waitqueue_head(&efx->flush_wq);
+
+ rc = efx_init_channels(efx);
+ if (rc)
+ goto fail;
+
+ /* Would be good to use the net_dev name, but we're too early */
+ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
+ pci_name(pci_dev));
+ efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
+ if (!efx->workqueue) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ efx_fini_struct(efx);
+ return rc;
+}
+
+void efx_fini_struct(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_hash_table);
+#endif
+
+ efx_fini_channels(efx);
+
+ kfree(efx->vpd_sn);
+
+ if (efx->workqueue) {
+ destroy_workqueue(efx->workqueue);
+ efx->workqueue = NULL;
+ }
+}
+
+/* This configures the PCI device to enable I/O and DMA. */
+int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size)
+{
+ struct pci_dev *pci_dev = efx->pci_dev;
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+
+ rc = pci_enable_device(pci_dev);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to enable PCI device\n");
+ goto fail1;
+ }
+
+ pci_set_master(pci_dev);
+
+ /* Set the PCI DMA mask. Try all possibilities from our
+ * genuine mask down to 32 bits, because some architectures
+ * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
+ * masks event though they reject 46 bit masks.
+ */
+ while (dma_mask > 0x7fffffffUL) {
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
+ if (rc == 0)
+ break;
+ dma_mask >>= 1;
+ }
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not find a suitable DMA mask\n");
+ goto fail2;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "using DMA mask %llx\n", (unsigned long long)dma_mask);
+
+ efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+ if (!efx->membase_phys) {
+ netif_err(efx, probe, efx->net_dev,
+ "ERROR: No BAR%d mapping from the BIOS. "
+ "Try pci=realloc on the kernel command line\n", bar);
+ rc = -ENODEV;
+ goto fail3;
+ }
+
+ rc = pci_request_region(pci_dev, bar, "sfc");
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "request for memory BAR failed\n");
+ rc = -EIO;
+ goto fail3;
+ }
+
+ efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
+ if (!efx->membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not map memory BAR at %llx+%x\n",
+ (unsigned long long)efx->membase_phys, mem_map_size);
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %llx+%x (virtual %p)\n",
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
+
+ return 0;
+
+fail4:
+ pci_release_region(efx->pci_dev, bar);
+fail3:
+ efx->membase_phys = 0;
+fail2:
+ pci_disable_device(efx->pci_dev);
+fail1:
+ return rc;
+}
+
+void efx_fini_io(struct efx_nic *efx, int bar)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+
+ if (efx->membase) {
+ iounmap(efx->membase);
+ efx->membase = NULL;
+ }
+
+ if (efx->membase_phys) {
+ pci_release_region(efx->pci_dev, bar);
+ efx->membase_phys = 0;
+ }
+
+ /* Don't disable bus-mastering if VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ pci_disable_device(efx->pci_dev);
+}
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
new file mode 100644
index 000000000000..c602e5257088
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_COMMON_H
+#define EFX_COMMON_H
+
+int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size);
+void efx_fini_io(struct efx_nic *efx, int bar);
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
+ struct net_device *net_dev);
+void efx_fini_struct(struct efx_nic *efx);
+
+void efx_start_all(struct efx_nic *efx);
+void efx_stop_all(struct efx_nic *efx);
+
+int efx_create_reset_workqueue(void);
+void efx_queue_reset_work(struct efx_nic *efx);
+void efx_flush_reset_workqueue(struct efx_nic *efx);
+void efx_destroy_reset_workqueue(void);
+
+void efx_start_monitor(struct efx_nic *efx);
+
+int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+
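+/* In any state where the device is visible to the kernel, state
+ * transitions must be serialised by the RTNL lock.
+ */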
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx->state == STATE_READY) || \
+ (efx->state == STATE_RECOVERY) || \
+ (efx->state == STATE_DISABLED)) \
+ ASSERT_RTNL(); \
+ } while (0)
+
+int efx_try_recovery(struct efx_nic *efx);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+
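+/* Datapath entry points call this first and propagate the -EIO if the
+ * device was disabled by an earlier error.
+ */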
+static inline int efx_check_disabled(struct efx_nic *efx)
+{
+ if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+ netif_err(efx, drv, efx->net_dev,
+ "device is disabled due to earlier errors\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void efx_mac_reconfigure(struct efx_nic *efx);
+void efx_link_status_changed(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index b31032da4bcb..f83398721ad7 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -13,6 +13,9 @@
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
#include "filter.h"
#include "nic.h"
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index eedd32e2bfcb..dbbb898adddb 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -15,6 +15,7 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
+#include "rx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 9081f84a2604..65e454a062f7 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -346,7 +346,6 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx);
int efx_mcdi_port_probe(struct efx_nic *efx);
void efx_mcdi_port_remove(struct efx_nic *efx);
int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-int efx_mcdi_port_get_number(struct efx_nic *efx);
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
int efx_mcdi_set_mac(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.h b/drivers/net/ethernet/sfc/mcdi_functions.h
new file mode 100644
index 000000000000..f0726c71698b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_functions.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_FUNCTIONS_H
+#define EFX_MCDI_FUNCTIONS_H
+
+int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
+ unsigned int max_vis, unsigned int *vi_base,
+ unsigned int *allocated_vis);
+int efx_mcdi_free_vis(struct efx_nic *efx);
+
+int efx_mcdi_ev_probe(struct efx_channel *channel);
+int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2);
+void efx_mcdi_ev_remove(struct efx_channel *channel);
+void efx_mcdi_ev_fini(struct efx_channel *channel);
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2);
+void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue);
+int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
+int efx_mcdi_rx_init(struct efx_rx_queue *rx_queue, bool want_outer_classes);
+void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index fb7cde4980ed..f19d7b8a2935 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -14,23 +14,9 @@
#include "mcdi_pcol.h"
#include "nic.h"
#include "selftest.h"
+#include "mcdi_port_common.h"
-struct efx_mcdi_phy_data {
- u32 flags;
- u32 type;
- u32 supported_cap;
- u32 channel;
- u32 port;
- u32 stats_mask;
- u8 name[20];
- u32 media;
- u32 mmd_mask;
- u8 revision[20];
- u32 forced_cap;
-};
-
-static int
-efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
+int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
size_t outlen;
@@ -70,9 +56,9 @@ fail:
return rc;
}
-static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
- u32 flags, u32 loopback_mode,
- u32 loopback_speed)
+int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+ u32 flags, u32 loopback_mode,
+ u32 loopback_speed)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
int rc;
@@ -89,7 +75,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
return rc;
}
-static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
+int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
size_t outlen;
@@ -168,7 +154,7 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
return 0;
}
-static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
+void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
{
#define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
linkset)
@@ -232,7 +218,7 @@ static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
#undef SET_BIT
}
-static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
+u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
{
u32 result = 0;
@@ -273,7 +259,7 @@ static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
return result;
}
-static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
+u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
enum efx_phy_mode mode, supported;
@@ -301,7 +287,7 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
return flags;
}
-static u8 mcdi_to_ethtool_media(u32 media)
+u8 mcdi_to_ethtool_media(u32 media)
{
switch (media) {
case MC_CMD_MEDIA_XAUI:
@@ -322,7 +308,7 @@ static u8 mcdi_to_ethtool_media(u32 media)
}
}
-static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
struct efx_link_state *link_state,
u32 speed, u32 flags, u32 fcntl)
{
@@ -365,7 +351,7 @@ static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
* Both RS and BASER (whether AUTO or not) means use FEC if cable and link
* partner support it, preferring RS to BASER.
*/
-static u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
+u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
{
u32 ret = 0;
@@ -392,7 +378,7 @@ static u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
* maps both of those to AUTO. This should never matter, and it's not clear
* what a better mapping would be anyway.
*/
-static u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
+u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
{
bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
@@ -530,7 +516,7 @@ int efx_mcdi_port_reconfigure(struct efx_nic *efx)
/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
* supported by the link partner. Warn the user if this isn't the case
*/
-static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
@@ -555,7 +541,7 @@ static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
"warning: link partner doesn't support pause frames");
}
-static bool efx_mcdi_phy_poll(struct efx_nic *efx)
+bool efx_mcdi_phy_poll(struct efx_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
@@ -666,8 +652,8 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
return 0;
}
-static int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
- struct ethtool_fecparam *fec)
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
+ struct ethtool_fecparam *fec)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
u32 caps, active, speed; /* MCDI format */
@@ -745,7 +731,7 @@ static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
return 0;
}
-static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
+int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
size_t outlen;
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.h b/drivers/net/ethernet/sfc/mcdi_port_common.h
new file mode 100644
index 000000000000..10772de94b2c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_PORT_COMMON_H
+#define EFX_MCDI_PORT_COMMON_H
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+struct efx_mcdi_phy_data {
+ u32 flags;
+ u32 type;
+ u32 supported_cap;
+ u32 channel;
+ u32 port;
+ u32 stats_mask;
+ u8 name[20];
+ u32 media;
+ u32 mmd_mask;
+ u8 revision[20];
+ u32 forced_cap;
+};
+
+int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg);
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising);
+int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+ u32 flags, u32 loopback_mode, u32 loopback_speed);
+int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes);
+void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset);
+u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset);
+u32 efx_get_mcdi_phy_flags(struct efx_nic *efx);
+u8 mcdi_to_ethtool_media(u32 media);
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl);
+u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap);
+u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g);
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
+bool efx_mcdi_phy_poll(struct efx_nic *efx);
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
+ struct ethtool_fecparam *fec);
+int efx_mcdi_phy_test_alive(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 709172a6995e..52e6f11d8818 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -138,6 +138,8 @@ struct efx_special_buffer {
* freed when descriptor completes
* @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
* member is the associated buffer to drop a page reference on.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, an EF10-specific option
+ * descriptor.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
@@ -152,7 +154,7 @@ struct efx_tx_buffer {
struct xdp_frame *xdpf;
};
union {
- efx_qword_t option;
+ efx_qword_t option; /* EF10 */
dma_addr_t dma_addr;
};
unsigned short flags;
@@ -1610,6 +1612,15 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
return &rx_queue->buffer[index];
}
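+/* Return the RX buffer following @rx_buf in the ring, wrapping back
+ * to the start of the ring after the last entry.
+ */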
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+ if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+ return efx_rx_buffer(rx_queue, 0);
+ else
+ return rx_buf + 1;
+}
+
/**
* EFX_MAX_FRAME_LEN - calculate maximum frame length
*
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index bf0bdb22cc64..6670fda8f35a 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -11,6 +11,7 @@
#include <linux/net_tstamp.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_common.h"
#include "mcdi.h"
enum {
@@ -505,6 +506,9 @@ static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
tx_queue->efx->type->tx_write(tx_queue);
}
+int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ bool *data_mapped);
+
/* RX data path */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
@@ -553,6 +557,7 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
channel->efx->type->ev_read_ack(channel);
}
+
void efx_nic_event_test_start(struct efx_channel *channel);
/* Falcon/Siena queue operations */
@@ -670,6 +675,7 @@ struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
+
int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_farch_register_test *regs,
size_t n_regs);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index c29bf862a94c..0e04ed7f6382 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -21,6 +21,7 @@
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
+#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
@@ -42,23 +43,10 @@
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u
-/* This is the percentage fill level below which new RX descriptors
- * will be added to the RX descriptor ring.
- */
-static unsigned int rx_refill_threshold;
-
/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)
-/*
- * RX maximum head room required.
- *
- * This must be at least 1 to prevent overflow, plus one packet-worth
- * to allow pipelined receives.
- */
-#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
-
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
return page_address(buf->page) + buf->page_offset;
@@ -77,15 +65,6 @@ static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
#endif
}
-static inline struct efx_rx_buffer *
-efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
-{
- if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
- return efx_rx_buffer(rx_queue, 0);
- else
- return rx_buf + 1;
-}
-
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf,
unsigned int len)
@@ -94,22 +73,8 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
DMA_FROM_DEVICE);
}
-void efx_rx_config_page_split(struct efx_nic *efx)
-{
- efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
- XDP_PACKET_HEADROOM,
- EFX_RX_BUF_ALIGNMENT);
- efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
- ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
- efx->rx_page_buf_step);
- efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
- efx->rx_bufs_per_page;
- efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
- efx->rx_bufs_per_page);
-}
-
/* Check the RX page recycle ring for a page that can be reused. */
-static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
+struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
struct page *page;
@@ -142,106 +107,6 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
return NULL;
}
-/**
- * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
- *
- * @rx_queue: Efx RX queue
- *
- * This allocates a batch of pages, maps them for DMA, and populates
- * struct efx_rx_buffers for each one. Return a negative error code or
- * 0 on success. If a single page can be used for multiple buffers,
- * then the page will either be inserted fully, or not at all.
- */
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
-{
- struct efx_nic *efx = rx_queue->efx;
- struct efx_rx_buffer *rx_buf;
- struct page *page;
- unsigned int page_offset;
- struct efx_rx_page_state *state;
- dma_addr_t dma_addr;
- unsigned index, count;
-
- count = 0;
- do {
- page = efx_reuse_page(rx_queue);
- if (page == NULL) {
- page = alloc_pages(__GFP_COMP |
- (atomic ? GFP_ATOMIC : GFP_KERNEL),
- efx->rx_buffer_order);
- if (unlikely(page == NULL))
- return -ENOMEM;
- dma_addr =
- dma_map_page(&efx->pci_dev->dev, page, 0,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
- dma_addr))) {
- __free_pages(page, efx->rx_buffer_order);
- return -EIO;
- }
- state = page_address(page);
- state->dma_addr = dma_addr;
- } else {
- state = page_address(page);
- dma_addr = state->dma_addr;
- }
-
- dma_addr += sizeof(struct efx_rx_page_state);
- page_offset = sizeof(struct efx_rx_page_state);
-
- do {
- index = rx_queue->added_count & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
- XDP_PACKET_HEADROOM;
- rx_buf->page = page;
- rx_buf->page_offset = page_offset + efx->rx_ip_align +
- XDP_PACKET_HEADROOM;
- rx_buf->len = efx->rx_dma_len;
- rx_buf->flags = 0;
- ++rx_queue->added_count;
- get_page(page);
- dma_addr += efx->rx_page_buf_step;
- page_offset += efx->rx_page_buf_step;
- } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
-
- rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
- } while (++count < efx->rx_pages_per_batch);
-
- return 0;
-}
-
-/* Unmap a DMA-mapped page. This function is only called for the final RX
- * buffer in a page.
- */
-static void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
-{
- struct page *page = rx_buf->page;
-
- if (page) {
- struct efx_rx_page_state *state = page_address(page);
- dma_unmap_page(&efx->pci_dev->dev,
- state->dma_addr,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- }
-}
-
-static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf,
- unsigned int num_bufs)
-{
- do {
- if (rx_buf->page) {
- put_page(rx_buf->page);
- rx_buf->page = NULL;
- }
- rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
- } while (--num_bufs);
-}
-
/* Attempt to recycle the page if there is an RX recycle ring; the page can
* only be added if this is the final RX buffer, to prevent pages being used in
* the descriptor ring and appearing in the recycle ring simultaneously.
@@ -278,21 +143,6 @@ static void efx_recycle_rx_page(struct efx_channel *channel,
put_page(rx_buf->page);
}
-static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
-{
- /* Release the page reference we hold for the buffer. */
- if (rx_buf->page)
- put_page(rx_buf->page);
-
- /* If this is the last buffer in a page, unmap and free it. */
- if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
- efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
- }
- rx_buf->page = NULL;
-}
-
/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
@@ -317,78 +167,6 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}
-/**
- * efx_fast_push_rx_descriptors - push new RX descriptors quickly
- * @rx_queue: RX descriptor queue
- *
- * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@max_fill. If there is insufficient atomic
- * memory to do so, a slow fill will be scheduled.
- *
- * The caller must provide serialisation (none is used here). In practise,
- * this means this function must run from the NAPI handler, or be called
- * when NAPI is disabled.
- */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int fill_level, batch_size;
- int space, rc = 0;
-
- if (!rx_queue->refill_enabled)
- return;
-
- /* Calculate current fill level, and exit if we don't need to fill */
- fill_level = (rx_queue->added_count - rx_queue->removed_count);
- EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
- if (fill_level >= rx_queue->fast_fill_trigger)
- goto out;
-
- /* Record minimum fill level */
- if (unlikely(fill_level < rx_queue->min_fill)) {
- if (fill_level)
- rx_queue->min_fill = fill_level;
- }
-
- batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
- space = rx_queue->max_fill - fill_level;
- EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
-
- netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
- "RX queue %d fast-filling descriptor ring from"
- " level %d to level %d\n",
- efx_rx_queue_index(rx_queue), fill_level,
- rx_queue->max_fill);
-
-
- do {
- rc = efx_init_rx_buffers(rx_queue, atomic);
- if (unlikely(rc)) {
- /* Ensure that we don't leave the rx queue empty */
- efx_schedule_slow_fill(rx_queue);
- goto out;
- }
- } while ((space -= batch_size) >= batch_size);
-
- netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
- "RX queue %d fast-filled descriptor ring "
- "to level %d\n", efx_rx_queue_index(rx_queue),
- rx_queue->added_count - rx_queue->removed_count);
-
- out:
- if (rx_queue->notified_count != rx_queue->added_count)
- efx_nic_notify_rx_desc(rx_queue);
-}
-
-void efx_rx_slow_fill(struct timer_list *t)
-{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
-
- /* Post an event to cause NAPI to run and refill the queue */
- efx_nic_generate_fill_event(rx_queue);
- ++rx_queue->slow_fill_count;
-}
-
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
int len)
@@ -805,41 +583,10 @@ out:
channel->rx_pkt_n_frags = 0;
}
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int entries;
- int rc;
-
- /* Create the smallest power-of-two aligned ring */
- entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
- rx_queue->ptr_mask = entries - 1;
-
- netif_dbg(efx, probe, efx->net_dev,
- "creating RX queue %d size %#x mask %#x\n",
- efx_rx_queue_index(rx_queue), efx->rxq_entries,
- rx_queue->ptr_mask);
-
- /* Allocate RX buffers */
- rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
- GFP_KERNEL);
- if (!rx_queue->buffer)
- return -ENOMEM;
-
- rc = efx_nic_probe_rx(rx_queue);
- if (rc) {
- kfree(rx_queue->buffer);
- rx_queue->buffer = NULL;
- }
-
- return rc;
-}
-
-static void efx_init_rx_recycle_ring(struct efx_nic *efx,
- struct efx_rx_queue *rx_queue)
+void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
unsigned int bufs_in_recycle_ring, page_ring_size;
+ struct efx_nic *efx = rx_queue->efx;
/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
@@ -858,121 +605,6 @@ static void efx_init_rx_recycle_ring(struct efx_nic *efx,
rx_queue->page_ptr_mask = page_ring_size - 1;
}
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int max_fill, trigger, max_trigger;
- int rc = 0;
-
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- /* Initialise ptr fields */
- rx_queue->added_count = 0;
- rx_queue->notified_count = 0;
- rx_queue->removed_count = 0;
- rx_queue->min_fill = -1U;
- efx_init_rx_recycle_ring(efx, rx_queue);
-
- rx_queue->page_remove = 0;
- rx_queue->page_add = rx_queue->page_ptr_mask + 1;
- rx_queue->page_recycle_count = 0;
- rx_queue->page_recycle_failed = 0;
- rx_queue->page_recycle_full = 0;
-
- /* Initialise limit fields */
- max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
- max_trigger =
- max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
- if (rx_refill_threshold != 0) {
- trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
- if (trigger > max_trigger)
- trigger = max_trigger;
- } else {
- trigger = max_trigger;
- }
-
- rx_queue->max_fill = max_fill;
- rx_queue->fast_fill_trigger = trigger;
- rx_queue->refill_enabled = true;
-
- /* Initialise XDP queue information */
- rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
- rx_queue->core_index);
-
- if (rc) {
- netif_err(efx, rx_err, efx->net_dev,
- "Failure to initialise XDP queue information rc=%d\n",
- rc);
- efx->xdp_rxq_info_failed = true;
- } else {
- rx_queue->xdp_rxq_info_valid = true;
- }
-
- /* Set up RX descriptor ring */
- efx_nic_init_rx(rx_queue);
-}
-
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
-{
- int i;
- struct efx_nic *efx = rx_queue->efx;
- struct efx_rx_buffer *rx_buf;
-
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- del_timer_sync(&rx_queue->slow_fill);
-
- /* Release RX buffers from the current read ptr to the write ptr */
- if (rx_queue->buffer) {
- for (i = rx_queue->removed_count; i < rx_queue->added_count;
- i++) {
- unsigned index = i & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
- efx_fini_rx_buffer(rx_queue, rx_buf);
- }
- }
-
- /* Unmap and release the pages in the recycle ring. Remove the ring. */
- for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
- struct page *page = rx_queue->page_ring[i];
- struct efx_rx_page_state *state;
-
- if (page == NULL)
- continue;
-
- state = page_address(page);
- dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- put_page(page);
- }
- kfree(rx_queue->page_ring);
- rx_queue->page_ring = NULL;
-
- if (rx_queue->xdp_rxq_info_valid)
- xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
-
- rx_queue->xdp_rxq_info_valid = false;
-}
-
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
-{
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- efx_nic_remove_rx(rx_queue);
-
- kfree(rx_queue->buffer);
- rx_queue->buffer = NULL;
-}
-
-
-module_param(rx_refill_threshold, uint, 0444);
-MODULE_PARM_DESC(rx_refill_threshold,
- "RX descriptor ring refill threshold (%)");
-
#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_work(struct work_struct *data)
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
new file mode 100644
index 000000000000..f4b5c3d828f6
--- /dev/null
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include "efx.h"
+#include "nic.h"
+#include "rx_common.h"
+
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold;
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+ "RX descriptor ring refill threshold (%)");
+
+/* RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
+ */
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
+
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+{
+ /* Release the page reference we hold for the buffer. */
+ if (rx_buf->page)
+ put_page(rx_buf->page);
+
+ /* If this is the last buffer in a page, unmap and free it. */
+ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ }
+ rx_buf->page = NULL;
+}
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ rx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating RX queue %d size %#x mask %#x\n",
+ efx_rx_queue_index(rx_queue), efx->rxq_entries,
+ rx_queue->ptr_mask);
+
+ /* Allocate RX buffers */
+ rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
+ GFP_KERNEL);
+ if (!rx_queue->buffer)
+ return -ENOMEM;
+
+ rc = efx_nic_probe_rx(rx_queue);
+ if (rc) {
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+ }
+
+ return rc;
+}
+
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ unsigned int max_fill, trigger, max_trigger;
+ struct efx_nic *efx = rx_queue->efx;
+ int rc = 0;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ /* Initialise ptr fields */
+ rx_queue->added_count = 0;
+ rx_queue->notified_count = 0;
+ rx_queue->removed_count = 0;
+ rx_queue->min_fill = -1U;
+ efx_init_rx_recycle_ring(rx_queue);
+
+ rx_queue->page_remove = 0;
+ rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+ rx_queue->page_recycle_count = 0;
+ rx_queue->page_recycle_failed = 0;
+ rx_queue->page_recycle_full = 0;
+
+ /* Initialise limit fields */
+ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
+ max_trigger =
+ max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ if (rx_refill_threshold != 0) {
+ trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+ if (trigger > max_trigger)
+ trigger = max_trigger;
+ } else {
+ trigger = max_trigger;
+ }
+
+ rx_queue->max_fill = max_fill;
+ rx_queue->fast_fill_trigger = trigger;
+ rx_queue->refill_enabled = true;
+
+ /* Initialise XDP queue information */
+ rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
+ rx_queue->core_index);
+
+ if (rc) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "Failure to initialise XDP queue information rc=%d\n",
+ rc);
+ efx->xdp_rxq_info_failed = true;
+ } else {
+ rx_queue->xdp_rxq_info_valid = true;
+ }
+
+ /* Set up RX descriptor ring */
+ efx_nic_init_rx(rx_queue);
+}
+
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_buffer *rx_buf;
+ int i;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ del_timer_sync(&rx_queue->slow_fill);
+
+ /* Release RX buffers from the current read ptr to the write ptr */
+ if (rx_queue->buffer) {
+ for (i = rx_queue->removed_count; i < rx_queue->added_count;
+ i++) {
+ unsigned int index = i & rx_queue->ptr_mask;
+
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ efx_fini_rx_buffer(rx_queue, rx_buf);
+ }
+ }
+
+ /* Unmap and release the pages in the recycle ring. Remove the ring. */
+ for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ struct page *page = rx_queue->page_ring[i];
+ struct efx_rx_page_state *state;
+
+ if (page == NULL)
+ continue;
+
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
+ kfree(rx_queue->page_ring);
+ rx_queue->page_ring = NULL;
+
+ if (rx_queue->xdp_rxq_info_valid)
+ xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
+
+ rx_queue->xdp_rxq_info_valid = false;
+}
+
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ efx_nic_remove_rx(rx_queue);
+
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+}
+
+/* Unmap a DMA-mapped page. This function is only called for the final RX
+ * buffer in a page.
+ */
+void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct page *page = rx_buf->page;
+
+ if (page) {
+ struct efx_rx_page_state *state = page_address(page);
+
+ dma_unmap_page(&efx->pci_dev->dev,
+ state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ }
+}
+
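+/* Drop the page references held by @num_bufs consecutive RX buffers */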
+void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs)
+{
+ do {
+ if (rx_buf->page) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
+ }
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--num_bufs);
+}
+
+void efx_rx_slow_fill(struct timer_list *t)
+{
+ struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+
+ /* Post an event to cause NAPI to run and refill the queue */
+ efx_nic_generate_fill_event(rx_queue);
+ ++rx_queue->slow_fill_count;
+}
+
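+/* Arm the slow-fill timer so that the queue is refilled shortly */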
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+{
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
+}
+
+/* efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
+ *
+ * @rx_queue: Efx RX queue
+ *
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * struct efx_rx_buffers for each one. Return a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
+ */
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
+{
+ unsigned int page_offset, index, count;
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_page_state *state;
+ struct efx_rx_buffer *rx_buf;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ count = 0;
+ do {
+ page = efx_reuse_page(rx_queue);
+ if (page == NULL) {
+ page = alloc_pages(__GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+ dma_addr =
+ dma_map_page(&efx->pci_dev->dev, page, 0,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ dma_addr))) {
+ __free_pages(page, efx->rx_buffer_order);
+ return -EIO;
+ }
+ state = page_address(page);
+ state->dma_addr = dma_addr;
+ } else {
+ state = page_address(page);
+ dma_addr = state->dma_addr;
+ }
+
+ dma_addr += sizeof(struct efx_rx_page_state);
+ page_offset = sizeof(struct efx_rx_page_state);
+
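+ /* Carve the page up into rx_page_buf_step-sized buffers */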
+ do {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM;
+ rx_buf->page = page;
+ rx_buf->page_offset = page_offset + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
+ get_page(page);
+ dma_addr += efx->rx_page_buf_step;
+ page_offset += efx->rx_page_buf_step;
+ } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+ rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+ } while (++count < efx->rx_pages_per_batch);
+
+ return 0;
+}
+
+void efx_rx_config_page_split(struct efx_nic *efx)
+{
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM,
+ EFX_RX_BUF_ALIGNMENT);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+ efx->rx_page_buf_step);
+ efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+ efx->rx_bufs_per_page;
+ efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+ efx->rx_bufs_per_page);
+}
+
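The split arithmetic is easiest to see with numbers plugged in; the following values are illustrative only (they are not taken from this patch):

/* Worked example: PAGE_SIZE = 4096, rx_buffer_order = 0,
 * rx_ip_align = 2, rx_dma_len = 1536, XDP_PACKET_HEADROOM = 256,
 * EFX_RX_BUF_ALIGNMENT assumed to be 64.
 *
 *   rx_page_buf_step   = ALIGN(1536 + 2 + 256, 64)     = 1856
 *   rx_bufs_per_page   = (4096 - sizeof(state)) / 1856 = 2
 *   rx_buffer_truesize = 4096 / 2                      = 2048
 *   rx_pages_per_batch = DIV_ROUND_UP(8, 2)            = 4
 *
 * i.e. each 4 KiB page carries two receive buffers, and one refill
 * batch touches four pages to post EFX_RX_PREFERRED_BATCH buffers.
 */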
+/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue: RX descriptor queue
+ *
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->max_fill. If there is insufficient atomic
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
+ */
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int fill_level, batch_size;
+ int space, rc = 0;
+
+ if (!rx_queue->refill_enabled)
+ return;
+
+ /* Calculate current fill level, and exit if we don't need to fill */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count);
+ EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+ if (fill_level >= rx_queue->fast_fill_trigger)
+ goto out;
+
+ /* Record minimum fill level */
+ if (unlikely(fill_level < rx_queue->min_fill)) {
+ if (fill_level)
+ rx_queue->min_fill = fill_level;
+ }
+
+ batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ space = rx_queue->max_fill - fill_level;
+ EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filling descriptor ring from"
+ " level %d to level %d\n",
+ efx_rx_queue_index(rx_queue), fill_level,
+ rx_queue->max_fill);
+
+ do {
+ rc = efx_init_rx_buffers(rx_queue, atomic);
+ if (unlikely(rc)) {
+ /* Ensure that we don't leave the rx queue empty */
+ efx_schedule_slow_fill(rx_queue);
+ goto out;
+ }
+ } while ((space -= batch_size) >= batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filled descriptor ring "
+ "to level %d\n", efx_rx_queue_index(rx_queue),
+ rx_queue->added_count - rx_queue->removed_count);
+
+ out:
+ if (rx_queue->notified_count != rx_queue->added_count)
+ efx_nic_notify_rx_desc(rx_queue);
+}
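In practice the fast-fill entry point is driven from the event-handling path; a hedged sketch of such a caller (function name hypothetical, using the driver's usual channel-to-queue accessor):

/* Sketch: refill from NAPI poll. Softirq context, so pass
 * atomic = true; on allocation failure the refill falls back to
 * efx_schedule_slow_fill() internally.
 */
static void example_napi_refill(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_fast_push_rx_descriptors(rx_queue, true);
}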
diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h
new file mode 100644
index 000000000000..8b23a7accea1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/rx_common.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_RX_COMMON_H
+#define EFX_RX_COMMON_H
+
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
+
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+ EFX_RX_USR_BUF_SIZE)
+
+void efx_rx_slow_fill(struct timer_list *t);
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);
+
+void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct page *page,
+ unsigned int page_offset,
+ u16 flags);
+void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);
+void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs);
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_rx_config_page_split(struct efx_nic *efx);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 8474cf8ea7d3..ec5002b5ec4f 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -18,6 +18,8 @@
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 81499244a4b4..810f6fc8a937 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -14,6 +14,7 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
+#include "efx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index dfbdf05dcf79..83dcfcae3d4b 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_channels.h"
#include "nic.h"
#include "io.h"
#include "mcdi.h"
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 00c1c4402451..c9599fcab0b4 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -20,6 +20,7 @@
#include "io.h"
#include "nic.h"
#include "tx.h"
+#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"
@@ -56,72 +57,6 @@ u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
return efx_tx_get_copy_buffer(tx_queue, buffer);
}
-static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
-{
- if (buffer->unmap_len) {
- struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
- dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
- if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
- dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
- DMA_TO_DEVICE);
- buffer->unmap_len = 0;
- }
-
- if (buffer->flags & EFX_TX_BUF_SKB) {
- struct sk_buff *skb = (struct sk_buff *)buffer->skb;
-
- EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
- (*pkts_compl)++;
- (*bytes_compl) += skb->len;
- if (tx_queue->timestamping &&
- (tx_queue->completed_timestamp_major ||
- tx_queue->completed_timestamp_minor)) {
- struct skb_shared_hwtstamps hwtstamp;
-
- hwtstamp.hwtstamp =
- efx_ptp_nic_to_kernel_time(tx_queue);
- skb_tstamp_tx(skb, &hwtstamp);
-
- tx_queue->completed_timestamp_major = 0;
- tx_queue->completed_timestamp_minor = 0;
- }
- dev_consume_skb_any((struct sk_buff *)buffer->skb);
- netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
- "TX queue %d transmission id %x complete\n",
- tx_queue->queue, tx_queue->read_count);
- } else if (buffer->flags & EFX_TX_BUF_XDP) {
- xdp_return_frame_rx_napi(buffer->xdpf);
- }
-
- buffer->len = 0;
- buffer->flags = 0;
-}
-
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
-{
- /* Header and payload descriptor for each output segment, plus
- * one for every input fragment boundary within a segment
- */
- unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
-
- /* Possibly one more per segment for option descriptors */
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
- max_descs += EFX_TSO_MAX_SEGS;
-
- /* Possibly more for PCIe page boundaries within input fragments */
- if (PAGE_SIZE > EFX_PAGE_SIZE)
- max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
- DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
-
- return max_descs;
-}
-
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
/* We need to consider both queues that the net core sees as one */
@@ -333,107 +268,6 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
}
#endif /* EFX_USE_PIO */
-static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
- dma_addr_t dma_addr,
- size_t len)
-{
- const struct efx_nic_type *nic_type = tx_queue->efx->type;
- struct efx_tx_buffer *buffer;
- unsigned int dma_len;
-
- /* Map the fragment taking account of NIC-dependent DMA limits. */
- do {
- buffer = efx_tx_queue_get_insert_buffer(tx_queue);
- dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
-
- buffer->len = dma_len;
- buffer->dma_addr = dma_addr;
- buffer->flags = EFX_TX_BUF_CONT;
- len -= dma_len;
- dma_addr += dma_len;
- ++tx_queue->insert_count;
- } while (len);
-
- return buffer;
-}
-
-/* Map all data from an SKB for DMA and create descriptors on the queue.
- */
-static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
- unsigned int segment_count)
-{
- struct efx_nic *efx = tx_queue->efx;
- struct device *dma_dev = &efx->pci_dev->dev;
- unsigned int frag_index, nr_frags;
- dma_addr_t dma_addr, unmap_addr;
- unsigned short dma_flags;
- size_t len, unmap_len;
-
- nr_frags = skb_shinfo(skb)->nr_frags;
- frag_index = 0;
-
- /* Map header data. */
- len = skb_headlen(skb);
- dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
- dma_flags = EFX_TX_BUF_MAP_SINGLE;
- unmap_len = len;
- unmap_addr = dma_addr;
-
- if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
- return -EIO;
-
- if (segment_count) {
- /* For TSO we need to put the header in to a separate
- * descriptor. Map this separately if necessary.
- */
- size_t header_len = skb_transport_header(skb) - skb->data +
- (tcp_hdr(skb)->doff << 2u);
-
- if (header_len != len) {
- tx_queue->tso_long_headers++;
- efx_tx_map_chunk(tx_queue, dma_addr, header_len);
- len -= header_len;
- dma_addr += header_len;
- }
- }
-
- /* Add descriptors for each fragment. */
- do {
- struct efx_tx_buffer *buffer;
- skb_frag_t *fragment;
-
- buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
-
- /* The final descriptor for a fragment is responsible for
- * unmapping the whole fragment.
- */
- buffer->flags = EFX_TX_BUF_CONT | dma_flags;
- buffer->unmap_len = unmap_len;
- buffer->dma_offset = buffer->dma_addr - unmap_addr;
-
- if (frag_index >= nr_frags) {
- /* Store SKB details with the final buffer for
- * the completion.
- */
- buffer->skb = skb;
- buffer->flags = EFX_TX_BUF_SKB | dma_flags;
- return 0;
- }
-
- /* Move on to the next fragment. */
- fragment = &skb_shinfo(skb)->frags[frag_index++];
- len = skb_frag_size(fragment);
- dma_addr = skb_frag_dma_map(dma_dev, fragment,
- 0, len, DMA_TO_DEVICE);
- dma_flags = 0;
- unmap_len = len;
- unmap_addr = dma_addr;
-
- if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
- return -EIO;
- } while (1);
-}
-
/* Remove buffers put into a tx_queue for the current packet.
* None of the buffers must have an skb attached.
*/
@@ -876,131 +710,3 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
}
}
}
-
-static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
-{
- return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
-}
-
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned int entries;
- int rc;
-
- /* Create the smallest power-of-two aligned ring */
- entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
- tx_queue->ptr_mask = entries - 1;
-
- netif_dbg(efx, probe, efx->net_dev,
- "creating TX queue %d size %#x mask %#x\n",
- tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
-
- /* Allocate software ring */
- tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
- GFP_KERNEL);
- if (!tx_queue->buffer)
- return -ENOMEM;
-
- tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
- sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
- if (!tx_queue->cb_page) {
- rc = -ENOMEM;
- goto fail1;
- }
-
- /* Allocate hardware ring */
- rc = efx_nic_probe_tx(tx_queue);
- if (rc)
- goto fail2;
-
- return 0;
-
-fail2:
- kfree(tx_queue->cb_page);
- tx_queue->cb_page = NULL;
-fail1:
- kfree(tx_queue->buffer);
- tx_queue->buffer = NULL;
- return rc;
-}
-
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
-
- netif_dbg(efx, drv, efx->net_dev,
- "initialising TX queue %d\n", tx_queue->queue);
-
- tx_queue->insert_count = 0;
- tx_queue->write_count = 0;
- tx_queue->packet_write_count = 0;
- tx_queue->old_write_count = 0;
- tx_queue->read_count = 0;
- tx_queue->old_read_count = 0;
- tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
- tx_queue->xmit_more_available = false;
- tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
- tx_queue->channel == efx_ptp_channel(efx));
- tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
- tx_queue->completed_timestamp_major = 0;
- tx_queue->completed_timestamp_minor = 0;
-
- tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
-
- /* Set up default function pointers. These may get replaced by
- * efx_nic_init_tx() based off NIC/queue capabilities.
- */
- tx_queue->handle_tso = efx_enqueue_skb_tso;
-
- /* Set up TX descriptor ring */
- efx_nic_init_tx(tx_queue);
-
- tx_queue->initialised = true;
-}
-
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_tx_buffer *buffer;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "shutting down TX queue %d\n", tx_queue->queue);
-
- if (!tx_queue->buffer)
- return;
-
- /* Free any buffers left in the ring */
- while (tx_queue->read_count != tx_queue->write_count) {
- unsigned int pkts_compl = 0, bytes_compl = 0;
- buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-
- ++tx_queue->read_count;
- }
- tx_queue->xmit_more_available = false;
- netdev_tx_reset_queue(tx_queue->core_txq);
-}
-
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
-{
- int i;
-
- if (!tx_queue->buffer)
- return;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "destroying TX queue %d\n", tx_queue->queue);
- efx_nic_remove_tx(tx_queue);
-
- if (tx_queue->cb_page) {
- for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
- efx_nic_free_buffer(tx_queue->efx,
- &tx_queue->cb_page[i]);
- kfree(tx_queue->cb_page);
- tx_queue->cb_page = NULL;
- }
-
- kfree(tx_queue->buffer);
- tx_queue->buffer = NULL;
-}
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
new file mode 100644
index 000000000000..e29ade21c4b9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "tx_common.h"
+
+static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
+{
+ return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
+ PAGE_SIZE >> EFX_TX_CB_ORDER);
+}
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ tx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating TX queue %d size %#x mask %#x\n",
+ tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
+
+ /* Allocate software ring */
+ tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
+ GFP_KERNEL);
+ if (!tx_queue->buffer)
+ return -ENOMEM;
+
+ tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
+ sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
+ if (!tx_queue->cb_page) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Allocate hardware ring */
+ rc = efx_nic_probe_tx(tx_queue);
+ if (rc)
+ goto fail2;
+
+ return 0;
+
+fail2:
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+fail1:
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+ return rc;
+}
+
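The power-of-two ring size is what makes ptr_mask indexing cheap; a worked example under an assumed configuration of txq_entries = 1000:

/* roundup_pow_of_two(1000) = 1024, so entries = 1024 and
 * ptr_mask = 0x3ff. Ring indices then wrap with a mask rather
 * than a modulo:
 */
static struct efx_tx_buffer *
example_ring_slot(struct efx_tx_queue *tx_queue, unsigned int count)
{
	return &tx_queue->buffer[count & tx_queue->ptr_mask];
}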
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "initialising TX queue %d\n", tx_queue->queue);
+
+ tx_queue->insert_count = 0;
+ tx_queue->write_count = 0;
+ tx_queue->packet_write_count = 0;
+ tx_queue->old_write_count = 0;
+ tx_queue->read_count = 0;
+ tx_queue->old_read_count = 0;
+ tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+ tx_queue->xmit_more_available = false;
+ tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
+ tx_queue->channel == efx_ptp_channel(efx));
+ tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+
+ tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
+
+	/* Set up default function pointers. These may be replaced by
+	 * efx_nic_init_tx() based on NIC/queue capabilities.
+	 */
+ tx_queue->handle_tso = efx_enqueue_skb_tso;
+
+ /* Set up TX descriptor ring */
+ efx_nic_init_tx(tx_queue);
+
+ tx_queue->initialised = true;
+}
+
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
+ if (!tx_queue->buffer)
+ return;
+
+ /* Free any buffers left in the ring */
+ while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+
+ buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+ ++tx_queue->read_count;
+ }
+ tx_queue->xmit_more_available = false;
+ netdev_tx_reset_queue(tx_queue->core_txq);
+}
+
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ int i;
+
+ if (!tx_queue->buffer)
+ return;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "destroying TX queue %d\n", tx_queue->queue);
+ efx_nic_remove_tx(tx_queue);
+
+ if (tx_queue->cb_page) {
+ for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
+ efx_nic_free_buffer(tx_queue->efx,
+ &tx_queue->cb_page[i]);
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+ }
+
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+}
+
+void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ if (buffer->unmap_len) {
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
+ dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
+
+ if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
+ dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ buffer->unmap_len = 0;
+ }
+
+ if (buffer->flags & EFX_TX_BUF_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)buffer->skb;
+
+ EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ if (tx_queue->timestamping &&
+ (tx_queue->completed_timestamp_major ||
+ tx_queue->completed_timestamp_minor)) {
+ struct skb_shared_hwtstamps hwtstamp;
+
+ hwtstamp.hwtstamp =
+ efx_ptp_nic_to_kernel_time(tx_queue);
+ skb_tstamp_tx(skb, &hwtstamp);
+
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+ }
+ dev_consume_skb_any((struct sk_buff *)buffer->skb);
+ netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+ "TX queue %d transmission id %x complete\n",
+ tx_queue->queue, tx_queue->read_count);
+ } else if (buffer->flags & EFX_TX_BUF_XDP) {
+ xdp_return_frame_rx_napi(buffer->xdpf);
+ }
+
+ buffer->len = 0;
+ buffer->flags = 0;
+}
+
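A completion handler built on efx_dequeue_buffer() walks the ring from read_count and reports the totals to the stack; a minimal sketch, assuming index marks the last completed slot (this stands in for the driver's real efx_xmit_done() path):

/* Sketch: drain completed buffers up to 'index' (inclusive) and
 * feed the totals to byte-queue-limits accounting.
 */
static void example_process_completions(struct efx_tx_queue *tx_queue,
					unsigned int index)
{
	unsigned int stop = (index + 1) & tx_queue->ptr_mask;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	while ((tx_queue->read_count & tx_queue->ptr_mask) != stop) {
		struct efx_tx_buffer *buffer =
			&tx_queue->buffer[tx_queue->read_count &
					  tx_queue->ptr_mask];

		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl,
				   &bytes_compl);
		++tx_queue->read_count;
	}

	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl,
				  bytes_compl);
}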
+struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len)
+{
+ const struct efx_nic_type *nic_type = tx_queue->efx->type;
+ struct efx_tx_buffer *buffer;
+ unsigned int dma_len;
+
+ /* Map the fragment taking account of NIC-dependent DMA limits. */
+ do {
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+
+ buffer->len = dma_len;
+ buffer->dma_addr = dma_addr;
+ buffer->flags = EFX_TX_BUF_CONT;
+ len -= dma_len;
+ dma_addr += dma_len;
+ ++tx_queue->insert_count;
+ } while (len);
+
+ return buffer;
+}
+
+/* Map all data from an SKB for DMA and create descriptors on the queue. */
+int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct device *dma_dev = &efx->pci_dev->dev;
+ unsigned int frag_index, nr_frags;
+ dma_addr_t dma_addr, unmap_addr;
+ unsigned short dma_flags;
+ size_t len, unmap_len;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ frag_index = 0;
+
+ /* Map header data. */
+ len = skb_headlen(skb);
+ dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
+ dma_flags = EFX_TX_BUF_MAP_SINGLE;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+
+ if (segment_count) {
+		/* For TSO we need to put the header into a separate
+ * descriptor. Map this separately if necessary.
+ */
+ size_t header_len = skb_transport_header(skb) - skb->data +
+ (tcp_hdr(skb)->doff << 2u);
+
+ if (header_len != len) {
+ tx_queue->tso_long_headers++;
+ efx_tx_map_chunk(tx_queue, dma_addr, header_len);
+ len -= header_len;
+ dma_addr += header_len;
+ }
+ }
+
+ /* Add descriptors for each fragment. */
+ do {
+ struct efx_tx_buffer *buffer;
+ skb_frag_t *fragment;
+
+ buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+
+ /* The final descriptor for a fragment is responsible for
+ * unmapping the whole fragment.
+ */
+ buffer->flags = EFX_TX_BUF_CONT | dma_flags;
+ buffer->unmap_len = unmap_len;
+ buffer->dma_offset = buffer->dma_addr - unmap_addr;
+
+ if (frag_index >= nr_frags) {
+ /* Store SKB details with the final buffer for
+ * the completion.
+ */
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB | dma_flags;
+ return 0;
+ }
+
+ /* Move on to the next fragment. */
+ fragment = &skb_shinfo(skb)->frags[frag_index++];
+ len = skb_frag_size(fragment);
+ dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
+ DMA_TO_DEVICE);
+ dma_flags = 0;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+ } while (1);
+}
+
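A caller sketch showing the contract implied by the -EIO returns: efx_tx_map_data() can fail part-way through, after some descriptors have already been inserted, so cleanup of the partially built chain is the caller's responsibility (hypothetical function; the details of the unwind are elided):

/* Sketch: map an skb for transmit. segment_count > 0 selects the
 * TSO header-split behaviour above.
 */
static netdev_tx_t example_enqueue(struct efx_tx_queue *tx_queue,
				   struct sk_buff *skb)
{
	unsigned int segments = skb_is_gso(skb) ?
				skb_shinfo(skb)->gso_segs : 0;

	if (efx_tx_map_data(tx_queue, skb, segments)) {
		/* Mapping failed part-way: the caller must remove any
		 * descriptors already inserted, then drop the skb.
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/* ...push descriptors to the NIC and update BQL here... */
	return NETDEV_TX_OK;
}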
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+ /* Header and payload descriptor for each output segment, plus
+ * one for every input fragment boundary within a segment
+ */
+ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+ /* Possibly one more per segment for option descriptors */
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ max_descs += EFX_TSO_MAX_SEGS;
+
+ /* Possibly more for PCIe page boundaries within input fragments */
+ if (PAGE_SIZE > EFX_PAGE_SIZE)
+ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+ DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+ return max_descs;
+}
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
new file mode 100644
index 000000000000..afdfc79a8ea0
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TX_COMMON_H
+#define EFX_TX_COMMON_H
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+
+void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl);
+
+struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len);
+int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count);
+
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+
+#endif