Diffstat (limited to 'drivers/net/ethernet/intel/i40e')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h             |   8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h      |   4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c      |   2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c      |   4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c     |  10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c     |   2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c        | 330
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c         |   1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_trace.h       |   8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c        |  50
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h        |   5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx_common.h |  19
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h        |   5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |   9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c         | 105
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.h         |   4
16 files changed, 200 insertions(+), 366 deletions(-)
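
In outline, this patch set does three things: it converts i40e from its hand-rolled UDP tunnel port tracking (a pending bitmap replayed from the service task) to the core udp_tunnel_nic infrastructure, it moves the AF_XDP zero-copy paths from struct xdp_umem to struct xsk_buff_pool, and it switches Rx to 16-byte descriptors. As a hedged illustration of the first point, a driver without i40e's multi-VSI sharing would typically describe its port table once, statically; the snippet below sketches that common pattern (the example_* callbacks are hypothetical) and is not code from this patch, which instead fills a per-PF copy with a .shared pointer in i40e_probe():

/* Sketch: static table description for a single-netdev driver. */
static const struct udp_tunnel_nic_info example_udp_tunnels = {
	.set_port	= example_udp_tunnel_set_port,	 /* hypothetical */
	.unset_port	= example_udp_tunnel_unset_port, /* hypothetical */
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{
			.n_entries	= 16,
			.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN |
					  UDP_TUNNEL_TYPE_GENEVE,
		},
	},
};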
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index a7e212d1caa2..537300e762f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -35,6 +35,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
@@ -90,7 +91,7 @@
#define I40E_OEM_RELEASE_MASK 0x0000ffff
#define I40E_RX_DESC(R, i) \
- (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+ (&(((union i40e_rx_desc *)((R)->desc))[i]))
#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
@@ -133,7 +134,6 @@ enum i40e_state_t {
__I40E_PORT_SUSPENDED,
__I40E_VF_DISABLE,
__I40E_MACVLAN_SYNC_PENDING,
- __I40E_UDP_FILTER_SYNC_PENDING,
__I40E_TEMP_LINK_POLLING,
__I40E_CLIENT_SERVICE_REQUESTED,
__I40E_CLIENT_L2_CHANGE,
@@ -478,8 +478,8 @@ struct i40e_pf {
struct list_head l3_flex_pit_list;
struct list_head l4_flex_pit_list;
- struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
- u16 pending_udp_bitmap;
+ struct udp_tunnel_nic_shared udp_tunnel_shared;
+ struct udp_tunnel_nic_info udp_tunnel_nic;
struct hlist_head cloud_filter_list;
u16 num_cloud_filters;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index edec3df78971..ee394aacef4d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -85,8 +85,8 @@ struct i40e_adminq_info {
/**
* i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_ret: AdminQ handler error code can override aq_rc
- * aq_rc: AdminQ firmware error code to convert
+ * @aq_ret: AdminQ handler error code can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
**/
static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index befd3018183f..a2dba32383f6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -278,8 +278,6 @@ void i40e_client_update_msix_info(struct i40e_pf *pf)
/**
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
- * @client: pointer to a client struct in the client list.
- * @existing: if there was already an existing instance
*
**/
static void i40e_client_add_instance(struct i40e_pf *pf)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 6ab52cbd697a..adc9e4fa4789 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3766,9 +3766,7 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
/**
* i40e_aq_start_lldp
* @hw: pointer to the hw struct
- * @buff: buffer for result
* @persist: True if start of LLDP should be persistent across power cycles
- * @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports.
@@ -5395,6 +5393,7 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: flag to indicate if phy page should be updated
* @set_mdio: use MDIO I/F number specified by mdio_num
* @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
@@ -5439,6 +5438,7 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: flag to indicate if phy page should be updated
* @set_mdio: use MDIO I/F number specified by mdio_num
* @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d3ad2e3aa838..d7c13ca9be7d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -604,10 +604,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
- " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ " d[%03x] = 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
- rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ rxd->read.hdr_addr);
}
}
} else if (cnt == 3) {
@@ -625,10 +624,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
- "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
- rxd->read.pkt_addr, rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ rxd->read.pkt_addr, rxd->read.hdr_addr);
}
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 825c104ecba1..dc1577156bb6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1967,7 +1967,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
(new_rx_count == vsi->rx_rings[0]->count))
return 0;
- /* If there is a AF_XDP UMEM attached to any of Rx rings,
+ /* If there is an AF_XDP page pool attached to any of Rx rings,
* disallow changing the number of descriptors -- regardless
* if the netdev is running or not.
*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2e433fdbf2c3..929c64789119 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -287,6 +287,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
/**
* i40e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue number timing out
*
* If any port has noticed a Tx timeout, it is likely that the whole
* device is munged, not just the one netdev port, so go for the full
@@ -1609,6 +1610,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
* i40e_config_rss_aq - Prepare for RSS using AQ commands
* @vsi: vsi structure
* @seed: RSS hash seed
+ * @lut: pointer to lookup table of lut_size
+ * @lut_size: size of the lookup table
**/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
u8 *lut, u16 lut_size)
@@ -3122,12 +3125,12 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
}
/**
- * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
+ * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC are enabled
* @ring: The Tx or Rx ring
*
- * Returns the UMEM or NULL.
+ * Returns the AF_XDP buffer pool or NULL.
**/
-static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
{
bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
int qid = ring->queue_index;
@@ -3138,7 +3141,7 @@ static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
return NULL;
- return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+ return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
}
/**
@@ -3157,7 +3160,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u32 qtx_ctl = 0;
if (ring_is_xdp(ring))
- ring->xsk_umem = i40e_xsk_umem(ring);
+ ring->xsk_pool = i40e_xsk_pool(ring);
/* some ATR related tx ring init */
if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
@@ -3280,12 +3283,13 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
kfree(ring->rx_bi);
- ring->xsk_umem = i40e_xsk_umem(ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
ret = i40e_alloc_rx_bi_zc(ring);
if (ret)
return ret;
- ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ ring->rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -3320,8 +3324,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
- /* use 32 byte descriptors */
- rx_ctx.dsize = 1;
+ /* use 16 byte descriptors */
+ rx_ctx.dsize = 0;
/* descriptor type is always zero
* rx_ctx.dtype = 0;
@@ -3368,8 +3372,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring->xsk_umem) {
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ if (ring->xsk_pool) {
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
} else {
ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
@@ -3380,7 +3384,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
*/
dev_info(&vsi->back->pdev->dev,
"Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
- ring->xsk_umem ? "UMEM enabled " : "",
+ ring->xsk_pool ? "AF_XDP ZC enabled " : "",
ring->queue_index, pf_q);
}
@@ -5814,7 +5818,6 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
/**
* i40e_channel_setup_queue_map - Setup a channel queue map
* @pf: ptr to PF device
- * @vsi: the VSI being setup
* @ctxt: VSI context structure
* @ch: ptr to channel structure
*
@@ -6057,8 +6060,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
/**
* i40e_setup_channel - setup new channel using uplink element
* @pf: ptr to PF device
- * @type: type of channel to be created (VMDq2/VF)
- * @uplink_seid: underlying HW switching element (VEB) ID
+ * @vsi: pointer to the VSI to set up the channel within
* @ch: ptr to channel structure
*
* Setup new channel (VSI) based on specified type (VMDq2/VF)
@@ -6689,7 +6691,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- WARN_ON(in_interrupt());
while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
usleep_range(1000, 2000);
i40e_down(vsi);
@@ -7779,7 +7780,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
/**
* i40e_parse_cls_flower - Parse tc flower filters provided by kernel
* @vsi: Pointer to VSI
- * @cls_flower: Pointer to struct flow_cls_offload
+ * @f: Pointer to struct flow_cls_offload
* @filter: Pointer to cloud filter structure
*
**/
@@ -8160,8 +8161,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
/**
* i40e_setup_tc_cls_flower - flower classifier offloads
- * @netdev: net device to configure
- * @type_data: offload data
+ * @np: net device to configure
+ * @cls_flower: offload data
**/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
struct flow_cls_offload *cls_flower)
@@ -8462,9 +8463,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
u32 val;
- WARN_ON(in_interrupt());
-
-
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -9585,6 +9583,7 @@ end_reconstitute:
/**
* i40e_get_capabilities - get info about the HW
* @pf: the PF struct
+ * @list_type: AQ capability to be queried
**/
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type)
@@ -10383,106 +10382,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
-static const char *i40e_tunnel_name(u8 type)
-{
- switch (type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- return "vxlan";
- case UDP_TUNNEL_TYPE_GENEVE:
- return "geneve";
- default:
- return "unknown";
- }
-}
-
-/**
- * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
- * @pf: board private structure
- **/
-static void i40e_sync_udp_filters(struct i40e_pf *pf)
-{
- int i;
-
- /* loop through and set pending bit for all active UDP filters */
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->udp_ports[i].port)
- pf->pending_udp_bitmap |= BIT_ULL(i);
- }
-
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
-}
-
-/**
- * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
- * @pf: board private structure
- **/
-static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- u8 filter_index, type;
- u16 port;
- int i;
-
- if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
- return;
-
- /* acquire RTNL to maintain state of flags and port requests */
- rtnl_lock();
-
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->pending_udp_bitmap & BIT_ULL(i)) {
- struct i40e_udp_port_config *udp_port;
- i40e_status ret = 0;
-
- udp_port = &pf->udp_ports[i];
- pf->pending_udp_bitmap &= ~BIT_ULL(i);
-
- port = READ_ONCE(udp_port->port);
- type = READ_ONCE(udp_port->type);
- filter_index = READ_ONCE(udp_port->filter_index);
-
- /* release RTNL while we wait on AQ command */
- rtnl_unlock();
-
- if (port)
- ret = i40e_aq_add_udp_tunnel(hw, port,
- type,
- &filter_index,
- NULL);
- else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
- ret = i40e_aq_del_udp_tunnel(hw, filter_index,
- NULL);
-
- /* reacquire RTNL so we can update filter_index */
- rtnl_lock();
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "%s %s port %d, index %d failed, err %s aq_err %s\n",
- i40e_tunnel_name(type),
- port ? "add" : "delete",
- port,
- filter_index,
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
- if (port) {
- /* failed to add, just reset port,
- * drop pending bit for any deletion
- */
- udp_port->port = 0;
- pf->pending_udp_bitmap &= ~BIT_ULL(i);
- }
- } else if (port) {
- /* record filter index on success */
- udp_port->filter_index = filter_index;
- }
- }
- }
-
- rtnl_unlock();
-}
-
/**
* i40e_service_task - Run the driver's async subtasks
* @work: pointer to work_struct containing our data
@@ -10522,7 +10421,6 @@ static void i40e_service_task(struct work_struct *work)
pf->vsi[pf->lan_vsi]);
}
i40e_sync_filters_subtask(pf);
- i40e_sync_udp_filters_subtask(pf);
} else {
i40e_reset_subtask(pf);
}
@@ -10546,7 +10444,7 @@ static void i40e_service_task(struct work_struct *work)
/**
* i40e_service_timer - timer callback
- * @data: pointer to PF struct
+ * @t: timer list pointer
**/
static void i40e_service_timer(struct timer_list *t)
{
@@ -11185,11 +11083,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
- * @cpu: cpu to be used on affinity_mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
struct i40e_q_vector *q_vector;
@@ -11222,7 +11119,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int err, v_idx, num_q_vectors, current_cpu;
+ int err, v_idx, num_q_vectors;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -11232,15 +11129,10 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
- current_cpu = cpumask_first(cpu_online_mask);
-
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx);
if (err)
goto err_out;
- current_cpu = cpumask_next(current_cpu, cpu_online_mask);
- if (unlikely(current_cpu >= nr_cpu_ids))
- current_cpu = cpumask_first(cpu_online_mask);
}
return 0;
@@ -12228,131 +12120,48 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
-/**
- * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
- * @pf: board private structure
- * @port: The UDP port to look up
- *
- * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
- **/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
-{
- u8 i;
-
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- /* Do not report ports with pending deletions as
- * being available.
- */
- if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
- continue;
- if (pf->udp_ports[i].port == port)
- return i;
- }
-
- return i;
-}
-
-/**
- * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void i40e_udp_tunnel_add(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int i40e_udp_tunnel_set_port(struct net_device *netdev,
+ unsigned int table, unsigned int idx,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- u8 next_idx;
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "port %d already offloaded\n", port);
- return;
- }
-
- /* Now check if there is space to add the new port */
- next_idx = i40e_get_udp_port_idx(pf, 0);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ u8 type, filter_index;
+ i40e_status ret;
- if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
- port);
- return;
- }
+ type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
+ I40E_AQC_TUNNEL_TYPE_NGE;
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
- return;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
- break;
- default:
- return;
+ ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
+ NULL);
+ if (ret) {
+ netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
}
- /* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].port = port;
- pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
- pf->pending_udp_bitmap |= BIT_ULL(next_idx);
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
+ udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
+ return 0;
}
-/**
- * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void i40e_udp_tunnel_del(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
+ unsigned int table, unsigned int idx,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
- goto not_found;
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ i40e_status ret;
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
- goto not_found;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
- goto not_found;
- break;
- default:
- goto not_found;
+ ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
+ if (ret) {
+ netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
}
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].port = 0;
-
- /* Toggle pending bit instead of setting it. This way if we are
- * deleting a port that has yet to be added we just clear the pending
- * bit and don't have to worry about it.
- */
- pf->pending_udp_bitmap ^= BIT_ULL(idx);
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
-
- return;
-not_found:
- netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
- port);
+ return 0;
}
static int i40e_get_phys_port_id(struct net_device *netdev,
@@ -12379,6 +12188,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
* @addr: the MAC address entry being added
* @vid: VLAN ID
* @flags: instructions from stack about fdb operation
+ * @extack: netlink extended ack, unused currently
*/
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
@@ -12644,7 +12454,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
*/
if (need_reset && prog)
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->xdp_rings[i]->xsk_umem)
+ if (vsi->xdp_rings[i]->xsk_pool)
(void)i40e_xsk_wakeup(vsi->netdev, i,
XDP_WAKEUP_RX);
@@ -12923,8 +12733,8 @@ static int i40e_xdp(struct net_device *dev,
switch (xdp->command) {
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
@@ -12957,8 +12767,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
- .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
- .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
@@ -13022,6 +12832,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
+
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
netdev->hw_enc_features |= hw_enc_features;
@@ -14422,7 +14234,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_ptp_init(pf);
/* repopulate tunnel port filters */
- i40e_sync_udp_filters(pf);
+ udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
return ret;
}
@@ -15151,6 +14963,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_switch_setup;
+ pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
+ pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
+ pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+ pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
+ pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
+ pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
+ UDP_TUNNEL_TYPE_GENEVE;
+
/* The number of VSIs reported by the FW is the minimum guaranteed
* to us; HW supports far more and we share the remaining pool with
* the other PFs. We allocate space for more than the guarantee with
@@ -15160,6 +14980,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
else
pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(&pf->pdev->dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
+ }
/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
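
A short note on the control-flow change the i40e_main.c hunks above add up to, written as a comment sketch rather than an assertion about every corner case:

/*
 * Old flow: ndo_udp_tunnel_add/del parked requests in
 * pf->pending_udp_bitmap and the service task replayed them to firmware
 * later, juggling RTNL around the AdminQ calls.  New flow: the
 * udp_tunnel_nic core invokes i40e_udp_tunnel_set_port() /
 * i40e_udp_tunnel_unset_port() directly (sleeping is allowed via
 * UDP_TUNNEL_NIC_INFO_MAY_SLEEP), stashes the AdminQ filter index with
 * udp_tunnel_nic_set_port_priv() and hands it back in ti->hw_priv on
 * removal.  After a PF rebuild, udp_tunnel_nic_reset_ntf() asks the core
 * to re-issue set_port for every offloaded port, replacing
 * i40e_sync_udp_filters().
 */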
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index ff7b19c6bc73..7a879614ca55 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -259,7 +259,6 @@ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
/**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
* @pf: The PF private data structure
- * @vsi: The VSI with the rings relevant to 1588
*
* This watchdog task is scheduled to detect error case where hardware has
* dropped an Rx packet that was timestamped when the ring is full. The
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 424f02077e2e..b5b12299931f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -22,7 +22,7 @@
#include <linux/tracepoint.h>
-/**
+/*
* i40e_trace() macro enables shared code to refer to trace points
* like:
*
@@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS(
i40e_rx_template,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -148,7 +148,7 @@ DEFINE_EVENT(
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq_rx,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3e5c566ceb01..d43ce13a93c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -533,11 +533,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct pci_dev *pdev = pf->pdev;
- struct i40e_32b_rx_wb_qw0 *qw0;
+ struct i40e_16b_rx_wb_qw0 *qw0;
u32 fcnt_prog, fcnt_avail;
u32 error;
- qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw;
+ qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
@@ -636,7 +636,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
unsigned long bi_size;
u16 i;
- if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
i40e_xsk_clean_tx_ring(tx_ring);
} else {
/* ring already cleared, nothing to do */
@@ -1335,7 +1335,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
rx_ring->skb = NULL;
}
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
i40e_xsk_clean_rx_ring(rx_ring);
goto skip_free;
}
@@ -1369,7 +1369,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
}
skip_free:
- if (rx_ring->xsk_umem)
+ if (rx_ring->xsk_pool)
i40e_clear_rx_bi_zc(rx_ring);
else
i40e_clear_rx_bi(rx_ring);
@@ -1418,7 +1418,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -1755,7 +1755,6 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
@@ -1953,7 +1952,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer;
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- prefetchw(rx_buffer->page);
+ prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
@@ -1992,10 +1991,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -2078,10 +2075,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
+
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
@@ -2300,6 +2295,19 @@ void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
}
/**
+ * i40e_inc_ntc - Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+}
+
+/**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -2579,7 +2587,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
i40e_clean_xdp_tx_irq(vsi, ring) :
i40e_clean_tx_irq(vsi, ring, budget);
@@ -2607,7 +2615,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = budget;
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned = ring->xsk_umem ?
+ int cleaned = ring->xsk_pool ?
i40e_clean_rx_irq_zc(ring, budget_per_ring) :
i40e_clean_rx_irq(ring, budget_per_ring);
@@ -3503,7 +3511,7 @@ dma_error:
/**
* i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
- * @xdp: data to transmit
+ * @xdpf: data to transmit
* @xdp_ring: XDP Tx ring
**/
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
@@ -3698,7 +3706,9 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/**
* i40e_xdp_xmit - Implements ndo_xdp_xmit
* @dev: netdev
- * @xdp: XDP buffer
+ * @n: number of frames
+ * @frames: array of XDP buffer pointers
+ * @flags: XDP extra info
*
* Returns number of frames successfully sent. Frames that fail are
* free'ed via XDP return API.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4036893d6825..2feed920ef8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -110,7 +110,7 @@ enum i40e_dyn_idx_t {
*/
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
-#define i40e_rx_desc i40e_32byte_rx_desc
+#define i40e_rx_desc i40e_16byte_rx_desc
#define I40E_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
@@ -388,7 +388,7 @@ struct i40e_ring {
struct i40e_channel *ch;
struct xdp_rxq_info xdp_rxq;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -482,7 +482,6 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
/**
* i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
- * @tx_ring: ring to send buffer on
*
* Returns number of data descriptors needed for this skb. Returns 0 to indicate
* there is not enough descriptors available in this ring since we need at least
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 667c4dc4b39f..19da3b22160f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -21,9 +21,9 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
#define I40E_XDP_TX BIT(1)
#define I40E_XDP_REDIR BIT(2)
-/**
+/*
* build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
- **/
+ */
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
{
@@ -37,7 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
/**
* i40e_update_tx_stats - Update the egress statistics for the Tx ring
* @tx_ring: Tx ring to update
- * @total_packet: total packets sent
+ * @total_packets: total packets sent
* @total_bytes: total bytes sent
**/
static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
@@ -99,19 +99,6 @@ static inline bool i40e_rx_is_programming_status(u64 qword1)
return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK;
}
-/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static inline void i40e_inc_ntc(struct i40e_ring *rx_ring)
-{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(I40E_RX_DESC(rx_ring, ntc));
-}
-
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 52410d609ba1..97d29df65f9e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -628,7 +628,7 @@ union i40e_16byte_rx_desc {
__le64 hdr_addr; /* Header buffer address */
} read;
struct {
- struct {
+ struct i40e_16b_rx_wb_qw0 {
struct {
union {
__le16 mirroring_status;
@@ -647,6 +647,9 @@ union i40e_16byte_rx_desc {
__le64 status_error_len;
} qword1;
} wb; /* writeback */
+ struct {
+ u64 qword[2];
+ } raw;
};
union i40e_32byte_rx_desc {
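
One consequence of the descriptor-size switch is easy to miss, so a quick sizing note:

/* With rx_ctx.dsize = 0 the hardware writes 16-byte Rx descriptors, so
 * descriptor memory halves: a 512-entry ring now needs 8 KiB instead of
 * the 16 KiB that 32-byte descriptors required (cf. the
 * sizeof(union i40e_rx_desc) change in i40e_setup_rx_descriptors()).
 */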
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 47bfb2e95e2d..c96e2f2d4cba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2244,7 +2244,8 @@ error_param:
}
/**
- * i40e_validate_queue_map
+ * i40e_validate_queue_map - check queue map is valid
+ * @vf: the VF structure pointer
* @vsi_id: vsi id
* @queuemap: Tx or Rx queue map
*
@@ -3160,8 +3161,8 @@ err:
/**
* i40e_validate_cloud_filter
- * @mask: mask for TC filter
- * @data: data for TC filter
+ * @vf: pointer to VF structure
+ * @tc_filter: pointer to filter requested
*
* This function validates cloud filter programmed as TC filter for ADq
**/
@@ -3294,7 +3295,7 @@ err:
/**
* i40e_find_vsi_from_seid - searches for the vsi with the given seid
* @vf: pointer to the VF info
- * @seid - seid of the vsi it is searching for
+ * @seid: seid of the vsi it is searching for
**/
static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 8ce57b507a21..6acede0acdca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -29,14 +29,16 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
}
/**
- * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
+ * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
+ * certain ring/qid
* @vsi: Current VSI
- * @umem: UMEM
- * @qid: Rx ring to associate UMEM to
+ * @pool: buffer pool
+ * @qid: Rx ring to associate buffer pool with
*
* Returns 0 on success, <0 on failure
**/
-static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
+static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
+ struct xsk_buff_pool *pool,
u16 qid)
{
struct net_device *netdev = vsi->netdev;
@@ -53,7 +55,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- err = xsk_buff_dma_map(umem, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
+ err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
if (err)
return err;
@@ -80,21 +82,22 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
}
/**
- * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
+ * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
+ * certain ring/qid
* @vsi: Current VSI
- * @qid: Rx ring to associate UMEM to
+ * @qid: Rx ring to associate buffer pool with
*
* Returns 0 on success, <0 on failure
**/
-static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
+static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
struct net_device *netdev = vsi->netdev;
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
bool if_running;
int err;
- umem = xdp_get_umem_from_qid(netdev, qid);
- if (!umem)
+ pool = xsk_get_pool_from_qid(netdev, qid);
+ if (!pool)
return -EINVAL;
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
@@ -106,7 +109,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
clear_bit(qid, vsi->af_xdp_zc_qps);
- xsk_buff_dma_unmap(umem, I40E_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -118,20 +121,21 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
/**
- * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
+ * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
+ * a ring/qid
* @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, or NULL to disable
- * @qid: Rx ring to (dis)associate UMEM (from)to
+ * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
+ * @qid: Rx ring to (dis)associate the buffer pool to/from
*
- * This function enables or disables a UMEM to a certain ring.
+ * This function attaches a buffer pool to, or detaches one from, a certain ring.
*
* Returns 0 on success, <0 on failure
**/
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid)
{
- return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
- i40e_xsk_umem_disable(vsi, qid);
+ return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
+ i40e_xsk_pool_disable(vsi, qid);
}
/**
@@ -191,7 +195,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
rx_desc = I40E_RX_DESC(rx_ring, ntu);
bi = i40e_rx_bi(rx_ring, ntu);
do {
- xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!xdp) {
ok = false;
goto no_buffers;
@@ -254,6 +258,18 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
}
/**
+ * i40e_inc_ntc - Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+}
+
+/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
* @budget: NAPI budget
@@ -265,8 +281,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
unsigned int xdp_res, xdp_xmit = 0;
- bool failure = false;
struct sk_buff *skb;
+ bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
@@ -274,13 +290,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int size;
u64 qword;
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- !i40e_alloc_rx_buffers_zc(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
-
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
@@ -310,7 +319,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
(*bi)->data_end = (*bi)->data + size;
- xsk_buff_dma_sync_for_cpu(*bi);
+ xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
if (xdp_res) {
@@ -355,14 +364,17 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE)
+ failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
@@ -385,11 +397,11 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
while (budget-- > 0) {
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);
tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
@@ -416,7 +428,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
I40E_TXD_QW1_CMD_SHIFT);
i40e_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
}
@@ -448,7 +460,7 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
**/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
u32 i, completed_frames, xsk_frames = 0;
u32 head_idx = i40e_get_head(tx_ring);
struct i40e_tx_buffer *tx_bi;
@@ -488,13 +500,13 @@ skip:
tx_ring->next_to_clean -= tx_ring->count;
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);
i40e_arm_wb(tx_ring, vsi, completed_frames);
out_xmit:
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
- xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}
@@ -526,7 +538,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
if (queue_id >= vsi->num_queue_pairs)
return -ENXIO;
- if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ if (!vsi->xdp_rings[queue_id]->xsk_pool)
return -ENXIO;
ring = vsi->xdp_rings[queue_id];
@@ -565,7 +577,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
struct i40e_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -585,14 +597,15 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);
}
/**
- * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
+ * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
+ * buffer pool attached
* @vsi: vsi
*
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
**/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
@@ -600,7 +613,7 @@ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (xdp_get_umem_from_qid(netdev, i))
+ if (xsk_get_pool_from_qid(netdev, i))
return true;
}
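
For reference, the AF_XDP rename applied throughout this patch follows a consistent old-to-new mapping (collected from the hunks above):

/* xdp_get_umem_from_qid()       ->  xsk_get_pool_from_qid()
 * xsk_buff_dma_map()/unmap()    ->  xsk_pool_dma_map()/xsk_pool_dma_unmap()
 * xsk_umem_get_rx_frame_size()  ->  xsk_pool_get_rx_frame_size()
 * xsk_buff_set_rxq_info()       ->  xsk_pool_set_rxq_info()
 * xsk_umem_consume_tx()         ->  xsk_tx_peek_desc()
 * xsk_umem_consume_tx_done()    ->  xsk_tx_release()
 * xsk_umem_complete_tx()        ->  xsk_tx_completed()
 * xsk_umem_uses_need_wakeup()   ->  xsk_uses_need_wakeup()
 */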
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index c524c142127f..7adfd8539247 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -5,12 +5,12 @@
#define _I40E_XSK_H_
struct i40e_vsi;
-struct xdp_umem;
+struct xsk_buff_pool;
struct zero_copy_allocator;
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);