author      Linus Torvalds <torvalds@linux-foundation.org>  2019-09-18 12:34:53 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>  2019-09-18 12:34:53 -0700
commit      81160dda9a7aad13c04e78bb2cfd3c4630e3afab (patch)
tree        4bf79ffa9fc7dc5e2915ff978778c3402c491113 /drivers/net/ethernet/intel/i40e
parent      8b53c76533aa4356602aea98f98a2f3b4051464c (diff)
parent      1bab8d4c488be22d57f9dd09968c90a0ddc413bf (diff)
download    linux-81160dda9a7aad13c04e78bb2cfd3c4630e3afab.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from David Miller:
1) Support IPV6 RA Captive Portal Identifier, from Maciej Żenczykowski.
2) Use bio_vec in the networking instead of custom skb_frag_t, from
Matthew Wilcox (see the frag-accessor sketch after this list).
3) Make use of xmit_more in r8169 driver, from Heiner Kallweit.
4) Add devmap_hash to xdp, from Toke Høiland-Jørgensen.
5) Support all variants of 5750X bnxt_en chips, from Michael Chan.
6) More RTNL avoidance work in the core and mlx5 driver, from Vlad
Buslov.
7) Add TCP syn cookies bpf helper, from Petar Penkov.
8) Add 'nettest' to selftests and use it, from David Ahern.
9) Add extack support to drop_monitor, add packet alert mode and
support for HW drops, from Ido Schimmel.
10) Add VLAN offload to stmmac, from Jose Abreu.
11) Lots of devm_platform_ioremap_resource() conversions, from
YueHaibing.
12) Add IONIC driver, from Shannon Nelson.
13) Several kTLS cleanups, from Jakub Kicinski.
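Item 2 above is the tree-wide move of the networking fragment type onto bio_vec; in driver code the visible effect is that fragments are read through accessors rather than struct members, which is exactly what the i40e hunks further down do (skb_frag_off() replacing frag->page_offset, skb_frag_t replacing struct skb_frag_struct). A minimal sketch of that accessor style follows; walk_frags() is a made-up helper for illustration, not a kernel API:

```c
/*
 * Minimal sketch of the accessor style the i40e hunks below adopt.
 * walk_frags() is a hypothetical helper, not an existing kernel function.
 */
#include <linux/skbuff.h>

static unsigned int walk_frags(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int total = skb_headlen(skb);
	int i;

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];

		/* accessors instead of poking frag->page_offset directly */
		pr_debug("frag %d: off %u len %u\n",
			 i, skb_frag_off(frag), skb_frag_size(frag));
		total += skb_frag_size(frag);
	}

	return total;
}
```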
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1930 commits)
mlxsw: spectrum_buffers: Add the ability to query the CPU port's shared buffer
mlxsw: spectrum: Register CPU port with devlink
mlxsw: spectrum_buffers: Prevent changing CPU port's configuration
net: ena: fix incorrect update of intr_delay_resolution
net: ena: fix retrieval of nonadaptive interrupt moderation intervals
net: ena: fix update of interrupt moderation register
net: ena: remove all old adaptive rx interrupt moderation code from ena_com
net: ena: remove ena_restore_ethtool_params() and relevant fields
net: ena: remove old adaptive interrupt moderation code from ena_netdev
net: ena: remove code duplication in ena_com_update_nonadaptive_moderation_interval_*()
net: ena: enable the interrupt_moderation in driver_supported_features
net: ena: reimplement set/get_coalesce()
net: ena: switch to dim algorithm for rx adaptive interrupt moderation
net: ena: add intr_moder_rx_interval to struct ena_com_dev and use it
net: phy: adin: implement Energy Detect Powerdown mode via phy-tunable
ethtool: implement Energy Detect Powerdown support via phy-tunable
xen-netfront: do not assume sk_buff_head list is empty in error handling
s390/ctcm: Delete unnecessary checks before the macro call “dev_kfree_skb”
net: ena: don't wake up tx queue when down
drop_monitor: Better sanitize notified packets
...
Diffstat (limited to 'drivers/net/ethernet/intel/i40e')
23 files changed, 518 insertions, 267 deletions
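Before the per-file hunks, one recurring pattern in the i40e_register.h changes below is worth a note: every mask that lands in bit 31 switches from 0x1 to 0x1u. Presumably this keeps the shift out of the sign bit of a plain int, where the result is undefined in ISO C and, with typical compilers, sign-extends once the mask is widened to 64 bits. A standalone illustration under that assumption (DEMO_MASK is a hypothetical stand-in, not the driver's I40E_MASK):

```c
/*
 * Standalone illustration; DEMO_MASK is a hypothetical stand-in for a
 * register-mask macro, not the driver's I40E_MASK.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MASK(mask, shift)	((mask) << (shift))

int main(void)
{
	/* 0x1 << 31 overflows a signed int (undefined in ISO C); typical
	 * compilers produce INT_MIN, which sign-extends when widened.
	 */
	uint64_t widened_signed   = (uint64_t)DEMO_MASK(0x1, 31);
	/* 0x1u << 31 is a well-defined unsigned 32-bit value. */
	uint64_t widened_unsigned = (uint64_t)DEMO_MASK(0x1u, 31);

	printf("0x1  << 31 widened: 0x%016llx\n",
	       (unsigned long long)widened_signed);   /* typically 0xffffffff80000000 */
	printf("0x1u << 31 widened: 0x%016llx\n",
	       (unsigned long long)widened_unsigned); /* 0x0000000080000000 */
	return 0;
}
```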
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 84bd06901014..2af9f6308f84 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -131,7 +131,6 @@ enum i40e_state_t { __I40E_PF_RESET_REQUESTED, __I40E_CORE_RESET_REQUESTED, __I40E_GLOBAL_RESET_REQUESTED, - __I40E_EMP_RESET_REQUESTED, __I40E_EMP_RESET_INTR_RECEIVED, __I40E_SUSPENDED, __I40E_PTP_TX_IN_PROGRESS, @@ -244,11 +243,11 @@ struct i40e_fdir_filter { u32 fd_id; }; -#define I40E_CLOUD_FIELD_OMAC 0x01 -#define I40E_CLOUD_FIELD_IMAC 0x02 -#define I40E_CLOUD_FIELD_IVLAN 0x04 -#define I40E_CLOUD_FIELD_TEN_ID 0x08 -#define I40E_CLOUD_FIELD_IIP 0x10 +#define I40E_CLOUD_FIELD_OMAC BIT(0) +#define I40E_CLOUD_FIELD_IMAC BIT(1) +#define I40E_CLOUD_FIELD_IVLAN BIT(2) +#define I40E_CLOUD_FIELD_TEN_ID BIT(3) +#define I40E_CLOUD_FIELD_IIP BIT(4) #define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC #define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC @@ -1021,6 +1020,7 @@ i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type) return NULL; } void i40e_update_stats(struct i40e_vsi *vsi); +void i40e_update_veb_stats(struct i40e_veb *veb); void i40e_update_eth_stats(struct i40e_vsi *vsi); struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); int i40e_fetch_switch_configuration(struct i40e_pf *pf, diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 814acbe79ffd..72c04881d290 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -610,8 +610,10 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) if (hw->aq.api_maj_ver > 1 || (hw->aq.api_maj_ver == 1 && - hw->aq.api_min_ver >= 8)) + hw->aq.api_min_ver >= 8)) { hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT; + hw->flags |= I40E_HW_FLAG_DROP_MODE; + } if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = I40E_ERR_FIRMWARE_API_VERSION; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 6536023fa074..530613f31527 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -11,8 +11,8 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR_X722 0x0008 -#define I40E_FW_API_VERSION_MINOR_X710 0x0008 +#define I40E_FW_API_VERSION_MINOR_X722 0x0009 +#define I40E_FW_API_VERSION_MINOR_X710 0x0009 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ I40E_FW_API_VERSION_MINOR_X710 : \ @@ -1382,7 +1382,7 @@ struct i40e_aqc_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_FILTER_SHIFT) /* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +/* 0x0001 reserved */ /* 0x0002 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 @@ -1394,6 +1394,9 @@ struct i40e_aqc_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C +/* 0x000D reserved */ +/* 0x000E reserved */ +/* 0x000F reserved */ /* 0x0010 to 0x0017 is for custom filters */ #define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */ #define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ @@ -2051,20 +2054,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); struct i40e_aq_set_mac_config { __le16 max_frame_size; u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 +#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80 u8 tx_timer_priority; /* bitmap */ __le16 tx_timer_value; __le16 fc_refresh_threshold; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 906cf68d3453..d37c6e0e5f08 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include "i40e.h" #include "i40e_type.h" #include "i40e_adminq.h" #include "i40e_prototype.h" @@ -13,7 +14,7 @@ * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. 
**/ -static i40e_status i40e_set_mac_type(struct i40e_hw *hw) +i40e_status i40e_set_mac_type(struct i40e_hw *hw) { i40e_status status = 0; @@ -1577,19 +1578,22 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, cmd_details); - if (status) - break; - - if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) { + switch (hw->aq.asq_last_status) { + case I40E_AQ_RC_EIO: status = I40E_ERR_UNKNOWN_PHY; break; - } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) { + case I40E_AQ_RC_EAGAIN: usleep_range(1000, 2000); total_delay++; status = I40E_ERR_TIMEOUT; + break; + /* also covers I40E_AQ_RC_OK */ + default: + break; } - } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) && - (total_delay < max_delay)); + + } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) && + (total_delay < max_delay)); if (status) return status; @@ -1643,25 +1647,15 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, return status; } -/** - * i40e_set_fc - * @hw: pointer to the hw struct - * @aq_failures: buffer to return AdminQ failure information - * @atomic_restart: whether to enable atomic link restart - * - * Set the requested flow control mode using set_phy_config. - **/ -enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, - bool atomic_restart) +static noinline_for_stack enum i40e_status_code +i40e_set_fc_status(struct i40e_hw *hw, + struct i40e_aq_get_phy_abilities_resp *abilities, + bool atomic_restart) { - enum i40e_fc_mode fc_mode = hw->fc.requested_mode; - struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; - enum i40e_status_code status; + enum i40e_fc_mode fc_mode = hw->fc.requested_mode; u8 pause_mask = 0x0; - *aq_failures = 0x0; - switch (fc_mode) { case I40E_FC_FULL: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; @@ -1677,6 +1671,48 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, break; } + memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); + /* clear the old pause settings */ + config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & + ~(I40E_AQ_PHY_FLAG_PAUSE_RX); + /* set the new abilities */ + config.abilities |= pause_mask; + /* If the abilities have changed, then set the new config */ + if (config.abilities == abilities->abilities) + return 0; + + /* Auto restart link so settings take effect */ + if (atomic_restart) + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* Copy over all the old settings */ + config.phy_type = abilities->phy_type; + config.phy_type_ext = abilities->phy_type_ext; + config.link_speed = abilities->link_speed; + config.eee_capability = abilities->eee_capability; + config.eeer = abilities->eeer_val; + config.low_power_ctrl = abilities->d3_lpan; + config.fec_config = abilities->fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + + return i40e_aq_set_phy_config(hw, &config, NULL); +} + +/** + * i40e_set_fc + * @hw: pointer to the hw struct + * @aq_failures: buffer to return AdminQ failure information + * @atomic_restart: whether to enable atomic link restart + * + * Set the requested flow control mode using set_phy_config. 
+ **/ +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_restart) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + enum i40e_status_code status; + + *aq_failures = 0x0; + /* Get the current phy config */ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); @@ -1685,31 +1721,10 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, return status; } - memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); - /* clear the old pause settings */ - config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & - ~(I40E_AQ_PHY_FLAG_PAUSE_RX); - /* set the new abilities */ - config.abilities |= pause_mask; - /* If the abilities have changed, then set the new config */ - if (config.abilities != abilities.abilities) { - /* Auto restart link so settings take effect */ - if (atomic_restart) - config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; - /* Copy over all the old settings */ - config.phy_type = abilities.phy_type; - config.phy_type_ext = abilities.phy_type_ext; - config.link_speed = abilities.link_speed; - config.eee_capability = abilities.eee_capability; - config.eeer = abilities.eeer_val; - config.low_power_ctrl = abilities.d3_lpan; - config.fec_config = abilities.fec_cfg_curr_mod_ext_info & - I40E_AQ_PHY_FEC_CONFIG_MASK; - status = i40e_aq_set_phy_config(hw, &config, NULL); + status = i40e_set_fc_status(hw, &abilities, atomic_restart); + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; - if (status) - *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; - } /* Update the link info */ status = i40e_update_link_info(hw); if (status) { @@ -2537,7 +2552,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) * i40e_updatelink_status - update status of the HW network link * @hw: pointer to the hw struct **/ -i40e_status i40e_update_link_info(struct i40e_hw *hw) +noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) { struct i40e_aq_get_phy_abilities_resp abilities; i40e_status status = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 292eeb3def10..200a1cb3b536 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -877,7 +877,23 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) return I40E_NOT_SUPPORTED; /* Read LLDP NVM area */ - ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) { + u8 offset = 0; + + if (hw->mac.type == I40E_MAC_XL710) + offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET; + else if (hw->mac.type == I40E_MAC_X722) + offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET; + else + return I40E_NOT_SUPPORTED; + + ret = i40e_read_nvm_module_data(hw, + I40E_SR_EMP_SR_SETTINGS_PTR, + offset, 1, + &lldp_cfg.adminstatus); + } else { + ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + } if (ret) return I40E_ERR_NOT_READY; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index ddb48ae7cce4..2a80c5daa376 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -30,6 +30,8 @@ #define I40E_CEE_SUBTYPE_APP_PRI 4 #define I40E_CEE_MAX_FEAT_TYPE 3 +#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B +#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31 /* Defines for LLDP TLV header */ #define I40E_LLDP_TLV_LEN_SHIFT 0 #define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 55d20acfcf70..99ea543dd245 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1125,10 +1125,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED)); - } else if (strncmp(cmd_buf, "empr", 4) == 0) { - dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); - i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED)); - } else if (strncmp(cmd_buf, "read", 4) == 0) { u32 address; u32 value; @@ -1732,29 +1728,15 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = { **/ void i40e_dbg_pf_init(struct i40e_pf *pf) { - struct dentry *pfile; const char *name = pci_name(pf->pdev); - const struct device *dev = &pf->pdev->dev; pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); - if (!pf->i40e_dbg_pf) - return; - - pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, - &i40e_dbg_command_fops); - if (!pfile) - goto create_failed; - pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, - &i40e_dbg_netdev_ops_fops); - if (!pfile) - goto create_failed; + debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_command_fops); - return; - -create_failed: - dev_info(dev, "debugfs dir/file for %s failed\n", name); - debugfs_remove_recursive(pf->i40e_dbg_pf); + debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_netdev_ops_fops); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 527eb52c5401..41e1240acaea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -711,6 +711,35 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, } /** + * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask + * @req_fec_info: mask request FEC info + * @ks: ethtool ksettings to fill in + **/ +static void i40e_get_settings_link_up_fec(u8 req_fec_info, + struct ethtool_link_ksettings *ks) +{ + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + + if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + } else { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_NONE); + if (I40E_AQ_SET_FEC_AUTO & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + } + } +} + +/** * i40e_get_settings_link_up - Get the Link settings for when link is up * @hw: hw structure * @ks: ethtool ksettings to fill in @@ -769,13 +798,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseSR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - 
ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -892,9 +915,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseKR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); ethtool_link_ksettings_add_link_mode(ks, supported, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, supported, @@ -908,10 +928,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseKR_Full); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); ethtool_link_ksettings_add_link_mode(ks, advertising, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -929,13 +946,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); + break; case I40E_PHY_TYPE_25GBASE_AOC: case I40E_PHY_TYPE_25GBASE_ACC: @@ -945,13 +957,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); + ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -2250,7 +2257,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - struct i40e_veb *veb = pf->veb[pf->lan_veb]; + struct i40e_veb *veb = NULL; unsigned int i; bool veb_stats; u64 *p = data; @@ -2273,8 +2280,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, goto check_data_pointer; veb_stats = ((pf->lan_veb != I40E_NO_VEB) && + (pf->lan_veb < I40E_MAX_VEB) && (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)); + if (veb_stats) { + veb = pf->veb[pf->lan_veb]; + i40e_update_veb_stats(veb); + } + /* If veb stats aren't enabled, pass NULL instead of the veb so that * we initialize stats to zero and update the data pointer * 
intelligently @@ -2329,7 +2342,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) } if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) - return; + goto check_data_pointer; i40e_add_stat_strings(&data, i40e_gstrings_veb_stats); @@ -2341,6 +2354,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); +check_data_pointer: WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, "stat strings count mismatch!"); } @@ -5123,6 +5137,12 @@ static int i40e_get_module_info(struct net_device *netdev, /* Module is not SFF-8472 compliant */ modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) { + /* Module is SFF-8472 compliant but doesn't implement + * Digital Diagnostic Monitoring (DDM). + */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; } else { modinfo->type = ETH_MODULE_SFF_8472; modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 19ce93d7fd0a..163ee8c6311c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include "i40e.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_status.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 994011c38fb4..be24d42280d8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include "i40e.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_type.h" @@ -963,7 +964,7 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes, /** * i40e_hmc_get_object_va - retrieves an object's virtual address - * @hmc_info: pointer to i40e_hmc_info struct + * @hw: the hardware struct, from which we obtain the i40e_hmc_info pointer * @object_base: pointer to u64 to get the va * @rsrc_type: the hmc resource type * @obj_idx: hmc object index @@ -972,16 +973,16 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes, * base pointer. This function is used for LAN Queue contexts. 
**/ static -i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info, - u8 **object_base, - enum i40e_hmc_lan_rsrc_type rsrc_type, - u32 obj_idx) +i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base, + enum i40e_hmc_lan_rsrc_type rsrc_type, + u32 obj_idx) { + struct i40e_hmc_info *hmc_info = &hw->hmc; u32 obj_offset_in_sd, obj_offset_in_pd; - i40e_status ret_code = 0; struct i40e_hmc_sd_entry *sd_entry; struct i40e_hmc_pd_entry *pd_entry; u32 pd_idx, pd_lmt, rel_pd_idx; + i40e_status ret_code = 0; u64 obj_offset_in_fpm; u32 sd_idx, sd_lmt; @@ -1047,7 +1048,7 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; @@ -1068,7 +1069,7 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; @@ -1088,7 +1089,7 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; @@ -1109,7 +1110,7 @@ i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9ebbe3da61bb..6031223eafab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -73,6 +73,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, @@ -534,6 +535,10 @@ void i40e_pf_reset_stats(struct i40e_pf *pf) sizeof(pf->veb[i]->stats)); memset(&pf->veb[i]->stats_offsets, 0, sizeof(pf->veb[i]->stats_offsets)); + memset(&pf->veb[i]->tc_stats, 0, + sizeof(pf->veb[i]->tc_stats)); + memset(&pf->veb[i]->tc_stats_offsets, 0, + sizeof(pf->veb[i]->tc_stats_offsets)); pf->veb[i]->stat_offsets_loaded = false; } } @@ -677,7 +682,7 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi) * i40e_update_veb_stats - Update Switch component statistics * @veb: the VEB being updated **/ -static void i40e_update_veb_stats(struct i40e_veb *veb) +void i40e_update_veb_stats(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; struct i40e_hw *hw = &pf->hw; @@ -2530,6 +2535,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) vsi_name, i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); + } else { + dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n", + vsi->netdev->name, + cur_multipromisc ? 
"entering" : "leaving"); } } @@ -2583,6 +2592,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) return; if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) return; + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) { + set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); + return; + } for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && @@ -2597,6 +2610,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) } } } + clear_bit(__I40E_VF_DISABLE, pf->state); } /** @@ -3360,7 +3374,7 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) err = i40e_configure_tx_ring(vsi->tx_rings[i]); - if (!i40e_enabled_xdp_vsi(vsi)) + if (err || !i40e_enabled_xdp_vsi(vsi)) return err; for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) @@ -6412,50 +6426,6 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) } /** - * i40e_update_dcb_config - * @hw: pointer to the HW struct - * @enable_mib_change: enable MIB change event - * - * Update DCB configuration from the firmware - **/ -static enum i40e_status_code -i40e_update_dcb_config(struct i40e_hw *hw, bool enable_mib_change) -{ - struct i40e_lldp_variables lldp_cfg; - i40e_status ret; - - if (!hw->func_caps.dcb) - return I40E_NOT_SUPPORTED; - - /* Read LLDP NVM area */ - ret = i40e_read_lldp_cfg(hw, &lldp_cfg); - if (ret) - return I40E_ERR_NOT_READY; - - /* Get DCBX status */ - ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); - if (ret) - return ret; - - /* Check the DCBX Status */ - if (hw->dcbx_status == I40E_DCBX_STATUS_DONE || - hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) { - /* Get current DCBX configuration */ - ret = i40e_get_dcb_config(hw); - if (ret) - return ret; - } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { - return I40E_ERR_NOT_READY; - } - - /* Configure the LLDP MIB change event */ - if (enable_mib_change) - ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); - - return ret; -} - -/** * i40e_init_pf_dcb - Initialize DCB configuration * @pf: PF being configured * @@ -6477,7 +6447,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) goto out; } - err = i40e_update_dcb_config(hw, true); + err = i40e_init_dcb(hw, true); if (!err) { /* Device/Function is not DCBX capable */ if ((!hw->func_caps.dcb) || @@ -6599,19 +6569,19 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) } if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { - req_fec = ", Requested FEC: None"; - fec = ", FEC: None"; - an = ", Autoneg: False"; + req_fec = "None"; + fec = "None"; + an = "False"; if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) - an = ", Autoneg: True"; + an = "True"; if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) - fec = ", FEC: CL74 FC-FEC/BASE-R"; + fec = "CL74 FC-FEC/BASE-R"; else if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) - fec = ", FEC: CL108 RS-FEC"; + fec = "CL108 RS-FEC"; /* 'CL108 RS-FEC' should be displayed when RS is requested, or * both RS and FC are requested @@ -6620,14 +6590,19 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) { if (vsi->back->hw.phy.link_info.req_fec_info & I40E_AQ_REQUEST_FEC_RS) - req_fec = ", Requested FEC: CL108 RS-FEC"; + req_fec = "CL108 RS-FEC"; else - req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R"; + req_fec = "CL74 FC-FEC/BASE-R"; } + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", + 
speed, req_fec, fec, an, fc); + } else { + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", + speed, fc); } - netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n", - speed, req_fec, fec, an, fc); } /** @@ -8486,6 +8461,11 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf, lock_acquired); + dev_info(&pf->pdev->dev, + pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? + "FW LLDP is disabled\n" : + "FW LLDP is enabled\n"); + } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; @@ -12561,7 +12541,8 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, if (need_reset && prog) for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->xdp_rings[i]->xsk_umem) - (void)i40e_xsk_async_xmit(vsi->netdev, i); + (void)i40e_xsk_wakeup(vsi->netdev, i, + XDP_WAKEUP_RX); return 0; } @@ -12883,7 +12864,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, - .ndo_xsk_async_xmit = i40e_xsk_async_xmit, + .ndo_xsk_wakeup = i40e_xsk_wakeup, .ndo_dfwd_add_station = i40e_fwd_add, .ndo_dfwd_del_station = i40e_fwd_del, }; @@ -14569,9 +14550,20 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) **/ static bool i40e_check_recovery_mode(struct i40e_pf *pf) { - u32 val = rd32(&pf->hw, I40E_GL_FWSTS); - - if (val & I40E_GL_FWSTS_FWS1B_MASK) { + u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK; + bool is_recovery_mode = false; + + if (pf->hw.mac.type == I40E_MAC_XL710) + is_recovery_mode = + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK || + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK || + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK; + if (pf->hw.mac.type == I40E_MAC_X722) + is_recovery_mode = + val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || + val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK; + if (is_recovery_mode) { dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); set_bit(__I40E_RECOVERY_MODE, pf->state); @@ -14585,6 +14577,51 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf) } /** + * i40e_pf_loop_reset - perform reset in a loop. + * @pf: board private structure + * + * This function is useful when a NIC is about to enter recovery mode. + * When a NIC's internal data structures are corrupted the NIC's + * firmware is going to enter recovery mode. + * Right after a POR it takes about 7 minutes for firmware to enter + * recovery mode. Until that time a NIC is in some kind of intermediate + * state. After that time period the NIC almost surely enters + * recovery mode. The only way for a driver to detect intermediate + * state is to issue a series of pf-resets and check a return value. + * If a PF reset returns success then the firmware could be in recovery + * mode so the caller of this code needs to check for recovery mode + * if this function returns success. There is a little chance that + * firmware will hang in intermediate state forever. + * Since waiting 7 minutes is quite a lot of time this function waits + * 10 seconds and then gives up by returning an error. + * + * Return 0 on success, negative on failure. 
+ **/ +static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) +{ + const unsigned short MAX_CNT = 1000; + const unsigned short MSECS = 10; + struct i40e_hw *hw = &pf->hw; + i40e_status ret; + int cnt; + + for (cnt = 0; cnt < MAX_CNT; ++cnt) { + ret = i40e_pf_reset(hw); + if (!ret) + break; + msleep(MSECS); + } + + if (cnt == MAX_CNT) { + dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); + return ret; + } + + pf->pfr_count++; + return ret; +} + +/** * i40e_init_recovery_mode - initialize subsystems needed in recovery mode * @pf: board private structure * @hw: ptr to the hardware info @@ -14812,14 +14849,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Reset here to make sure all is clean and to define PF 'n' */ i40e_clear_hw(hw); - if (!i40e_check_recovery_mode(pf)) { - err = i40e_pf_reset(hw); - if (err) { - dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); - goto err_pf_reset; - } - pf->pfr_count++; + + err = i40e_set_mac_type(hw); + if (err) { + dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", + err); + goto err_pf_reset; } + + err = i40e_pf_loop_reset(pf); + if (err) { + dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); + goto err_pf_reset; + } + + i40e_check_recovery_mode(pf); + hw->aq.num_arq_entries = I40E_AQ_LEN; hw->aq.num_asq_entries = I40E_AQ_LEN; hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; @@ -15605,8 +15650,7 @@ static void i40e_shutdown(struct pci_dev *pdev) **/ static int __maybe_unused i40e_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_pf *pf = dev_get_drvdata(dev); struct i40e_hw *hw = &pf->hw; /* If we're already suspended, then there is nothing to do */ @@ -15656,8 +15700,7 @@ static int __maybe_unused i40e_suspend(struct device *dev) **/ static int __maybe_unused i40e_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_pf *pf = dev_get_drvdata(dev); int err; /* If we're not suspended, then there is nothing to do */ @@ -15674,7 +15717,7 @@ static int __maybe_unused i40e_resume(struct device *dev) */ err = i40e_restore_interrupt_scheme(pf); if (err) { - dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n", + dev_err(dev, "Cannot restore interrupt scheme: %d\n", err); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index c508b75c3c09..e4d8d20baf3b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -322,6 +322,77 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, } /** + * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location + * @hw: pointer to the HW structure + * @module_ptr: Pointer to module in words with respect to NVM beginning + * @offset: offset in words from module start + * @words_data_size: Words to read from NVM + * @data_ptr: Pointer to memory location where resulting buffer will be stored + **/ +i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, + u8 module_ptr, u16 offset, + u16 words_data_size, + u16 *data_ptr) +{ + i40e_status status; + u16 ptr_value = 0; + u32 flat_offset; + + if (module_ptr != 0) { + status = i40e_read_nvm_word(hw, module_ptr, &ptr_value); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm word failed.Error code: %d.\n", + status); + return I40E_ERR_NVM; + } + } +#define I40E_NVM_INVALID_PTR_VAL 0x7FFF +#define 
I40E_NVM_INVALID_VAL 0xFFFF + + /* Pointer not initialized */ + if (ptr_value == I40E_NVM_INVALID_PTR_VAL || + ptr_value == I40E_NVM_INVALID_VAL) + return I40E_ERR_BAD_PTR; + + /* Check whether the module is in SR mapped area or outside */ + if (ptr_value & I40E_PTR_TYPE) { + /* Pointer points outside of the Shared RAM mapped area */ + ptr_value &= ~I40E_PTR_TYPE; + + /* PtrValue in 4kB units, need to convert to words */ + ptr_value /= 2; + flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset; + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!status) { + status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset, + 2 * words_data_size, + data_ptr, true, NULL); + i40e_release_nvm(hw); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm aq failed.Error code: %d.\n", + status); + return I40E_ERR_NVM; + } + } else { + return I40E_ERR_NVM; + } + } else { + /* Read from the Shadow RAM */ + status = i40e_read_nvm_buffer(hw, ptr_value + offset, + &words_data_size, data_ptr); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm buffer failed.Error code: %d.\n", + status); + } + } + + return status; +} + +/** * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). @@ -430,6 +501,36 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, } /** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + i40e_status ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, + data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); + } + + return ret_code; +} + +/** * i40e_write_nvm_aq - Writes Shadow RAM. * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index a07574bff550..c302ef2524f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -18,7 +18,10 @@ * actual OS primitives */ -#define hw_dbg(hw, S, A...) do {} while (0) +#define hw_dbg(hw, S, A...) 
\ +do { \ + dev_dbg(&((struct i40e_pf *)hw->back)->pdev->dev, S, ##A); \ +} while (0) #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #define rd32(a, reg) readl((a)->hw_addr + (reg)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index eac88bcc6c06..5250441bf75b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -315,6 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, void i40e_release_nvm(struct i40e_hw *hw); i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); +i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, + u8 module_ptr, u16 offset, + u16 words_data_size, + u16 *data_ptr); +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum); @@ -326,6 +332,8 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); +i40e_status i40e_set_mac_type(struct i40e_hw *hw); + extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 11394a52e21c..9bf1ad4319f5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -725,7 +725,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF; /* Set the previous "reset" time to the current Kernel clock time */ - pf->ptp_prev_hw_time = ktime_to_timespec64(ktime_get_real()); + ktime_get_real_ts64(&pf->ptp_prev_hw_time); pf->ptp_reset_start = ktime_get(); return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 52e3680c57f8..d35d690ca10f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -58,7 +58,7 @@ #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT) #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ #define I40E_PF_ARQT_ARQT_SHIFT 0 #define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) @@ -81,7 +81,7 @@ #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT) #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ #define I40E_PF_ATQT_ATQT_SHIFT 0 #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) @@ -108,7 +108,7 @@ #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define 
I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT) #define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ARQT_MAX_INDEX 127 #define I40E_VF_ARQT_ARQT_SHIFT 0 @@ -136,7 +136,7 @@ #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT) #define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ATQT_MAX_INDEX 127 #define I40E_VF_ATQT_ATQT_SHIFT 0 @@ -259,7 +259,7 @@ #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 -#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) #define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 #define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) @@ -363,6 +363,12 @@ #define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) #define I40E_GL_FWSTS_FWS1B_SHIFT 16 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ #define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 #define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) @@ -503,7 +509,7 @@ #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 -#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSRWD_MAX_INDEX 3 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 @@ -1242,14 +1248,14 @@ #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 -#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) #define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 #define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, 
I40E_PFLAN_QALLOC_LASTQ_SHIFT) #define I40E_PFLAN_QALLOC_VALID_SHIFT 31 -#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT) #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ #define I40E_QRX_ENA_MAX_INDEX 1535 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0 @@ -1658,7 +1664,7 @@ #define I40E_GLNVM_SRCTL_START_SHIFT 30 #define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) #define I40E_GLNVM_SRCTL_DONE_SHIFT 31 -#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT) #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 #define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) @@ -3025,7 +3031,7 @@ #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 #define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 -#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT) #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VP_MDET_RX_MAX_INDEX 127 #define I40E_VP_MDET_RX_VALID_SHIFT 0 @@ -3161,7 +3167,7 @@ #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ #define I40E_VF_ARQT1_ARQT_SHIFT 0 #define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) @@ -3184,7 +3190,7 @@ #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ #define I40E_VF_ATQT1_ATQT_SHIFT 0 #define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2a2fe3ec7926..e3f29dc8b290 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3262,7 +3262,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) **/ bool __i40e_chk_linearize(struct sk_buff *skb) { - const struct skb_frag_struct *frag, *stale; + const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ @@ -3306,7 +3306,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb) * descriptor associated with the fragment. 
 		 */
 		if (stale_size > I40E_MAX_DATA_PER_TXD) {
-			int align_pad = -(stale->page_offset) &
+			int align_pad = -(skb_frag_off(stale)) &
 					(I40E_MAX_READ_REQ_SIZE - 1);
 
 			sum -= align_pad;
@@ -3349,7 +3349,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 {
 	unsigned int data_len = skb->data_len;
 	unsigned int size = skb_headlen(skb);
-	struct skb_frag_struct *frag;
+	skb_frag_t *frag;
 	struct i40e_tx_buffer *tx_bi;
 	struct i40e_tx_desc *tx_desc;
 	u16 i = tx_ring->next_to_use;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 100e92d2982f..36d37f31a287 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -521,7 +521,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
  **/
 static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
 {
-	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	int count = 0, size = skb_headlen(skb);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 8f43aa47c263..b43ec94a0f29 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -443,6 +443,7 @@ struct i40e_nvm_access {
 #define I40E_MODULE_SFF_8472_COMP	0x5E
 #define I40E_MODULE_SFF_8472_SWAP	0x5C
 #define I40E_MODULE_SFF_ADDR_MODE	0x04
+#define I40E_MODULE_SFF_DDM_IMPLEMENTED	0x40
 #define I40E_MODULE_TYPE_QSFP_PLUS	0x0D
 #define I40E_MODULE_TYPE_QSFP28		0x11
 #define I40E_MODULE_QSFP_MAX_LEN	640
@@ -623,6 +624,7 @@ struct i40e_hw {
 #define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
 #define I40E_HW_FLAG_FW_LLDP_STOPPABLE      BIT_ULL(4)
 #define I40E_HW_FLAG_FW_LLDP_PERSISTENT     BIT_ULL(5)
+#define I40E_HW_FLAG_DROP_MODE              BIT_ULL(7)
 	u64 flags;
 
 	/* Used in set switch config AQ command */
@@ -1316,6 +1318,7 @@ struct i40e_hw_port_stats {
 #define I40E_SR_VPD_PTR			0x2F
 #define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR	0x3E
 #define I40E_SR_SW_CHECKSUM_WORD	0x3F
+#define I40E_SR_EMP_SR_SETTINGS_PTR	0x48
 /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
 #define I40E_SR_VPD_MODULE_MAX_SIZE	1024
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02b09a8ad54c..3d2440838822 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -55,7 +55,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
 
 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
 	pfe.severity = PF_EVENT_SEVERITY_INFO;
-	if (vf->link_forced) {
+
+	/* Always report link is down if the VF queues aren't enabled */
+	if (!vf->queues_enabled) {
+		pfe.event_data.link_event.link_status = false;
+		pfe.event_data.link_event.link_speed = 0;
+	} else if (vf->link_forced) {
 		pfe.event_data.link_event.link_status = vf->link_up;
 		pfe.event_data.link_event.link_speed =
 			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
@@ -65,6 +70,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
 		pfe.event_data.link_event.link_speed =
 			i40e_virtchnl_link_speed(ls->link_speed);
 	}
+
 	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
 			       0, (u8 *)&pfe, sizeof(pfe), NULL);
 }
@@ -2037,30 +2043,33 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
 		alluni = true;
 	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
 						 alluni);
-	if (!aq_ret) {
-		if (allmulti) {
+	if (aq_ret)
+		goto err_out;
+
+	if (allmulti) {
+		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
+				      &vf->vf_states))
 			dev_info(&pf->pdev->dev,
 				 "VF %d successfully set multicast promiscuous mode\n",
 				 vf->vf_id);
-			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
-		} else {
-			dev_info(&pf->pdev->dev,
-				 "VF %d successfully unset multicast promiscuous mode\n",
-				 vf->vf_id);
-			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
-		}
-		if (alluni) {
+	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
+				      &vf->vf_states))
+		dev_info(&pf->pdev->dev,
+			 "VF %d successfully unset multicast promiscuous mode\n",
+			 vf->vf_id);
+
+	if (alluni) {
+		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
+				      &vf->vf_states))
 			dev_info(&pf->pdev->dev,
 				 "VF %d successfully set unicast promiscuous mode\n",
 				 vf->vf_id);
-			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
-		} else {
-			dev_info(&pf->pdev->dev,
-				 "VF %d successfully unset unicast promiscuous mode\n",
-				 vf->vf_id);
-			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
-		}
-	}
+	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
+				      &vf->vf_states))
+		dev_info(&pf->pdev->dev,
+			 "VF %d successfully unset unicast promiscuous mode\n",
+			 vf->vf_id);
+err_out:
 	/* send the response to the VF */
 	return i40e_vc_send_resp_to_vf(vf,
@@ -2153,7 +2162,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 		 * VF does not know about these additional VSIs and all
 		 * it cares is about its own queues. PF configures these queues
 		 * to its appropriate VSIs based on TC mapping
-		 **/
+		 */
 		if (vf->adq_enabled) {
 			if (idx >= ARRAY_SIZE(vf->ch)) {
 				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
@@ -2364,6 +2373,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
 		}
 	}
 
+	vf->queues_enabled = true;
+
 error_param:
 	/* send the response to the VF */
 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2385,6 +2396,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
 	struct i40e_pf *pf = vf->pf;
 	i40e_status aq_ret = 0;
 
+	/* Immediately mark queues as disabled */
+	vf->queues_enabled = false;
+
 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
 		aq_ret = I40E_ERR_PARAM;
 		goto error_param;
@@ -3953,10 +3967,15 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	/* When the VF is resetting wait until it is done.
 	 * It can take up to 200 milliseconds,
 	 * but wait for up to 300 milliseconds to be safe.
+	 * If the VF is indeed in reset, the vsi pointer has
+	 * to show on the newly loaded vsi under pf->vsi[id].
 	 */
 	for (i = 0; i < 15; i++) {
-		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
+		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+			if (i > 0)
+				vsi = pf->vsi[vf->lan_vsi_idx];
 			break;
+		}
 		msleep(20);
 	}
 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
@@ -4244,7 +4263,8 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 	if (min_tx_rate) {
 		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
 			min_tx_rate, vf_id);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto error;
 	}
 
 	vf = &pf->vf[vf_id];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index f65cc0c16550..7164b9bb294f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -99,6 +99,7 @@ struct i40e_vf {
 	unsigned int tx_rate;	/* Tx bandwidth limit in Mbps */
 	bool link_forced;
 	bool link_up;		/* only valid if VF link is forced */
+	bool queues_enabled;	/* true if the VF queues are enabled */
 	bool spoofchk;
 	u16 num_mac;
 	u16 num_vlan;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 32bad014d76c..b1c3227ae4ab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -116,7 +116,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 			return err;
 
 		/* Kick start the NAPI context so that receiving will start */
-		err = i40e_xsk_async_xmit(vsi->netdev, qid);
+		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
 		if (err)
 			return err;
 	}
@@ -157,6 +157,11 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
 		err = i40e_queue_pair_enable(vsi, qid);
 		if (err)
 			return err;
+
+		/* Kick start the NAPI context so that receiving will start */
+		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
+		if (err)
+			return err;
 	}
 
 	return 0;
@@ -190,9 +195,11 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
  **/
 static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 {
+	struct xdp_umem *umem = rx_ring->xsk_umem;
 	int err, result = I40E_XDP_PASS;
 	struct i40e_ring *xdp_ring;
 	struct bpf_prog *xdp_prog;
+	u64 offset;
 	u32 act;
 
 	rcu_read_lock();
@@ -201,7 +208,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 	 */
 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 	act = bpf_prog_run_xdp(xdp_prog, xdp);
-	xdp->handle += xdp->data - xdp->data_hard_start;
+	offset = xdp->data - xdp->data_hard_start;
+
+	xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
+
 	switch (act) {
 	case XDP_PASS:
 		break;
@@ -262,7 +272,7 @@ static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
 	bi->addr = xdp_umem_get_data(umem, handle);
 	bi->addr += hr;
 
-	bi->handle = handle + umem->headroom;
+	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
 
 	xsk_umem_discard_addr(umem);
 	return true;
@@ -299,7 +309,7 @@ static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
 	bi->addr = xdp_umem_get_data(umem, handle);
 	bi->addr += hr;
 
-	bi->handle = handle + umem->headroom;
+	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
 
 	xsk_umem_discard_addr_rq(umem);
 	return true;
@@ -420,8 +430,6 @@ static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
 				    struct i40e_rx_buffer *old_bi)
 {
 	struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
-	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
-	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
 	u16 nta = rx_ring->next_to_alloc;
 
 	/* update, and store next to alloc */
@@ -429,14 +437,9 @@ static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
 	/* transfer page from old buffer to new buffer */
-	new_bi->dma = old_bi->dma & mask;
-	new_bi->dma += hr;
-
-	new_bi->addr = (void *)((unsigned long)old_bi->addr & mask);
-	new_bi->addr += hr;
-
-	new_bi->handle = old_bi->handle & mask;
-	new_bi->handle += rx_ring->xsk_umem->headroom;
+	new_bi->dma = old_bi->dma;
+	new_bi->addr = old_bi->addr;
+	new_bi->handle = old_bi->handle;
 
 	old_bi->addr = NULL;
 }
@@ -471,7 +474,8 @@ void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
 	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
 	bi->addr += hr;
 
-	bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
+	bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
+					    rx_ring->xsk_umem->headroom);
 }
 
 /**
@@ -626,6 +630,15 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 
 	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
 	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
+
+	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+			xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+		else
+			xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+
+		return (int)total_rx_packets;
+	}
 	return failure ? budget : (int)total_rx_packets;
 }
@@ -681,6 +694,8 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 		i40e_xdp_ring_update_tail(xdp_ring);
 		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+		if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
+			xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
 	}
 
 	return !!budget && work_done;
 }
@@ -759,19 +774,27 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
 	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
 
 out_xmit:
+	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
+		if (tx_ring->next_to_clean == tx_ring->next_to_use)
+			xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+		else
+			xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
+	}
+
 	xmit_done = i40e_xmit_zc(tx_ring, budget);
 
 	return work_done && xmit_done;
 }
 
 /**
- * i40e_xsk_async_xmit - Implements the ndo_xsk_async_xmit
+ * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
  * @dev: the netdevice
  * @queue_id: queue id to wake up
+ * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
  *
  * Returns <0 for errors, 0 otherwise.
  **/
-int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id)
+int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index 8cc0a2e7d9a2..9ed59c14eb55 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -18,6 +18,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
 			   struct i40e_ring *tx_ring, int napi_budget);
-int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id);
+int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
 
 #endif /* _I40E_XSK_H_ */
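Note on the skb_frag_t conversion above (not part of the patch itself): i40e_txrx.c and i40e_txrx.h move from struct skb_frag_struct to the skb_frag_t typedef, and direct page_offset reads become skb_frag_off() calls, as part of a tree-wide conversion to the fragment accessors. A minimal sketch of the accessor style, using a hypothetical helper name:

#include <linux/skbuff.h>

/* Hypothetical helper for illustration: walk an skb's paged fragments with
 * the skb_frag_t accessors instead of touching frag->page_offset directly.
 */
static unsigned int example_frag_bytes(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int i, total = 0;

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];

		/* skb_frag_off() replaces the old frag->page_offset read */
		pr_debug("frag %u: off=%u size=%u\n",
			 i, skb_frag_off(frag), skb_frag_size(frag));
		total += skb_frag_size(frag);
	}

	return total;
}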
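The i40e_vc_config_promiscuous_mode_msg() hunk above replaces set_bit()/clear_bit() plus unconditional dev_info() calls with test_and_set_bit()/test_and_clear_bit(), so a message is logged only when the VF's promiscuous state actually flips. A small sketch of that pattern with hypothetical names, not the driver's code:

#include <linux/bitops.h>
#include <linux/device.h>

enum { EXAMPLE_STATE_PROMISC };	/* hypothetical state bit */

static void example_report_promisc(struct device *dev, unsigned long *state,
				   bool enable)
{
	if (enable) {
		/* test_and_set_bit() returns the previous value: log only on 0 -> 1 */
		if (!test_and_set_bit(EXAMPLE_STATE_PROMISC, state))
			dev_info(dev, "promiscuous mode enabled\n");
	} else if (test_and_clear_bit(EXAMPLE_STATE_PROMISC, state)) {
		/* likewise, log only on 1 -> 0 */
		dev_info(dev, "promiscuous mode disabled\n");
	}
}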
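The i40e_xsk.c/i40e_xsk.h hunks rename i40e_xsk_async_xmit() to i40e_xsk_wakeup() for the AF_XDP need_wakeup support: one callback now serves both Rx and Tx kicks, selected via the XDP_WAKEUP_RX/XDP_WAKEUP_TX flags, which i40e ignores because Rx and Tx share a single NAPI context. An illustrative sketch of how such a callback is exposed through net_device_ops; the ops table below is hypothetical (the driver's real table lives in i40e_main.c, not shown in this excerpt):

#include <linux/netdevice.h>
#include <net/xdp_sock.h>	/* XDP_WAKEUP_RX / XDP_WAKEUP_TX */

int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);

/* Hypothetical ops table for illustration only */
static const struct net_device_ops example_netdev_ops = {
	.ndo_xsk_wakeup	= i40e_xsk_wakeup,	/* replaces .ndo_xsk_async_xmit */
};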