Diffstat (limited to 'drivers/net/ethernet/intel')
63 files changed, 2651 insertions, 1148 deletions
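Note on the recurring ethtool change in this series: several hunks below (e1000, e1000e, fm10k, i40e, iavf, ice) add a .supported_coalesce_params mask to the driver's ethtool_ops. The ethtool core checks a user's coalesce request against this mask and rejects unsupported fields before the driver's set_coalesce callback runs, which is why the ice hunk further down can delete its hand-rolled ice_is_coalesce_param_invalid() helper. A minimal sketch of the pattern, using a hypothetical example driver (illustrative only, not part of the diff; the example_* names are assumptions):

/* The core validates ethtool -C requests against this mask, so the
 * driver's coalesce callbacks only ever see parameters the driver
 * declared support for.
 */
static const struct ethtool_ops example_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_coalesce = example_get_coalesce,	/* hypothetical callbacks */
	.set_coalesce = example_set_coalesce,
};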
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 154e2e818ec6..ad34e4335df2 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -294,6 +294,7 @@ config ICE tristate "Intel(R) Ethernet Connection E800 Series Support" default n depends on PCI_MSI + select NET_DEVLINK ---help--- This driver supports Intel(R) Ethernet Connection E800 Series of devices. For more information on how to identify your adapter, go diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index be56e631d693..6f45df5690d4 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1852,6 +1852,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, } static const struct ethtool_ops e1000_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, .get_drvinfo = e1000_get_drvinfo, .get_regs_len = e1000_get_regs_len, .get_regs = e1000_get_regs, diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 2bced34c19ba..f7103356ef56 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -2715,11 +2715,7 @@ static int e1000_tso(struct e1000_adapter *adapter, cmd_length = E1000_TXD_CMD_IP; ipcse = skb_transport_offset(skb) - 1; } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + tcp_v6_gso_csum_prep(skb); ipcse = 0; } ipcss = skb_network_offset(skb); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index adce7e319b9e..1d47e2503072 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -897,6 +897,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pch_cnp: /* fall through */ case e1000_pch_tgp: + case e1000_pch_adp: mask |= BIT(18); break; default: @@ -1561,6 +1562,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: + case e1000_pch_adp: fext_nvm11 = er32(FEXTNVM11); fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; ew32(FEXTNVM11, fext_nvm11); @@ -2305,6 +2307,7 @@ static int e1000e_get_ts_info(struct net_device *netdev, } static const struct ethtool_ops e1000_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, .get_drvinfo = e1000_get_drvinfo, .get_regs_len = e1000_get_regs_len, .get_regs = e1000_get_regs, diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index f556163481cb..b1447221669e 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -97,6 +97,11 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_TGP_I219_LM14 0x15F9 #define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA #define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4 +#define E1000_DEV_ID_PCH_TGP_I219_V15 0x15F5 +#define E1000_DEV_ID_PCH_ADP_I219_LM16 0x1A1E +#define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F +#define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C +#define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D #define E1000_REVISION_4 4 @@ -121,6 +126,7 @@ enum e1000_mac_type { e1000_pch_spt, e1000_pch_cnp, e1000_pch_tgp, + e1000_pch_adp, }; enum e1000_media_type { diff --git 
a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index b4135c50e905..735bf25952fc 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -317,6 +317,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: + case e1000_pch_adp: if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -460,6 +461,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: + case e1000_pch_adp: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ @@ -703,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: + case e1000_pch_adp: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@ -1642,6 +1645,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: + case e1000_pch_adp: rc = e1000_init_phy_params_pchlan(hw); break; default: @@ -2095,6 +2099,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: + case e1000_pch_adp: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -3133,6 +3138,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: + case e1000_pch_adp: bank1_offset = nvm->flash_bank_size; act_offset = E1000_ICH_NVM_SIG_WORD; @@ -4077,6 +4083,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: + case e1000_pch_adp: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index e531976f8a67..51512a73fdd0 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -1363,7 +1363,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) if (!(swsm & E1000_SWSM_SMBI)) break; - usleep_range(50, 100); + udelay(100); i++; } @@ -1381,7 +1381,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) if (er32(SWSM) & E1000_SWSM_SWESMBI) break; - usleep_range(50, 100); + udelay(100); } if (i == timeout) { diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 0f02c7a5ee9b..177c6da80c57 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3536,6 +3536,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) break; case e1000_pch_cnp: case e1000_pch_tgp: + case e1000_pch_adp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ incperiod = INCPERIOD_24MHZ; @@ -3807,7 +3808,7 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter) tdt = er32(TDT(0)); BUG_ON(tdt != tx_ring->next_to_use); tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); - tx_desc->buffer_addr = tx_ring->dma; + tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma); tx_desc->lower.data = cpu_to_le32(txd_lower | size); tx_desc->upper.data = 0; @@ -4049,6 +4050,7 @@ void e1000e_reset(struct e1000_adapter *adapter) case e1000_pch_cnp: /* fall-through */ case e1000_pch_tgp: + case e1000_pch_adp: fc->refresh_time = 0xFFFF; 
fc->pause_time = 0xFFFF; @@ -5461,10 +5463,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb, cmd_length = E1000_TXD_CMD_IP; ipcse = skb_transport_offset(skb) - 1; } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + tcp_v6_gso_csum_prep(skb); ipcse = 0; } ipcss = skb_network_offset(skb); @@ -7759,6 +7758,11 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index eaa5a0fb99f0..439fda2f5368 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -297,6 +297,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) case e1000_pch_cnp: /* fall-through */ case e1000_pch_tgp: + case e1000_pch_adp: if ((hw->mac.type < e1000_pch_lpt) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { adapter->ptp_clock_info.max_adj = 24000000 - 1; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index f306084ca12c..5b78362b82ac 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -41,7 +41,7 @@ struct fm10k_l2_accel { u16 count; u16 dglort; struct rcu_head rcu; - struct net_device *macvlan[0]; + struct net_device *macvlan[]; }; enum fm10k_ring_state_t { @@ -198,7 +198,7 @@ struct fm10k_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ /* for dynamic allocation of rings associated with this q_vector */ - struct fm10k_ring ring[0] ____cacheline_internodealigned_in_smp; + struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp; }; enum fm10k_ring_f_enum { @@ -218,7 +218,7 @@ struct fm10k_iov_data { unsigned int num_vfs; unsigned int next_vf_mbx; struct rcu_head rcu; - struct fm10k_vf_info vf_info[0]; + struct fm10k_vf_info vf_info[]; }; struct fm10k_udp_port { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 68edf55ac906..37fbc646deb9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1151,6 +1151,8 @@ static int fm10k_set_channels(struct net_device *dev, } static const struct ethtool_ops fm10k_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE, .get_strings = fm10k_get_strings, .get_sset_count = fm10k_get_sset_count, .get_ethtool_stats = fm10k_get_ethtool_stats, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 4833187bd259..e95b8da45e07 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -334,13 +334,13 @@ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash); struct 
i40e_ddp_profile_list { u32 p_count; - struct i40e_profile_info p_info[0]; + struct i40e_profile_info p_info[]; }; struct i40e_ddp_old_profile_list { struct list_head list; size_t old_ddp_size; - u8 old_ddp_buf[0]; + u8 old_ddp_buf[]; }; /* macros related to FLX_PIT */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 317f3f1458db..aa8026b1eb81 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -5249,6 +5249,11 @@ static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = { }; static const struct ethtool_ops i40e_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_USE_ADAPTIVE | + ETHTOOL_COALESCE_RX_USECS_HIGH | + ETHTOOL_COALESCE_TX_USECS_HIGH, .get_drvinfo = i40e_get_drvinfo, .get_regs_len = i40e_get_regs_len, .get_regs = i40e_get_regs, diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index bd1b1ed323f4..bcd11b4b29df 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -81,7 +81,7 @@ struct iavf_vsi { #define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i])) #define IAVF_TX_CTXTDESC(R, i) \ (&(((struct iavf_tx_context_desc *)((R)->desc))[i])) -#define IAVF_MAX_REQ_QUEUES 4 +#define IAVF_MAX_REQ_QUEUES 16 #define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4) #define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4) diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 84c3d8d97ef6..2c39d46b6138 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -860,7 +860,7 @@ static void iavf_get_channels(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); /* Report maximum channels */ - ch->max_combined = IAVF_MAX_REQ_QUEUES; + ch->max_combined = adapter->vsi_res->num_queue_pairs; ch->max_other = NONQ_VECS; ch->other_count = NONQ_VECS; @@ -881,14 +881,7 @@ static int iavf_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct iavf_adapter *adapter = netdev_priv(netdev); - int num_req = ch->combined_count; - - if (num_req != adapter->num_active_queues && - !(adapter->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) { - dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n"); - return -EINVAL; - } + u32 num_req = ch->combined_count; if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && adapter->num_tc) { @@ -899,14 +892,19 @@ static int iavf_set_channels(struct net_device *netdev, /* All of these should have already been checked by ethtool before this * even gets to us, but just to be sure. 
*/ - if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES) + if (num_req > adapter->vsi_res->num_queue_pairs) return -EINVAL; + if (num_req == adapter->num_active_queues) + return 0; + if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS) return -EINVAL; adapter->num_req_queues = num_req; - return iavf_request_queues(adapter, num_req); + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; + iavf_schedule_reset(adapter); + return 0; } /** @@ -998,6 +996,10 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, } static const struct ethtool_ops iavf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_USE_ADAPTIVE, .get_drvinfo = iavf_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = iavf_get_ringparam, diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 62fe56ddcb6e..2050649848ba 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3061,9 +3061,6 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter, static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, struct flow_cls_offload *cls_flower) { - if (cls_flower->common.chain_index) - return -EOPNOTSUPP; - switch (cls_flower->command) { case FLOW_CLS_REPLACE: return iavf_configure_clsflower(adapter, cls_flower); @@ -3087,6 +3084,11 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { + struct iavf_adapter *adapter = cb_priv; + + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSFLOWER: return iavf_setup_tc_cls_flower(cb_priv, type_data); @@ -3448,7 +3450,7 @@ int iavf_process_config(struct iavf_adapter *adapter) } if (num_req_queues && - num_req_queues != adapter->vsi_res->num_queue_pairs) { + num_req_queues > adapter->vsi_res->num_queue_pairs) { /* Problem. The PF gave us fewer queues than what we had * negotiated in our request. Need a reset to see if we can't * get back to a working state. diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 1ab9cb339acb..d58374c2c33d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -397,33 +397,6 @@ void iavf_map_queues(struct iavf_adapter *adapter) } /** - * iavf_request_queues - * @adapter: adapter structure - * @num: number of requested queues - * - * We get a default number of queues from the PF. This enables us to request a - * different number. 
Returns 0 on success, negative on failure - **/ -int iavf_request_queues(struct iavf_adapter *adapter, int num) -{ - struct virtchnl_vf_res_request vfres; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n", - adapter->current_op); - return -EBUSY; - } - - vfres.num_queue_pairs = min_t(int, num, num_online_cpus()); - - adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; - adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; - return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, - (u8 *)&vfres, sizeof(vfres)); -} - -/** * iavf_add_ether_addrs * @adapter: adapter structure * diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 59544b0fc086..29c6c6743450 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -19,6 +19,7 @@ ice-y := ice_main.o \ ice_txrx.o \ ice_flex_pipe.o \ ice_flow.o \ + ice_devlink.o \ ice_ethtool.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index cb10abb14e11..5c11448bfbb3 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -34,6 +34,7 @@ #include <linux/ctype.h> #include <linux/bpf.h> #include <linux/avf/virtchnl.h> +#include <net/devlink.h> #include <net/ipv6.h> #include <net/xdp_sock.h> #include "ice_devids.h" @@ -60,7 +61,6 @@ extern const char ice_drv_ver[]; #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) #define ICE_AQ_LEN 64 #define ICE_MBXSQ_LEN 64 -#define ICE_MBXRQ_LEN 512 #define ICE_MIN_MSIX 2 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 @@ -70,7 +70,6 @@ extern const char ice_drv_ver[]; #define ICE_Q_WAIT_RETRY_LIMIT 10 #define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT) #define ICE_MAX_LG_RSS_QS 256 -#define ICE_MAX_SMALL_RSS_QS 8 #define ICE_RES_VALID_BIT 0x8000 #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) #define ICE_INVAL_Q_INDEX 0xffff @@ -212,6 +211,8 @@ enum ice_state { __ICE_SERVICE_SCHED, __ICE_SERVICE_DIS, __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ + __ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ + __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ __ICE_STATE_NBITS /* must be last */ }; @@ -340,12 +341,18 @@ enum ice_pf_flags { ICE_FLAG_FW_LLDP_AGENT, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_FLAG_LEGACY_RX, + ICE_FLAG_MDD_AUTO_RESET_VF, ICE_PF_FLAGS_NBITS /* must be last */ }; struct ice_pf { struct pci_dev *pdev; + /* devlink port data */ + struct devlink_port devlink_port; + + struct devlink_region *nvm_region; + /* OS reserved IRQ details */ struct msix_entry *msix_entries; struct ice_res_tracker *irq_tracker; @@ -361,8 +368,10 @@ struct ice_pf { struct ice_vf *vf; int num_alloc_vfs; /* actual number of VFs allocated */ u16 num_vfs_supported; /* num VFs supported for this PF */ - u16 num_vf_qps; /* num queue pairs per VF */ - u16 num_vf_msix; /* num vectors per VF */ + u16 num_qps_per_vf; + u16 num_msix_per_vf; + /* used to ratelimit the MDD event logging */ + unsigned long last_printed_mdd_jiffies; DECLARE_BITMAP(state, __ICE_STATE_NBITS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h 
b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 6873998cf145..2381b4014ed6 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1232,6 +1232,7 @@ struct ice_aqc_sff_eeprom { * NVM Update commands (indirect 0x0703) */ struct ice_aqc_nvm { +#define ICE_AQC_NVM_MAX_OFFSET 0xFFFFFF __le16 offset_low; u8 offset_high; u8 cmd_flags; @@ -1250,6 +1251,8 @@ struct ice_aqc_nvm { __le32 addr_low; }; +#define ICE_AQC_NVM_START_POINT 0 + /* NVM Checksum Command (direct, 0x0706) */ struct ice_aqc_nvm_checksum { u8 flags; @@ -1661,6 +1664,13 @@ struct ice_aqc_get_pkg_info_resp { struct ice_aqc_get_pkg_info pkg_info[1]; }; +/* Lan Queue Overflow Event (direct, 0x1001) */ +struct ice_aqc_event_lan_overflow { + __le32 prtdcb_ruptq; + __le32 qtx_ctl; + u8 reserved[8]; +}; + /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -1730,6 +1740,7 @@ struct ice_aq_desc { struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_set_event_mask set_event_mask; struct ice_aqc_get_link_status get_link_status; + struct ice_aqc_event_lan_overflow lan_overflow; } params; }; @@ -1756,6 +1767,7 @@ enum ice_aq_err { ICE_AQ_RC_ENOMEM = 9, /* Out of memory */ ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ ICE_AQ_RC_EEXIST = 13, /* Object already exists */ + ICE_AQ_RC_EINVAL = 14, /* Invalid argument */ ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */ ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */ ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */ @@ -1860,6 +1872,9 @@ enum ice_adminq_opc { ice_aqc_opc_update_pkg = 0x0C42, ice_aqc_opc_get_pkg_info_list = 0x0C43, + /* Standalone Commands/Events */ + ice_aqc_opc_event_lan_overflow = 0x1001, + /* debug commands */ ice_aqc_opc_fw_logging = 0xFF09, ice_aqc_opc_fw_logging_info = 0xFF10, diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 81885efadc7a..a19cd6f5436b 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -203,8 +203,7 @@ static void ice_cfg_itr_gran(struct ice_hw *hw) */ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc) { - WARN_ONCE(ice_ring_is_xdp(ring) && tc, - "XDP ring can't belong to TC other than 0"); + WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n"); /* Idea here for calculation is that we subtract the number of queue * count from TC that ring belongs to from it's absolute queue index @@ -247,7 +246,6 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) */ switch (vsi->type) { case ICE_VSI_LB: - /* fall through */ case ICE_VSI_PF: tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; break; @@ -387,8 +385,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring) /* Enable Flexible Descriptors in the queue context which * allows this driver to select a specific receive descriptor format */ + regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); if (vsi->type != ICE_VSI_VF) { - regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & QRXFLXP_CNTXT_RXDID_IDX_M; @@ -399,8 +397,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring) regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & QRXFLXP_CNTXT_RXDID_PRIO_M; - wr32(hw, QRXFLXP_CNTXT(pf_q), regval); + } else { + regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M | + QRXFLXP_CNTXT_RXDID_PRIO_M | + QRXFLXP_CNTXT_TS_M); } + wr32(hw, QRXFLXP_CNTXT(pf_q), regval); /* Absolute queue number out of 
2K needs to be passed */ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); @@ -459,17 +461,20 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg) } /** - * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring + * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait * @vsi: the VSI being configured - * @ena: start or stop the Rx rings - * @rxq_idx: Rx queue index + * @ena: start or stop the Rx ring + * @rxq_idx: 0-based Rx queue index for the VSI passed in + * @wait: wait or don't wait for configuration to finish in hardware + * + * Return 0 on success and negative on error. */ -int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) +int +ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait) { int pf_q = vsi->rxq_map[rxq_idx]; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - int ret = 0; u32 rx_reg; rx_reg = rd32(hw, QRX_CTRL(pf_q)); @@ -485,13 +490,30 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) rx_reg &= ~QRX_CTRL_QENA_REQ_M; wr32(hw, QRX_CTRL(pf_q), rx_reg); - /* wait for the change to finish */ - ret = ice_pf_rxq_wait(pf, pf_q, ena); - if (ret) - dev_err(ice_pf_to_dev(pf), "VSI idx %d Rx ring %d %sable timeout\n", - vsi->idx, pf_q, (ena ? "en" : "dis")); + if (!wait) + return 0; + + ice_flush(hw); + return ice_pf_rxq_wait(pf, pf_q, ena); +} - return ret; +/** + * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started + * @vsi: the VSI being configured + * @ena: true/false to verify Rx ring has been enabled/disabled respectively + * @rxq_idx: 0-based Rx queue index for the VSI passed in + * + * This routine will wait for the given Rx queue of the VSI to reach the + * enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach + * the requested state after multiple retries; else will return 0 in case of + * success. 
+ */ +int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) +{ + int pf_q = vsi->rxq_map[rxq_idx]; + struct ice_pf *pf = vsi->back; + + return ice_pf_rxq_wait(pf, pf_q, ena); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h index 407995e8e944..44efdb627043 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.h +++ b/drivers/net/ethernet/intel/ice/ice_base.h @@ -8,7 +8,9 @@ int ice_setup_rx_ctx(struct ice_ring *ring); int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg); -int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx); +int +ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait); +int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx); int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi); void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi); void ice_vsi_free_q_vectors(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 04d5db0a25bf..2c0d8fd3d5cd 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -6,7 +6,7 @@ #include "ice_adminq_cmd.h" #include "ice_flow.h" -#define ICE_PF_RESET_WAIT_COUNT 200 +#define ICE_PF_RESET_WAIT_COUNT 300 /** * ice_set_mac_type - Sets MAC type @@ -615,29 +615,6 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw) } /** - * ice_get_nvm_version - get cached NVM version data - * @hw: pointer to the hardware structure - * @oem_ver: 8 bit NVM version - * @oem_build: 16 bit NVM build number - * @oem_patch: 8 NVM patch number - * @ver_hi: high 16 bits of the NVM version - * @ver_lo: low 16 bits of the NVM version - */ -void -ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build, - u8 *oem_patch, u8 *ver_hi, u8 *ver_lo) -{ - struct ice_nvm_info *nvm = &hw->nvm; - - *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT); - *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK); - *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >> - ICE_OEM_VER_BUILD_SHIFT); - *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; - *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; -} - -/** * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure */ @@ -958,72 +935,6 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) } /** - * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA - * @hw: pointer to hardware structure - * @module_tlv: pointer to module TLV to return - * @module_tlv_len: pointer to module TLV length to return - * @module_type: module type requested - * - * Finds the requested sub module TLV type from the Preserved Field - * Area (PFA) and returns the TLV pointer and length. The caller can - * use these to read the variable length TLV value. - */ -enum ice_status -ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, - u16 module_type) -{ - enum ice_status status; - u16 pfa_len, pfa_ptr; - u16 next_tlv; - - status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); - if (status) { - ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n"); - return status; - } - status = ice_read_sr_word(hw, pfa_ptr, &pfa_len); - if (status) { - ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); - return status; - } - /* Starting with first TLV after PFA length, iterate through the list - * of TLVs to find the requested one. 
- */ - next_tlv = pfa_ptr + 1; - while (next_tlv < pfa_ptr + pfa_len) { - u16 tlv_sub_module_type; - u16 tlv_len; - - /* Read TLV type */ - status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); - if (status) { - ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); - break; - } - /* Read TLV length */ - status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); - if (status) { - ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); - break; - } - if (tlv_sub_module_type == module_type) { - if (tlv_len) { - *module_tlv = next_tlv; - *module_tlv_len = tlv_len; - return 0; - } - return ICE_ERR_INVAL_SIZE; - } - /* Check next TLV, i.e. current TLV pointer + length + 2 words - * (for current TLV's type and length) - */ - next_tlv = next_tlv + tlv_len + 2; - } - /* Module does not exist */ - return ICE_ERR_DOES_NOT_EXIST; -} - -/** * ice_copy_rxq_ctx_to_hw * @hw: pointer to the hardware structure * @ice_rxq_ctx: pointer to the rxq context @@ -1181,7 +1092,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, case ice_aqc_opc_release_res: if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK) break; - /* fall-through */ + fallthrough; default: mutex_lock(&ice_global_cfg_lock_sw); lock_acquired = true; @@ -2703,7 +2614,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; break; } - /* fall-through */ + fallthrough; default: status = ICE_ERR_PARAM; goto ice_aq_get_set_rss_lut_exit; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index f9fc005d35a7..8104f3d64d96 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -15,9 +15,6 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); -enum ice_status -ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, - u16 module_type); enum ice_status ice_check_reset(struct ice_hw *hw); enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); @@ -38,9 +35,6 @@ enum ice_status ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res); enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res); -enum ice_status ice_init_nvm(struct ice_hw *hw); -enum ice_status -ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data); enum ice_status ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, @@ -153,9 +147,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); -void -ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build, - u8 *oem_patch, u8 *ver_hi, u8 *ver_lo); enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_get_elem *buf); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 7108fb41b604..7bea09363b42 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -63,6 +63,26 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg) } /** + * ice_dcb_get_mode - gets the DCB mode + * @port_info: pointer to port info structure + * @host: if set it's HOST if not it's MANAGED + */ 
+static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host) +{ + u8 mode; + + if (host) + mode = DCB_CAP_DCBX_HOST; + else + mode = DCB_CAP_DCBX_LLD_MANAGED; + + if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE) + return mode | DCB_CAP_DCBX_VER_CEE; + else + return mode | DCB_CAP_DCBX_VER_IEEE; +} + +/** * ice_dcb_get_num_tc - Get the number of TCs from DCBX config * @dcbcfg: config to retrieve number of TCs from */ @@ -149,6 +169,43 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) } /** + * ice_dcb_bwchk - check if ETS bandwidth input parameters are correct + * @pf: pointer to the PF struct + * @dcbcfg: pointer to DCB config structure + */ +int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etscfg = &dcbcfg->etscfg; + u8 num_tc, total_bw = 0; + int i; + + /* returns number of contigous TCs and 1 TC for non-contigous TCs, + * since at least 1 TC has to be configured + */ + num_tc = ice_dcb_get_num_tc(dcbcfg); + + /* no bandwidth checks required if there's only one TC, so assign + * all bandwidth to TC0 and return + */ + if (num_tc == 1) { + etscfg->tcbwtable[0] = ICE_TC_MAX_BW; + return 0; + } + + for (i = 0; i < num_tc; i++) + total_bw += etscfg->tcbwtable[i]; + + if (!total_bw) { + etscfg->tcbwtable[0] = ICE_TC_MAX_BW; + } else if (total_bw != ICE_TC_MAX_BW) { + dev_err(ice_pf_to_dev(pf), "Invalid config, total bandwidth must equal 100\n"); + return -EINVAL; + } + + return 0; +} + +/** * ice_pf_dcb_cfg - Apply new DCB configuration * @pf: pointer to the PF struct * @new_cfg: DCBX config to apply @@ -182,6 +239,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) return ret; } + if (ice_dcb_bwchk(pf, new_cfg)) + return -EINVAL; + /* Store old config in case FW config fails */ old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL); if (!old_cfg) @@ -605,14 +665,14 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) ice_cfg_sw_lldp(pf_vsi, false, true); - pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; + pf->dcbx_cap = ice_dcb_get_mode(port_info, true); return 0; } set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); - /* DCBX in FW and LLDP enabled in FW */ - pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; + /* DCBX/LLDP enabled in FW, set DCBNL mode advertisement */ + pf->dcbx_cap = ice_dcb_get_mode(port_info, false); err = ice_dcb_init_cfg(pf, locked); if (err) @@ -719,7 +779,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, bool need_reconfig = false; struct ice_port_info *pi; struct ice_vsi *pf_vsi; - u8 type; + u8 mib_type; int ret; /* Not DCB capable or capability disabled */ @@ -734,16 +794,16 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, pi = pf->hw.port_info; mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; /* Ignore if event is not for Nearest Bridge */ - type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) & - ICE_AQ_LLDP_BRID_TYPE_M); - dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", type); - if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID) + mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) & + ICE_AQ_LLDP_BRID_TYPE_M); + dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type); + if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID) return; /* Check MIB Type and return if event for Remote MIB update */ - type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M; - dev_dbg(dev, "LLDP event mib type %s\n", type ? 
"remote" : "local"); - if (type == ICE_AQ_LLDP_MIB_REMOTE) { + mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M; + dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local"); + if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, @@ -775,6 +835,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, goto out; } + pf->dcbx_cap = ice_dcb_get_mode(pi, false); + need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg); ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index f15e5776f287..37680e815b02 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -20,6 +20,7 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index); int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked); +int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg); void ice_pf_dcb_recfg(struct ice_pf *pf); void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi); int ice_init_pf_dcb(struct ice_pf *pf, bool locked); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index b61aba428adb..c4c12414083a 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -95,14 +95,12 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i]; } - /* max_tc is a 1-8 value count of number of TC's, not a 0-7 value - * for the TC's index number. Add one to value if not zero, and - * for zero set it to the FW's default value - */ - if (max_tc) - max_tc++; - else - max_tc = IEEE_8021QAZ_MAX_TCS; + if (ice_dcb_bwchk(pf, new_cfg)) { + err = -EINVAL; + goto ets_out; + } + + max_tc = pf->hw.func_caps.common_cap.maxtc; new_cfg->etscfg.maxtcs = max_tc; @@ -119,6 +117,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) if (err == ICE_DCB_NO_HW_CHG) err = ICE_DCB_HW_CHG_RST; +ets_out: mutex_unlock(&pf->tc_mutex); return err; } @@ -535,6 +534,30 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, } /** + * ice_dcbnl_set_pg_tc_cfg_rx + * @netdev: relevant netdev struct + * @prio: corresponding user priority + * @prio_type: the traffic priority type + * @pgid: the PG ID + * @bw_pct: BW percentage for corresponding BWG + * @up_map: prio mapped to corresponding TC + * + * lldpad requires this function pointer to be non-NULL to complete CEE config. 
+ */ +static void +ice_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, + int __always_unused prio, + u8 __always_unused prio_type, + u8 __always_unused pgid, + u8 __always_unused bw_pct, + u8 __always_unused up_map) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + dev_dbg(ice_pf_to_dev(pf), "Rx TC PG Config Not Supported.\n"); +} + +/** * ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config * @netdev: pointer to netdev struct * @pgid: the corresponding traffic class @@ -554,6 +577,23 @@ ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid, } /** + * ice_dcbnl_set_pg_bwg_cfg_rx + * @netdev: the corresponding netdev + * @pgid: corresponding TC + * @bw_pct: BW percentage for given TC + * + * lldpad requires this function pointer to be non-NULL to complete CEE config. + */ +static void +ice_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid, + u8 __always_unused bw_pct) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + dev_dbg(ice_pf_to_dev(pf), "Rx BWG PG Config Not Supported.\n"); +} + +/** * ice_dcbnl_get_cap - Get DCBX capabilities of adapter * @netdev: pointer to netdev struct * @capid: the capability type @@ -799,6 +839,8 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = { .getpermhwaddr = ice_dcbnl_get_perm_hw_addr, .setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx, .setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx, + .setpgtccfgrx = ice_dcbnl_set_pg_tc_cfg_rx, + .setpgbwgcfgrx = ice_dcbnl_set_pg_bwg_cfg_rx, .getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx, .getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx, .getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx, diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index ce63017c56c7..9d8194671f6a 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -5,12 +5,34 @@ #define _ICE_DEVIDS_H_ /* Device IDs */ +/* Intel(R) Ethernet Connection E823-L for backplane */ +#define ICE_DEV_ID_E823L_BACKPLANE 0x124C +/* Intel(R) Ethernet Connection E823-L for SFP */ +#define ICE_DEV_ID_E823L_SFP 0x124D +/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */ +#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E +/* Intel(R) Ethernet Connection E823-L 1GbE */ +#define ICE_DEV_ID_E823L_1GBE 0x124F +/* Intel(R) Ethernet Connection E823-L for QSFP */ +#define ICE_DEV_ID_E823L_QSFP 0x151D /* Intel(R) Ethernet Controller E810-C for backplane */ #define ICE_DEV_ID_E810C_BACKPLANE 0x1591 /* Intel(R) Ethernet Controller E810-C for QSFP */ #define ICE_DEV_ID_E810C_QSFP 0x1592 /* Intel(R) Ethernet Controller E810-C for SFP */ #define ICE_DEV_ID_E810C_SFP 0x1593 +/* Intel(R) Ethernet Controller E810-XXV for SFP */ +#define ICE_DEV_ID_E810_XXV_SFP 0x159B +/* Intel(R) Ethernet Connection E823-C for backplane */ +#define ICE_DEV_ID_E823C_BACKPLANE 0x188A +/* Intel(R) Ethernet Connection E823-C for QSFP */ +#define ICE_DEV_ID_E823C_QSFP 0x188B +/* Intel(R) Ethernet Connection E823-C for SFP */ +#define ICE_DEV_ID_E823C_SFP 0x188C +/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */ +#define ICE_DEV_ID_E823C_10G_BASE_T 0x188D +/* Intel(R) Ethernet Connection E823-C 1GbE */ +#define ICE_DEV_ID_E823C_SGMII 0x188E /* Intel(R) Ethernet Connection E822-C for backplane */ #define ICE_DEV_ID_E822C_BACKPLANE 0x1890 /* Intel(R) Ethernet Connection E822-C for QSFP */ @@ -21,8 +43,8 @@ #define ICE_DEV_ID_E822C_10G_BASE_T 0x1893 /* Intel(R) Ethernet Connection E822-C 1GbE */ #define ICE_DEV_ID_E822C_SGMII 0x1894 -/* Intel(R) Ethernet 
Connection E822-X for backplane */ -#define ICE_DEV_ID_E822X_BACKPLANE 0x1897 +/* Intel(R) Ethernet Connection E822-L for backplane */ +#define ICE_DEV_ID_E822L_BACKPLANE 0x1897 /* Intel(R) Ethernet Connection E822-L for SFP */ #define ICE_DEV_ID_E822L_SFP 0x1898 /* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */ diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c new file mode 100644 index 000000000000..c6833944b90a --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" +#include "ice_devlink.h" + +static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len) +{ + u8 dsn[8]; + + /* Copy the DSN into an array in Big Endian format */ + put_unaligned_be64(pci_get_dsn(pf->pdev), dsn); + + snprintf(buf, len, "%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", + dsn[0], dsn[1], dsn[2], dsn[3], + dsn[4], dsn[5], dsn[6], dsn[7]); + + return 0; +} + +static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len) +{ + struct ice_hw *hw = &pf->hw; + enum ice_status status; + + status = ice_read_pba_string(hw, (u8 *)buf, len); + if (status) + return -EIO; + + return 0; +} + +static int ice_info_fw_mgmt(struct ice_pf *pf, char *buf, size_t len) +{ + struct ice_hw *hw = &pf->hw; + + snprintf(buf, len, "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver, + hw->fw_patch); + + return 0; +} + +static int ice_info_fw_api(struct ice_pf *pf, char *buf, size_t len) +{ + struct ice_hw *hw = &pf->hw; + + snprintf(buf, len, "%u.%u", hw->api_maj_ver, hw->api_min_ver); + + return 0; +} + +static int ice_info_fw_build(struct ice_pf *pf, char *buf, size_t len) +{ + struct ice_hw *hw = &pf->hw; + + snprintf(buf, len, "0x%08x", hw->fw_build); + + return 0; +} + +static int ice_info_orom_ver(struct ice_pf *pf, char *buf, size_t len) +{ + struct ice_orom_info *orom = &pf->hw.nvm.orom; + + snprintf(buf, len, "%u.%u.%u", orom->major, orom->build, orom->patch); + + return 0; +} + +static int ice_info_nvm_ver(struct ice_pf *pf, char *buf, size_t len) +{ + struct ice_nvm_info *nvm = &pf->hw.nvm; + + snprintf(buf, len, "%x.%02x", nvm->major_ver, nvm->minor_ver); + + return 0; +} + +static int ice_info_eetrack(struct ice_pf *pf, char *buf, size_t len) +{ + struct ice_nvm_info *nvm = &pf->hw.nvm; + + snprintf(buf, len, "0x%08x", nvm->eetrack); + + return 0; +} + +static int ice_info_ddp_pkg_name(struct ice_pf *pf, char *buf, size_t len) +{ + struct ice_hw *hw = &pf->hw; + + snprintf(buf, len, "%s", hw->active_pkg_name); + + return 0; +} + +static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len) +{ + struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver; + + snprintf(buf, len, "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update, + pkg->draft); + + return 0; +} + +#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter } +#define running(key, getter) { ICE_VERSION_RUNNING, key, getter } + +enum ice_version_type { + ICE_VERSION_FIXED, + ICE_VERSION_RUNNING, + ICE_VERSION_STORED, +}; + +static const struct ice_devlink_version { + enum ice_version_type type; + const char *key; + int (*getter)(struct ice_pf *pf, char *buf, size_t len); +} ice_devlink_versions[] = { + fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba), + running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt), + running("fw.mgmt.api", ice_info_fw_api), + running("fw.mgmt.build", ice_info_fw_build), + 
running(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver), + running("fw.psid.api", ice_info_nvm_ver), + running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack), + running("fw.app.name", ice_info_ddp_pkg_name), + running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version), +}; + +/** + * ice_devlink_info_get - .info_get devlink handler + * @devlink: devlink instance structure + * @req: the devlink info request + * @extack: extended netdev ack structure + * + * Callback for the devlink .info_get operation. Reports information about the + * device. + * + * Return: zero on success or an error code on failure. + */ +static int ice_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + char buf[100]; + size_t i; + int err; + + err = devlink_info_driver_name_put(req, KBUILD_MODNAME); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name"); + return err; + } + + err = ice_info_get_dsn(pf, buf, sizeof(buf)); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unable to obtain serial number"); + return err; + } + + err = devlink_info_serial_number_put(req, buf); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number"); + return err; + } + + for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) { + enum ice_version_type type = ice_devlink_versions[i].type; + const char *key = ice_devlink_versions[i].key; + + err = ice_devlink_versions[i].getter(pf, buf, sizeof(buf)); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info"); + return err; + } + + switch (type) { + case ICE_VERSION_FIXED: + err = devlink_info_version_fixed_put(req, key, buf); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version"); + return err; + } + break; + case ICE_VERSION_RUNNING: + err = devlink_info_version_running_put(req, key, buf); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unable to set running version"); + return err; + } + break; + case ICE_VERSION_STORED: + err = devlink_info_version_stored_put(req, key, buf); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version"); + return err; + } + break; + } + } + + return 0; +} + +static const struct devlink_ops ice_devlink_ops = { + .info_get = ice_devlink_info_get, +}; + +static void ice_devlink_free(void *devlink_ptr) +{ + devlink_free((struct devlink *)devlink_ptr); +} + +/** + * ice_allocate_pf - Allocate devlink and return PF structure pointer + * @dev: the device to allocate for + * + * Allocate a devlink instance for this device and return the private area as + * the PF structure. The devlink memory is kept track of through devres by + * adding an action to remove it when unwinding. + */ +struct ice_pf *ice_allocate_pf(struct device *dev) +{ + struct devlink *devlink; + + devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf)); + if (!devlink) + return NULL; + + /* Add an action to teardown the devlink when unwinding the driver */ + if (devm_add_action(dev, ice_devlink_free, devlink)) { + devlink_free(devlink); + return NULL; + } + + return devlink_priv(devlink); +} + +/** + * ice_devlink_register - Register devlink interface for this PF + * @pf: the PF to register the devlink for. + * + * Register the devlink instance associated with this physical function. + * + * Return: zero on success or an error code on failure. 
+ */ +int ice_devlink_register(struct ice_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct device *dev = ice_pf_to_dev(pf); + int err; + + err = devlink_register(devlink, dev); + if (err) { + dev_err(dev, "devlink registration failed: %d\n", err); + return err; + } + + return 0; +} + +/** + * ice_devlink_unregister - Unregister devlink resources for this PF. + * @pf: the PF structure to cleanup + * + * Releases resources used by devlink and cleans up associated memory. + */ +void ice_devlink_unregister(struct ice_pf *pf) +{ + devlink_unregister(priv_to_devlink(pf)); +} + +/** + * ice_devlink_create_port - Create a devlink port for this PF + * @pf: the PF to create a port for + * + * Create and register a devlink_port for this PF. Note that although each + * physical function is connected to a separate devlink instance, the port + * will still be numbered according to the physical function id. + * + * Return: zero on success or an error code on failure. + */ +int ice_devlink_create_port(struct ice_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct device *dev = ice_pf_to_dev(pf); + int err; + + if (!vsi) { + dev_err(dev, "%s: unable to find main VSI\n", __func__); + return -EIO; + } + + devlink_port_attrs_set(&pf->devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, + pf->hw.pf_id, false, 0, NULL, 0); + err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id); + if (err) { + dev_err(dev, "devlink_port_register failed: %d\n", err); + return err; + } + + return 0; +} + +/** + * ice_devlink_destroy_port - Destroy the devlink_port for this PF + * @pf: the PF to cleanup + * + * Unregisters the devlink_port structure associated with this PF. + */ +void ice_devlink_destroy_port(struct ice_pf *pf) +{ + devlink_port_type_clear(&pf->devlink_port); + devlink_port_unregister(&pf->devlink_port); +} + +/** + * ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents + * @devlink: the devlink instance + * @extack: extended ACK response structure + * @data: on exit points to snapshot data buffer + * + * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for + * the shadow-ram devlink region. It captures a snapshot of the shadow ram + * contents. This snapshot can later be viewed via the devlink-region + * interface. + * + * @returns zero on success, and updates the data pointer. Returns a non-zero + * error code on failure. 
+ */ +static int ice_devlink_nvm_snapshot(struct devlink *devlink, + struct netlink_ext_ack *extack, u8 **data) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + enum ice_status status; + void *nvm_data; + u32 nvm_size; + + nvm_size = hw->nvm.flash_size; + nvm_data = vzalloc(nvm_size); + if (!nvm_data) + return -ENOMEM; + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (status) { + dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", + status, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); + vfree(nvm_data); + return -EIO; + } + + status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false); + if (status) { + dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", + nvm_size, status, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); + ice_release_nvm(hw); + vfree(nvm_data); + return -EIO; + } + + ice_release_nvm(hw); + + *data = nvm_data; + + return 0; +} + +static const struct devlink_region_ops ice_nvm_region_ops = { + .name = "nvm-flash", + .destructor = vfree, + .snapshot = ice_devlink_nvm_snapshot, +}; + +/** + * ice_devlink_init_regions - Initialize devlink regions + * @pf: the PF device structure + * + * Create devlink regions used to enable access to dump the contents of the + * flash memory on the device. + */ +void ice_devlink_init_regions(struct ice_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct device *dev = ice_pf_to_dev(pf); + u64 nvm_size; + + nvm_size = pf->hw.nvm.flash_size; + pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1, + nvm_size); + if (IS_ERR(pf->nvm_region)) { + dev_err(dev, "failed to create NVM devlink region, err %ld\n", + PTR_ERR(pf->nvm_region)); + pf->nvm_region = NULL; + } +} + +/** + * ice_devlink_destroy_regions - Destroy devlink regions + * @pf: the PF device structure + * + * Remove previously created regions for this PF. + */ +void ice_devlink_destroy_regions(struct ice_pf *pf) +{ + if (pf->nvm_region) + devlink_region_destroy(pf->nvm_region); +} diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h new file mode 100644 index 000000000000..6e806a08dc23 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. 
*/ + +#ifndef _ICE_DEVLINK_H_ +#define _ICE_DEVLINK_H_ + +struct ice_pf *ice_allocate_pf(struct device *dev); + +int ice_devlink_register(struct ice_pf *pf); +void ice_devlink_unregister(struct ice_pf *pf); +int ice_devlink_create_port(struct ice_pf *pf); +void ice_devlink_destroy_port(struct ice_pf *pf); + +void ice_devlink_init_regions(struct ice_pf *pf); +void ice_devlink_destroy_regions(struct ice_pf *pf); + +#endif /* _ICE_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 77c412a7e7a4..593fb37bd59e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -157,6 +157,7 @@ struct ice_priv_flag { static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA), ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT), + ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF), ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX), }; @@ -166,25 +167,26 @@ static void ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct ice_netdev_priv *np = netdev_priv(netdev); - u8 oem_ver, oem_patch, nvm_ver_hi, nvm_ver_lo; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - u16 oem_build; + struct ice_orom_info *orom; + struct ice_nvm_info *nvm; - strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version)); + nvm = &hw->nvm; + orom = &nvm->orom; + + strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version)); /* Display NVM version (from which the firmware version can be * determined) which contains more pertinent information. 
*/ - ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, - &nvm_ver_hi, &nvm_ver_lo); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo, - hw->nvm.eetrack, oem_ver, oem_build, oem_patch); + "%x.%02x 0x%x %d.%d.%d", nvm->major_ver, nvm->minor_ver, + nvm->eetrack, orom->major, orom->build, orom->patch); - strlcpy(drvinfo->bus_info, pci_name(pf->pdev), + strscpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE; } @@ -243,7 +245,7 @@ static int ice_get_eeprom_len(struct net_device *netdev) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_pf *pf = np->vsi->back; - return (int)(pf->hw.nvm.sr_words * sizeof(u16)); + return (int)pf->hw.nvm.flash_size; } static int @@ -251,39 +253,46 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct ice_netdev_priv *np = netdev_priv(netdev); - u16 first_word, last_word, nwords; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; enum ice_status status; struct device *dev; int ret = 0; - u16 *buf; + u8 *buf; dev = ice_pf_to_dev(pf); eeprom->magic = hw->vendor_id | (hw->device_id << 16); + netdev_dbg(netdev, "GEEPROM cmd 0x%08x, offset 0x%08x, len 0x%08x\n", + eeprom->cmd, eeprom->offset, eeprom->len); - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - nwords = last_word - first_word + 1; - - buf = devm_kcalloc(dev, nwords, sizeof(u16), GFP_KERNEL); + buf = kzalloc(eeprom->len, GFP_KERNEL); if (!buf) return -ENOMEM; - status = ice_read_sr_buf(hw, first_word, &nwords, buf); + status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) { - dev_err(dev, "ice_read_sr_buf failed, err %d aq_err %d\n", + dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", status, hw->adminq.sq_last_status); - eeprom->len = sizeof(u16) * nwords; ret = -EIO; goto out; } - memcpy(bytes, (u8 *)buf + (eeprom->offset & 1), eeprom->len); + status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf, + false); + if (status) { + dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %d\n", + status, hw->adminq.sq_last_status); + ret = -EIO; + goto release; + } + + memcpy(bytes, buf, eeprom->len); +release: + ice_release_nvm(hw); out: - devm_kfree(dev, buf); + kfree(buf); return ret; } @@ -462,7 +471,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi) if (status) goto err_setup_rx_ring; - status = ice_vsi_start_rx_rings(vsi); + status = ice_vsi_start_all_rx_rings(vsi); if (status) goto err_start_rx_ring; @@ -494,7 +503,7 @@ static int ice_lbtest_disable_rings(struct ice_vsi *vsi) netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n", vsi->vsi_num, status); - status = ice_vsi_stop_rx_rings(vsi); + status = ice_vsi_stop_all_rx_rings(vsi); if (status) netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n", vsi->vsi_num, status); @@ -672,7 +681,7 @@ static u64 ice_loopback_test(struct net_device *netdev) test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info); if (!test_vsi) { - netdev_err(netdev, "Failed to create a VSI for the loopback test"); + netdev_err(netdev, "Failed to create a VSI for the loopback test\n"); return 1; } @@ -731,7 +740,7 @@ lbtest_free_frame: devm_kfree(dev, tx_frame); remove_mac_filters: if (ice_remove_mac(&pf->hw, &tmp_list)) - netdev_err(netdev, "Could not remove MAC filter for the test VSI"); + netdev_err(netdev, "Could not remove MAC filter for the test VSI\n"); 
free_mac_list: ice_free_fltr_list(dev, &tmp_list); lbtest_mac_dis: @@ -744,7 +753,7 @@ lbtest_rings_dis: lbtest_vsi_close: test_vsi->netdev = NULL; if (ice_vsi_release(test_vsi)) - netdev_err(netdev, "Failed to remove the test VSI"); + netdev_err(netdev, "Failed to remove the test VSI\n"); return ret; } @@ -834,7 +843,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, int status = ice_open(netdev); if (status) { - dev_err(dev, "Could not open device %s, err %d", + dev_err(dev, "Could not open device %s, err %d\n", pf->int_name, status); } } @@ -1091,7 +1100,6 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) fecparam->active_fec = ETHTOOL_FEC_BASER; break; case ICE_AQ_LINK_25G_RS_528_FEC_EN: - /* fall through */ case ICE_AQ_LINK_25G_RS_544_FEC_EN: fecparam->active_fec = ETHTOOL_FEC_RS; break; @@ -1132,6 +1140,33 @@ done: } /** + * ice_nway_reset - restart autonegotiation + * @netdev: network interface device structure + */ +static int ice_nway_reset(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_port_info *pi; + enum ice_status status; + + pi = vsi->port_info; + /* If VSI state is up, then restart autoneg with link up */ + if (!test_bit(__ICE_DOWN, vsi->back->state)) + status = ice_aq_set_link_restart_an(pi, true, NULL); + else + status = ice_aq_set_link_restart_an(pi, false, NULL); + + if (status) { + netdev_info(netdev, "link restart failed, err %d aq_err %d\n", + status, pi->hw->adminq.sq_last_status); + return -EIO; + } + + return 0; +} + +/** * ice_get_priv_flags - report device private flags * @netdev: network interface device structure * @@ -1264,6 +1299,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) status = ice_cfg_lldp_mib_change(&pf->hw, true); if (status) dev_dbg(dev, "Fail to enable MIB change events\n"); + + ice_nway_reset(netdev); } } if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) { @@ -1781,7 +1818,6 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, Asym_Pause); break; case ICE_FC_PFC: - /* fall through */ default: ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause); ethtool_link_ksettings_del_link_mode(ks, lp_advertising, @@ -2776,30 +2812,6 @@ done: return err; } -static int ice_nway_reset(struct net_device *netdev) -{ - /* restart autonegotiation */ - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_port_info *pi; - enum ice_status status; - - pi = vsi->port_info; - /* If VSI state is up, then restart autoneg with link up */ - if (!test_bit(__ICE_DOWN, vsi->back->state)) - status = ice_aq_set_link_restart_an(pi, true, NULL); - else - status = ice_aq_set_link_restart_an(pi, false, NULL); - - if (status) { - netdev_info(netdev, "link restart failed, err %d aq_err %d\n", - status, pi->hw->adminq.sq_last_status); - return -EIO; - } - - return 0; -} - /** * ice_get_pauseparam - Get Flow Control status * @netdev: network interface device structure @@ -3453,12 +3465,6 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, break; case ICE_TX_CONTAINER: - if (ec->tx_coalesce_usecs_high) { - netdev_info(vsi->netdev, "setting %s-usecs-high is not supported\n", - c_type_str); - return -EINVAL; - } - use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; coalesce_usecs = ec->tx_coalesce_usecs; @@ -3535,53 +3541,6 @@ ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num) } /** - * 
ice_is_coalesce_param_invalid - check for unsupported coalesce parameters - * @netdev: pointer to the netdev associated with this query - * @ec: ethtool structure to fill with driver's coalesce settings - * - * Print netdev info if driver doesn't support one of the parameters - * and return error. When any parameters will be implemented, remove only - * this parameter from param array. - */ -static int -ice_is_coalesce_param_invalid(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct ice_ethtool_not_used { - u32 value; - const char *name; - } param[] = { - {ec->stats_block_coalesce_usecs, "stats-block-usecs"}, - {ec->rate_sample_interval, "sample-interval"}, - {ec->pkt_rate_low, "pkt-rate-low"}, - {ec->pkt_rate_high, "pkt-rate-high"}, - {ec->rx_max_coalesced_frames, "rx-frames"}, - {ec->rx_coalesce_usecs_irq, "rx-usecs-irq"}, - {ec->rx_max_coalesced_frames_irq, "rx-frames-irq"}, - {ec->tx_max_coalesced_frames, "tx-frames"}, - {ec->tx_coalesce_usecs_irq, "tx-usecs-irq"}, - {ec->tx_max_coalesced_frames_irq, "tx-frames-irq"}, - {ec->rx_coalesce_usecs_low, "rx-usecs-low"}, - {ec->rx_max_coalesced_frames_low, "rx-frames-low"}, - {ec->tx_coalesce_usecs_low, "tx-usecs-low"}, - {ec->tx_max_coalesced_frames_low, "tx-frames-low"}, - {ec->rx_max_coalesced_frames_high, "rx-frames-high"}, - {ec->tx_max_coalesced_frames_high, "tx-frames-high"} - }; - int i; - - for (i = 0; i < ARRAY_SIZE(param); i++) { - if (param[i].value) { - netdev_info(netdev, "Setting %s not supported\n", - param[i].name); - return -EINVAL; - } - } - - return 0; -} - -/** * ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs * @netdev: netdev used for print * @itr_setting: previous user setting @@ -3621,9 +3580,6 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - if (ice_is_coalesce_param_invalid(netdev, ec)) - return -EINVAL; - if (q_num < 0) { struct ice_q_vector *q_vector = vsi->q_vectors[0]; int v_idx; @@ -3818,6 +3774,9 @@ ice_get_module_eeprom(struct net_device *netdev, } static const struct ethtool_ops ice_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE | + ETHTOOL_COALESCE_RX_USECS_HIGH, .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, .get_drvinfo = ice_get_drvinfo, @@ -3867,6 +3826,7 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = { .get_regs = ice_get_regs, .get_msglevel = ice_get_msglevel, .set_msglevel = ice_set_msglevel, + .get_link = ethtool_op_get_link, .get_eeprom_len = ice_get_eeprom_len, .get_eeprom = ice_get_eeprom, .get_strings = ice_get_strings, diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 99208946224c..42bac3ec5526 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -3471,6 +3471,24 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, } /** + * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list + * @hw: pointer to the HW struct + * @idx: the index of the TCAM entry to remove + * @chg: the list of change structures to search + */ +static void +ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg) +{ + struct ice_chs_chg *pos, *tmp; + + list_for_each_entry_safe(tmp, pos, chg, list_entry) + if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) { + 
list_del(&tmp->list_entry); + devm_kfree(ice_hw_to_dev(hw), tmp); + } +} + +/** * ice_prof_tcam_ena_dis - add enable or disable TCAM change * @hw: pointer to the HW struct * @blk: hardware block @@ -3489,14 +3507,19 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, enum ice_status status; struct ice_chs_chg *p; - /* Default: enable means change the low flag bit to don't care */ - u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; + u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; - u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; /* if disabling, free the TCAM */ if (!enable) { - status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx); + status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx); + + /* if we have already created a change for this TCAM entry, then + * we need to remove that entry, in order to prevent writing to + * a TCAM entry we no longer will have ownership of. + */ + ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg); tcam->tcam_idx = 0; tcam->in_use = 0; return status; @@ -3612,11 +3635,12 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, * @blk: hardware block * @vsig: the VSIG to which this profile is to be added * @hdl: the profile handle indicating the profile to add + * @rev: true to add entries to the end of the list * @chg: the change list */ static enum ice_status ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, - struct list_head *chg) + bool rev, struct list_head *chg) { /* Masks that ignore flags */ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; @@ -3625,7 +3649,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, struct ice_prof_map *map; struct ice_vsig_prof *t; struct ice_chs_chg *p; - u16 i; + u16 vsig_idx, i; /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); @@ -3687,8 +3711,13 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, } /* add profile to VSIG */ - list_add(&t->list, - &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst); + vsig_idx = vsig & ICE_VSIG_IDX_M; + if (rev) + list_add_tail(&t->list, + &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); + else + list_add(&t->list, + &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); return 0; @@ -3728,7 +3757,7 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, if (status) goto err_ice_create_prof_id_vsig; - status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg); + status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg); if (status) goto err_ice_create_prof_id_vsig; @@ -3753,11 +3782,13 @@ err_ice_create_prof_id_vsig: * @blk: hardware block * @vsi: the initial VSI that will be in VSIG * @lst: the list of profile that will be added to the VSIG + * @new_vsig: return of new VSIG * @chg: the change list */ static enum ice_status ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, - struct list_head *lst, struct list_head *chg) + struct list_head *lst, u16 *new_vsig, + struct list_head *chg) { struct ice_vsig_prof *t; enum ice_status status; @@ -3772,12 +3803,15 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, return status; list_for_each_entry(t, lst, list) { + /* Reverse the order here since we are copying the list */ 
status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie, - chg); + true, chg); if (status) return status; } + *new_vsig = vsig; + return 0; } @@ -3899,7 +3933,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) * not sharing entries and we can simply add the new * profile to the VSIG. */ - status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg); + status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false, + &chg); if (status) goto err_ice_add_prof_id_flow; @@ -3910,7 +3945,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) } else { /* No match, so we need a new VSIG */ status = ice_create_vsig_from_lst(hw, blk, vsi, - &union_lst, &chg); + &union_lst, &vsig, + &chg); if (status) goto err_ice_add_prof_id_flow; @@ -4076,7 +4112,8 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) * new VSIG and TCAM entries */ status = ice_create_vsig_from_lst(hw, blk, vsi, - ©, &chg); + ©, &vsig, + &chg); if (status) goto err_ice_rem_prof_id_flow; diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index a05ceb59863b..3de862a3c789 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -694,7 +694,7 @@ out: * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer * @seg: packet segment the field being set belongs to * @fld: field to be set - * @type: type of the field + * @field_type: type of the field * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from * entry's input buffer * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's @@ -715,16 +715,16 @@ out: */ static void ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld, - enum ice_flow_fld_match_type type, u16 val_loc, + enum ice_flow_fld_match_type field_type, u16 val_loc, u16 mask_loc, u16 last_loc) { u64 bit = BIT_ULL(fld); seg->match |= bit; - if (type == ICE_FLOW_FLD_TYPE_RANGE) + if (field_type == ICE_FLOW_FLD_TYPE_RANGE) seg->range |= bit; - seg->fields[fld].type = type; + seg->fields[fld].type = field_type; seg->fields[fld].src.val = val_loc; seg->fields[fld].src.mask = mask_loc; seg->fields[fld].src.last = last_loc; diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 6db3d0494127..1d37a9f02c1c 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -85,6 +85,7 @@ #define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0) #define QRXFLXP_CNTXT_RXDID_PRIO_S 8 #define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8) +#define QRXFLXP_CNTXT_TS_M BIT(11) #define GLGEN_RSTAT 0x000B8188 #define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0) #define GLGEN_RSTCTL 0x000B8180 @@ -217,6 +218,8 @@ #define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) #define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) #define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0) +#define GL_MDCK_TX_TDPU 0x00049348 +#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1) #define GL_MDET_RX 0x00294C00 #define GL_MDET_RX_QNUM_S 0 #define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0) @@ -286,6 +289,8 @@ #define GL_PWR_MODE_CTL 0x000B820C #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) +#define GLDCB_RTCTQ_RXQNUM_S 0 +#define GLDCB_RTCTQ_RXQNUM_M ICE_M(0x7FF, 0) #define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) #define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) #define GLPRT_CRCERRS(_i) 
(0x00380100 + ((_i) * 8)) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index d974e2fa3e63..2f256bf45efc 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -9,11 +9,11 @@ /** * ice_vsi_type_str - maps VSI type enum to string equivalents - * @type: VSI type enum + * @vsi_type: VSI type enum */ -const char *ice_vsi_type_str(enum ice_vsi_type type) +const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) { - switch (type) { + switch (vsi_type) { case ICE_VSI_PF: return "ICE_VSI_PF"; case ICE_VSI_VF: @@ -26,16 +26,26 @@ const char *ice_vsi_type_str(enum ice_vsi_type type) } /** - * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings + * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings * @vsi: the VSI being configured * @ena: start or stop the Rx rings + * + * First enable/disable all of the Rx rings, flush any remaining writes, and + * then verify that they have all been enabled/disabled successfully. This will + * let all of the register writes complete when enabling/disabling the Rx rings + * before waiting for the change in hardware to complete. */ -static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) +static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena) { int i, ret = 0; + for (i = 0; i < vsi->num_rxq; i++) + ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false); + + ice_flush(&vsi->back->hw); + for (i = 0; i < vsi->num_rxq; i++) { - ret = ice_vsi_ctrl_rx_ring(vsi, ena, i); + ret = ice_vsi_wait_one_rx_ring(vsi, ena, i); if (ret) break; } @@ -111,7 +121,6 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) { switch (vsi->type) { case ICE_VSI_PF: - /* fall through */ case ICE_VSI_LB: vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; @@ -169,12 +178,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) vf = &pf->vf[vsi->vf_id]; vsi->alloc_txq = vf->num_vf_qs; vsi->alloc_rxq = vf->num_vf_qs; - /* pf->num_vf_msix includes (VF miscellaneous vector + + /* pf->num_msix_per_vf includes (VF miscellaneous vector + * data queue interrupts). Since vsi->num_q_vectors is number * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the * original vector count */ - vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF; + vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF; break; case ICE_VSI_LB: vsi->alloc_txq = 1; @@ -341,13 +350,13 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) /** * ice_vsi_alloc - Allocates the next available struct VSI in the PF * @pf: board private structure - * @type: type of VSI + * @vsi_type: type of VSI * @vf_id: ID of the VF being configured * * returns a pointer to a VSI on success, NULL on failure. 
*/ static struct ice_vsi * -ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) +ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) { struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi = NULL; @@ -368,13 +377,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) if (!vsi) goto unlock_pf; - vsi->type = type; + vsi->type = vsi_type; vsi->back = pf; set_bit(__ICE_DOWN, vsi->state); vsi->idx = pf->next_vsi; - if (type == ICE_VSI_VF) + if (vsi_type == ICE_VSI_VF) ice_vsi_set_num_qs(vsi, vf_id); else ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); @@ -433,7 +442,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) .scatter_count = ICE_MAX_SCATTER_TXQS, .vsi_map = vsi->txq_map, .vsi_map_offset = 0, - .mapping_mode = vsi->tx_mapping_mode + .mapping_mode = ICE_VSI_MAP_CONTIG }; struct ice_qs_cfg rx_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, @@ -443,18 +452,21 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) .scatter_count = ICE_MAX_SCATTER_RXQS, .vsi_map = vsi->rxq_map, .vsi_map_offset = 0, - .mapping_mode = vsi->rx_mapping_mode + .mapping_mode = ICE_VSI_MAP_CONTIG }; - int ret = 0; - - vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; - vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; + int ret; ret = __ice_vsi_get_qs(&tx_qs_cfg); - if (!ret) - ret = __ice_vsi_get_qs(&rx_qs_cfg); + if (ret) + return ret; + vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode; - return ret; + ret = __ice_vsi_get_qs(&rx_qs_cfg); + if (ret) + return ret; + vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode; + + return 0; } /** @@ -559,12 +571,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; break; case ICE_VSI_VF: - /* VF VSI will gets a small RSS table - * For VSI_LUT, LUT size should be set to 64 bytes + /* VF VSI will get a small RSS table. + * For VSI_LUT, LUT size should be set to 64 bytes. 
*/ vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; - vsi->rss_size = min_t(int, num_online_cpus(), - BIT(cap->rss_table_entry_width)); + vsi->rss_size = ICE_MAX_RSS_QS_PER_VF; vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; break; case ICE_VSI_LB: @@ -672,7 +683,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) if (vsi->type == ICE_VSI_PF) max_rss = ICE_MAX_LG_RSS_QS; else - max_rss = ICE_MAX_SMALL_RSS_QS; + max_rss = ICE_MAX_RSS_QS_PER_VF; qcount_rx = min_t(int, rx_numq_tc, max_rss); if (!vsi->req_rxq) qcount_rx = min_t(int, qcount_rx, @@ -804,7 +815,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) ctxt->info = vsi->info; switch (vsi->type) { case ICE_VSI_LB: - /* fall through */ case ICE_VSI_PF: ctxt->flags = ICE_AQ_VSI_TYPE_PF; break; @@ -897,6 +907,109 @@ out: } /** + * ice_free_res - free a block of resources + * @res: pointer to the resource + * @index: starting index previously returned by ice_get_res + * @id: identifier to track owner + * + * Returns number of resources freed + */ +int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) +{ + int count = 0; + int i; + + if (!res || index >= res->end) + return -EINVAL; + + id |= ICE_RES_VALID_BIT; + for (i = index; i < res->end && res->list[i] == id; i++) { + res->list[i] = 0; + count++; + } + + return count; +} + +/** + * ice_search_res - Search the tracker for a block of resources + * @res: pointer to the resource + * @needed: size of the block needed + * @id: identifier to track owner + * + * Returns the base item index of the block, or -ENOMEM for error + */ +static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) +{ + int start = 0, end = 0; + + if (needed > res->end) + return -ENOMEM; + + id |= ICE_RES_VALID_BIT; + + do { + /* skip already allocated entries */ + if (res->list[end++] & ICE_RES_VALID_BIT) { + start = end; + if ((start + needed) > res->end) + break; + } + + if (end == (start + needed)) { + int i = start; + + /* there was enough, so assign it to the requestor */ + while (i != end) + res->list[i++] = id; + + return start; + } + } while (end < res->end); + + return -ENOMEM; +} + +/** + * ice_get_free_res_count - Get free count from a resource tracker + * @res: Resource tracker instance + */ +static u16 ice_get_free_res_count(struct ice_res_tracker *res) +{ + u16 i, count = 0; + + for (i = 0; i < res->end; i++) + if (!(res->list[i] & ICE_RES_VALID_BIT)) + count++; + + return count; +} + +/** + * ice_get_res - get a block of resources + * @pf: board private structure + * @res: pointer to the resource + * @needed: size of the block needed + * @id: identifier to track owner + * + * Returns the base item index of the block, or negative for error + */ +int +ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) +{ + if (!res || !pf) + return -EINVAL; + + if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { + dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n", + needed, res->num_entries, id); + return -EINVAL; + } + + return ice_search_res(res, needed, id); +} + +/** * ice_vsi_setup_vector_base - Set up the base vector for the given VSI * @vsi: ptr to the VSI * @@ -928,8 +1041,9 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx); if (vsi->base_vector < 0) { - dev_err(dev, "Failed to get tracking for %d vectors for VSI %d, err=%d\n", - num_q_vectors, vsi->vsi_num, vsi->base_vector); 
+ dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n", + ice_get_free_res_count(pf->irq_tracker), + ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors); return -ENOENT; } pf->num_avail_sw_msix -= num_q_vectors; @@ -1348,7 +1462,9 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) list_add(&tmp->list_entry, &tmp_add_list); status = ice_add_vlan(&pf->hw, &tmp_add_list); - if (status) { + if (!status) { + vsi->num_vlan++; + } else { err = -ENODEV; dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid, vsi->vsi_num); @@ -1390,10 +1506,12 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) list_add(&list->list_entry, &tmp_add_list); status = ice_remove_vlan(&pf->hw, &tmp_add_list); - if (status == ICE_ERR_DOES_NOT_EXIST) { + if (!status) { + vsi->num_vlan--; + } else if (status == ICE_ERR_DOES_NOT_EXIST) { dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n", vid, vsi->vsi_num, status); - } else if (status) { + } else { dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n", vid, vsi->vsi_num, status); err = -EIO; @@ -1678,25 +1796,25 @@ out: } /** - * ice_vsi_start_rx_rings - start VSI's Rx rings - * @vsi: the VSI whose rings are to be started + * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings + * @vsi: the VSI whose rings are to be enabled * * Returns 0 on success and a negative value on error */ -int ice_vsi_start_rx_rings(struct ice_vsi *vsi) +int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi) { - return ice_vsi_ctrl_rx_rings(vsi, true); + return ice_vsi_ctrl_all_rx_rings(vsi, true); } /** - * ice_vsi_stop_rx_rings - stop VSI's Rx rings - * @vsi: the VSI + * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings + * @vsi: the VSI whose rings are to be disabled * * Returns 0 on success and a negative value on error */ -int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) +int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) { - return ice_vsi_ctrl_rx_rings(vsi, false); + return ice_vsi_ctrl_all_rx_rings(vsi, false); } /** @@ -1756,6 +1874,20 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) } /** + * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not + * @vsi: VSI to check whether or not VLAN pruning is enabled. + * + * returns true if Rx VLAN pruning is enabled and false otherwise. + */ +bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) +{ + if (!vsi) + return false; + + return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA); +} + +/** * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI * @vsi: VSI to enable or disable VLAN pruning on * @ena: set to true to enable VLAN pruning and false to disable it @@ -1952,7 +2084,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) * ice_vsi_setup - Set up a VSI by a given type * @pf: board private structure * @pi: pointer to the port_info instance - * @type: VSI type + * @vsi_type: VSI type * @vf_id: defines VF ID to which this VSI connects. This field is meant to be * used only for ICE_VSI_VF VSI type. For other VSI types, should * fill-in ICE_INVAL_VFID as input. 
@@ -1964,7 +2096,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) */ struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type type, u16 vf_id) + enum ice_vsi_type vsi_type, u16 vf_id) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = ice_pf_to_dev(pf); @@ -1972,10 +2104,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, struct ice_vsi *vsi; int ret, i; - if (type == ICE_VSI_VF) - vsi = ice_vsi_alloc(pf, type, vf_id); + if (vsi_type == ICE_VSI_VF) + vsi = ice_vsi_alloc(pf, vsi_type, vf_id); else - vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID); + vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID); if (!vsi) { dev_err(dev, "could not allocate VSI\n"); @@ -2025,6 +2157,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (ret) goto unroll_vector_base; + /* Always add VLAN ID 0 switch rule by default. This is needed + * in order to allow all untagged and 0 tagged priority traffic + * if Rx VLAN pruning is enabled. Also there are cases where we + * don't get the call to add VLAN 0 via ice_vlan_rx_add_vid() + * so this handles those cases (i.e. adding the PF to a bridge + * without the 8021q module loaded). + */ + ret = ice_vsi_add_vlan(vsi, 0); + if (ret) + goto unroll_clear_rings; + ice_vsi_map_rings_to_vectors(vsi); /* Do not exit if configuring RSS had an issue, at least @@ -2104,6 +2247,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, return vsi; +unroll_clear_rings: + ice_vsi_clear_rings(vsi); unroll_vector_base: /* reclaim SW interrupts back to the common pool */ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); @@ -2299,94 +2444,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) } /** - * ice_free_res - free a block of resources - * @res: pointer to the resource - * @index: starting index previously returned by ice_get_res - * @id: identifier to track owner - * - * Returns number of resources freed - */ -int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) -{ - int count = 0; - int i; - - if (!res || index >= res->end) - return -EINVAL; - - id |= ICE_RES_VALID_BIT; - for (i = index; i < res->end && res->list[i] == id; i++) { - res->list[i] = 0; - count++; - } - - return count; -} - -/** - * ice_search_res - Search the tracker for a block of resources - * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner - * - * Returns the base item index of the block, or -ENOMEM for error - */ -static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) -{ - int start = 0, end = 0; - - if (needed > res->end) - return -ENOMEM; - - id |= ICE_RES_VALID_BIT; - - do { - /* skip already allocated entries */ - if (res->list[end++] & ICE_RES_VALID_BIT) { - start = end; - if ((start + needed) > res->end) - break; - } - - if (end == (start + needed)) { - int i = start; - - /* there was enough, so assign it to the requestor */ - while (i != end) - res->list[i++] = id; - - return start; - } - } while (end < res->end); - - return -ENOMEM; -} - -/** - * ice_get_res - get a block of resources - * @pf: board private structure - * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner - * - * Returns the base item index of the block, or negative for error - */ -int -ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) -{ - if (!res || !pf) - return -EINVAL; - - if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { - 
dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n", - needed, res->num_entries, id); - return -EINVAL; - } - - return ice_search_res(res, needed, id); -} - -/** * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI * @vsi: the VSI being un-configured */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index e2c0dadce920..04ca00799364 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -6,7 +6,7 @@ #include "ice.h" -const char *ice_vsi_type_str(enum ice_vsi_type type); +const char *ice_vsi_type_str(enum ice_vsi_type vsi_type); int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, @@ -30,9 +30,9 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi); int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena); -int ice_vsi_start_rx_rings(struct ice_vsi *vsi); +int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi); -int ice_vsi_stop_rx_rings(struct ice_vsi *vsi); +int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi); int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, @@ -42,6 +42,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi); int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi); +bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi); + int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc); void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); @@ -56,7 +58,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type type, u16 vf_id); + enum ice_vsi_type vsi_type, u16 vf_id); void ice_napi_del(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 5ef28052c0f8..306a4e5b2320 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -10,6 +10,7 @@ #include "ice_lib.h" #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" +#include "ice_devlink.h" #define DRV_VERSION_MAJOR 0 #define DRV_VERSION_MINOR 8 @@ -706,7 +707,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) /* Get FEC mode based on negotiated link info */ switch (vsi->port_info->phy.link_info.fec_info) { case ICE_AQ_LINK_25G_RS_528_FEC_EN: - /* fall through */ case ICE_AQ_LINK_25G_RS_544_FEC_EN: fec = "RS-FEC"; break; @@ -1029,6 +1029,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) if (ice_handle_link_event(pf, &event)) dev_err(dev, "Could not handle link event\n"); break; + case ice_aqc_opc_event_lan_overflow: + ice_vf_lan_overflow_event(pf, &event); + break; case ice_mbx_opc_send_msg_to_pf: ice_vc_process_vf_msg(pf, &event); break; @@ -1185,20 +1188,28 @@ static void ice_service_timer(struct timer_list *t) * ice_handle_mdd_event - handle malicious driver detect event * @pf: pointer to the PF structure * - * Called from service task. OICR interrupt handler indicates MDD event + * Called from service task. OICR interrupt handler indicates MDD event. + * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log + * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events + * disable the queue, the PF can be configured to reset the VF using ethtool + * private flag mdd-auto-reset-vf. 
*/ static void ice_handle_mdd_event(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - bool mdd_detected = false; u32 reg; int i; - if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) + if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) { + /* Since the VF MDD event logging is rate limited, check if + * there are pending MDD events. + */ + ice_print_vfs_mdd_events(pf); return; + } - /* find what triggered the MDD event */ + /* find what triggered an MDD event */ reg = rd32(hw, GL_MDET_TX_PQM); if (reg & GL_MDET_TX_PQM_VALID_M) { u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> @@ -1214,7 +1225,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); wr32(hw, GL_MDET_TX_PQM, 0xffffffff); - mdd_detected = true; } reg = rd32(hw, GL_MDET_TX_TCLAN); @@ -1232,7 +1242,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); - mdd_detected = true; } reg = rd32(hw, GL_MDET_RX); @@ -1250,85 +1259,85 @@ static void ice_handle_mdd_event(struct ice_pf *pf) dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); wr32(hw, GL_MDET_RX, 0xffffffff); - mdd_detected = true; } - if (mdd_detected) { - bool pf_mdd_detected = false; - - reg = rd32(hw, PF_MDET_TX_PQM); - if (reg & PF_MDET_TX_PQM_VALID_M) { - wr32(hw, PF_MDET_TX_PQM, 0xFFFF); - dev_info(dev, "TX driver issue detected, PF reset issued\n"); - pf_mdd_detected = true; - } + /* check to see if this PF caused an MDD event */ + reg = rd32(hw, PF_MDET_TX_PQM); + if (reg & PF_MDET_TX_PQM_VALID_M) { + wr32(hw, PF_MDET_TX_PQM, 0xFFFF); + if (netif_msg_tx_err(pf)) + dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n"); + } - reg = rd32(hw, PF_MDET_TX_TCLAN); - if (reg & PF_MDET_TX_TCLAN_VALID_M) { - wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); - dev_info(dev, "TX driver issue detected, PF reset issued\n"); - pf_mdd_detected = true; - } + reg = rd32(hw, PF_MDET_TX_TCLAN); + if (reg & PF_MDET_TX_TCLAN_VALID_M) { + wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); + if (netif_msg_tx_err(pf)) + dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); + } - reg = rd32(hw, PF_MDET_RX); - if (reg & PF_MDET_RX_VALID_M) { - wr32(hw, PF_MDET_RX, 0xFFFF); - dev_info(dev, "RX driver issue detected, PF reset issued\n"); - pf_mdd_detected = true; - } - /* Queue belongs to the PF initiate a reset */ - if (pf_mdd_detected) { - set_bit(__ICE_NEEDS_RESTART, pf->state); - ice_service_task_schedule(pf); - } + reg = rd32(hw, PF_MDET_RX); + if (reg & PF_MDET_RX_VALID_M) { + wr32(hw, PF_MDET_RX, 0xFFFF); + if (netif_msg_rx_err(pf)) + dev_info(dev, "Malicious Driver Detection event RX detected on PF\n"); } - /* check to see if one of the VFs caused the MDD */ + /* Check to see if one of the VFs caused an MDD event, and then + * increment counters and set print pending + */ ice_for_each_vf(pf, i) { struct ice_vf *vf = &pf->vf[i]; - bool vf_mdd_detected = false; - reg = rd32(hw, VP_MDET_TX_PQM(i)); if (reg & VP_MDET_TX_PQM_VALID_M) { wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); - vf_mdd_detected = true; - dev_info(dev, "TX driver issue detected on VF %d\n", - i); + vf->mdd_tx_events.count++; + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + if (netif_msg_tx_err(pf)) + dev_info(dev, "Malicious 
Driver Detection event TX_PQM detected on VF %d\n", + i); } reg = rd32(hw, VP_MDET_TX_TCLAN(i)); if (reg & VP_MDET_TX_TCLAN_VALID_M) { wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); - vf_mdd_detected = true; - dev_info(dev, "TX driver issue detected on VF %d\n", - i); + vf->mdd_tx_events.count++; + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + if (netif_msg_tx_err(pf)) + dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", + i); } reg = rd32(hw, VP_MDET_TX_TDPU(i)); if (reg & VP_MDET_TX_TDPU_VALID_M) { wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); - vf_mdd_detected = true; - dev_info(dev, "TX driver issue detected on VF %d\n", - i); + vf->mdd_tx_events.count++; + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + if (netif_msg_tx_err(pf)) + dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", + i); } reg = rd32(hw, VP_MDET_RX(i)); if (reg & VP_MDET_RX_VALID_M) { wr32(hw, VP_MDET_RX(i), 0xFFFF); - vf_mdd_detected = true; - dev_info(dev, "RX driver issue detected on VF %d\n", - i); - } - - if (vf_mdd_detected) { - vf->num_mdd_events++; - if (vf->num_mdd_events && - vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD) - dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n", - i, vf->num_mdd_events); + vf->mdd_rx_events.count++; + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + if (netif_msg_rx_err(pf)) + dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", + i); + + /* Since the queue is disabled on VF Rx MDD events, the + * PF can be configured to reset the VF through ethtool + * private flag mdd-auto-reset-vf. + */ + if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) + ice_reset_vf(&pf->vf[i], false); } } + + ice_print_vfs_mdd_events(pf); } /** @@ -1510,7 +1519,7 @@ static void ice_set_ctrlq_len(struct ice_hw *hw) hw->adminq.num_sq_entries = ICE_AQ_LEN; hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; - hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN; + hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; @@ -1916,8 +1925,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) { ret = ice_down(vsi); if (ret) { - NL_SET_ERR_MSG_MOD(extack, - "Preparing device for XDP attach failed"); + NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); return ret; } } @@ -1926,13 +1934,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, vsi->num_xdp_txq = vsi->alloc_txq; xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); if (xdp_ring_err) - NL_SET_ERR_MSG_MOD(extack, - "Setting up XDP Tx resources failed"); + NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { xdp_ring_err = ice_destroy_xdp_rings(vsi); if (xdp_ring_err) - NL_SET_ERR_MSG_MOD(extack, - "Freeing XDP Tx resources failed"); + NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); } else { ice_vsi_assign_bpf_prog(vsi, prog); } @@ -1965,8 +1971,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) struct ice_vsi *vsi = np->vsi; if (vsi->type != ICE_VSI_PF) { - NL_SET_ERR_MSG_MOD(xdp->extack, - "XDP can be loaded only on PF VSI"); + NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); return -EINVAL; } @@ -1993,6 +1998,14 @@ static 
void ice_ena_misc_vector(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; u32 val; + /* Disable anti-spoof detection interrupt to prevent spurious event + * interrupts during a function reset. Anti-spoof functionally is + * still supported. + */ + val = rd32(hw, GL_MDCK_TX_TDPU); + val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; + wr32(hw, GL_MDCK_TX_TDPU, val); + /* clear things first */ wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ rd32(hw, PFINT_OICR); /* read to clear */ @@ -2042,8 +2055,16 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) set_bit(__ICE_MDD_EVENT_PENDING, pf->state); } if (oicr & PFINT_OICR_VFLR_M) { - ena_mask &= ~PFINT_OICR_VFLR_M; - set_bit(__ICE_VFLR_EVENT_PENDING, pf->state); + /* disable any further VFLR event notifications */ + if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { + u32 reg = rd32(hw, PFINT_OICR_ENA); + + reg &= ~PFINT_OICR_VFLR_M; + wr32(hw, PFINT_OICR_ENA, reg); + } else { + ena_mask &= ~PFINT_OICR_VFLR_M; + set_bit(__ICE_VFLR_EVENT_PENDING, pf->state); + } } if (oicr & PFINT_OICR_GRST_M) { @@ -2351,10 +2372,16 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) u8 mac_addr[ETH_ALEN]; int err; + err = ice_devlink_create_port(pf); + if (err) + return err; + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, vsi->alloc_rxq); - if (!netdev) - return -ENOMEM; + if (!netdev) { + err = -ENOMEM; + goto err_destroy_devlink_port; + } vsi->netdev = netdev; np = netdev_priv(netdev); @@ -2384,7 +2411,9 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) err = register_netdev(vsi->netdev); if (err) - return err; + goto err_destroy_devlink_port; + + devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); netif_carrier_off(vsi->netdev); @@ -2392,6 +2421,11 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) netif_tx_stop_all_queues(vsi->netdev); return 0; + +err_destroy_devlink_port: + ice_devlink_destroy_port(pf); + + return err; } /** @@ -2461,16 +2495,19 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, if (vsi->info.pvid) return -EINVAL; - /* Enable VLAN pruning when VLAN 0 is added */ - if (unlikely(!vid)) { + /* VLAN 0 is added by default during load/reset */ + if (!vid) + return 0; + + /* Enable VLAN pruning when a VLAN other than 0 is added */ + if (!ice_vsi_is_vlan_pruning_ena(vsi)) { ret = ice_cfg_vlan_pruning(vsi, true, false); if (ret) return ret; } - /* Add all VLAN IDs including 0 to the switch filter. 
VLAN ID 0 is - * needed to continue allowing all untagged packets since VLAN prune - * list is applied to all packets by the switch + /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged + * packets aren't pruned by the device's internal switch on Rx */ ret = ice_vsi_add_vlan(vsi, vid); if (!ret) { @@ -2500,6 +2537,10 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, if (vsi->info.pvid) return -EINVAL; + /* don't allow removal of VLAN 0 */ + if (!vid) + return 0; + /* Make sure ice_vsi_kill_vlan is successful before updating VLAN * information */ @@ -2507,8 +2548,8 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, if (ret) return ret; - /* Disable VLAN pruning when VLAN 0 is removed */ - if (unlikely(!vid)) + /* Disable pruning when VLAN 0 is the only VLAN rule */ + if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) ret = ice_cfg_vlan_pruning(vsi, false, false); vsi->vlan_ena = false; @@ -2945,7 +2986,6 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) } break; case ICE_ERR_BUF_TOO_SHORT: - /* fall-through */ case ICE_ERR_CFG: dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); break; @@ -2977,7 +3017,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) default: break; } - /* fall-through */ + fallthrough; default: dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", *status); @@ -3069,30 +3109,22 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf) * followed by a EUI-64 identifier (PCIe Device Serial Number) */ struct pci_dev *pdev = pf->pdev; - char *opt_fw_filename = NULL; - u32 dword; - u8 dsn[8]; - int pos; + char *opt_fw_filename; + u64 dsn; /* Determine the name of the optional file using the DSN (two * dwords following the start of the DSN Capability). 
*/ - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); - if (pos) { - opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); - if (!opt_fw_filename) - return NULL; - - pci_read_config_dword(pdev, pos + 4, &dword); - put_unaligned_le32(dword, &dsn[0]); - pci_read_config_dword(pdev, pos + 8, &dword); - put_unaligned_le32(dword, &dsn[4]); - snprintf(opt_fw_filename, NAME_MAX, - "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg", - ICE_DDP_PKG_PATH, - dsn[7], dsn[6], dsn[5], dsn[4], - dsn[3], dsn[2], dsn[1], dsn[0]); - } + dsn = pci_get_dsn(pdev); + if (!dsn) + return NULL; + + opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); + if (!opt_fw_filename) + return NULL; + + snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llX.pkg", + ICE_DDP_PKG_PATH, dsn); return opt_fw_filename; } @@ -3166,7 +3198,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) return err; } - pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL); + pf = ice_allocate_pf(dev); if (!pf) return -ENOMEM; @@ -3204,6 +3236,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); + err = ice_devlink_register(pf); + if (err) { + dev_err(dev, "ice_devlink_register failed: %d\n", err); + goto err_exit_unroll; + } + #ifndef CONFIG_DYNAMIC_DEBUG if (debug < -1) hw->debug_mask = debug; @@ -3238,6 +3276,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_init_pf_unroll; } + ice_devlink_init_regions(pf); + pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; if (!pf->num_alloc_vsi) { err = -EIO; @@ -3336,6 +3376,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) return 0; err_alloc_sw_unroll: + ice_devlink_destroy_port(pf); set_bit(__ICE_SERVICE_DIS, pf->state); set_bit(__ICE_DOWN, pf->state); devm_kfree(dev, pf->first_sw); @@ -3346,8 +3387,10 @@ err_init_interrupt_unroll: devm_kfree(dev, pf->vsi); err_init_pf_unroll: ice_deinit_pf(pf); + ice_devlink_destroy_regions(pf); ice_deinit_hw(hw); err_exit_unroll: + ice_devlink_unregister(pf); pci_disable_pcie_error_reporting(pdev); return err; } @@ -3370,11 +3413,15 @@ static void ice_remove(struct pci_dev *pdev) msleep(100); } + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { + set_bit(__ICE_VF_RESETS_DISABLED, pf->state); + ice_free_vfs(pf); + } + set_bit(__ICE_DOWN, pf->state); ice_service_task_stop(pf); - if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) - ice_free_vfs(pf); + ice_devlink_destroy_port(pf); ice_vsi_release_all(pf); ice_free_irq_msix_misc(pf); ice_for_each_vsi(pf, i) { @@ -3383,7 +3430,10 @@ static void ice_remove(struct pci_dev *pdev) ice_vsi_free_q_vectors(pf->vsi[i]); } ice_deinit_pf(pf); + ice_devlink_destroy_regions(pf); ice_deinit_hw(&pf->hw); + ice_devlink_unregister(pf); + /* Issue a PFR as part of the prescribed driver unload flow. Do not * do it via ice_schedule_reset() since there is no need to rebuild * and the service task is already stopped. 
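
ice_get_opt_fw_name() now leans on pci_get_dsn(), which returns the eight DSN bytes as a single u64, so building the name reduces to one snprintf(). For a device whose DSN is 0x1234567887654321 the lookup name comes out as below; ICE_DDP_PKG_PATH is shown with the value the driver uses, and the DSN itself is made up.

#include <stdio.h>

#define ICE_DDP_PKG_PATH "intel/ice/ddp/"	/* driver's package directory */

int main(void)
{
	unsigned long long dsn = 0x1234567887654321ULL;	/* example DSN */
	char name[256];

	/* same format string the patch introduces */
	snprintf(name, sizeof(name), "%sice-%016llX.pkg", ICE_DDP_PKG_PATH, dsn);
	printf("%s\n", name);	/* intel/ice/ddp/ice-1234567887654321.pkg */
	return 0;
}
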
@@ -3534,15 +3584,26 @@ static const struct pci_device_id ice_pci_tbl[] = { { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, /* required last entry */ { 0, } }; @@ -3961,7 +4022,7 @@ static int ice_up_complete(struct ice_vsi *vsi) * Tx queue group list was configured and the context bits were * programmed using ice_vsi_cfg_txqs */ - err = ice_vsi_start_rx_rings(vsi); + err = ice_vsi_start_all_rx_rings(vsi); if (err) return err; @@ -4340,7 +4401,7 @@ int ice_down(struct ice_vsi *vsi) vsi->vsi_num, tx_err); } - rx_err = ice_vsi_stop_rx_rings(vsi); + rx_err = ice_vsi_stop_all_rx_rings(vsi); if (rx_err) netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", vsi->vsi_num, rx_err); @@ -5027,6 +5088,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, /** * ice_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure + * @txqueue: Tx queue */ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) { @@ -5064,13 +5126,13 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) /* Read interrupt register */ val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); - netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", + netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", vsi->vsi_num, txqueue, tx_ring->next_to_clean, head, tx_ring->next_to_use, val); } pf->tx_timeout_last_recovery = jiffies; - netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n", + netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", pf->tx_timeout_recovery_level, txqueue); switch (pf->tx_timeout_recovery_level) { diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 7525ac50742e..8beb675d676b 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -11,25 +11,29 @@ * @length: length of the section to be read (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series + * @read_shadow_ram: tell if this is a shadow RAM read * @cd: pointer to command details structure or NULL * * Read the NVM using the admin queue commands (0x0701) */ 
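
In ice_aq_read_nvm() below, the old top-byte mask test and the new comparison reject exactly the same offsets, given that ICE_AQC_NVM_MAX_OFFSET is the 24-bit limit 0xFFFFFF (its value in the driver). A quick standalone check of the equivalence around the boundary:

#include <assert.h>

#define ICE_AQC_NVM_MAX_OFFSET 0xFFFFFF	/* 24-bit AdminQ offset limit */

int main(void)
{
	/* sweep across the 24-bit boundary; equivalence holds for all u32 */
	for (unsigned long long off = 0; off <= 0x1000000ULL; off++)
		assert(((off & 0xFF000000) != 0) ==
		       (off > ICE_AQC_NVM_MAX_OFFSET));
	return 0;
}
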
static enum ice_status ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, - void *data, bool last_command, struct ice_sq_cd *cd) + void *data, bool last_command, bool read_shadow_ram, + struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; cmd = &desc.params.nvm; - /* In offset the highest byte must be zeroed. */ - if (offset & 0xFF000000) + if (offset > ICE_AQC_NVM_MAX_OFFSET) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); + if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT) + cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY; + /* If this is the last command in a series, set the proper flag. */ if (last_command) cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; @@ -42,65 +46,64 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, } /** - * ice_check_sr_access_params - verify params for Shadow RAM R/W operations. - * @hw: pointer to the HW structure - * @offset: offset in words from module start - * @words: number of words to access + * ice_read_flat_nvm - Read portion of NVM by flat offset + * @hw: pointer to the HW struct + * @offset: offset from beginning of NVM + * @length: (in) number of bytes to read; (out) number of bytes actually read + * @data: buffer to return data in (sized to fit the specified length) + * @read_shadow_ram: if true, read from shadow RAM instead of NVM + * + * Reads a portion of the NVM, as a flat memory space. This function correctly + * breaks read requests across Shadow RAM sectors and ensures that no single + * read request exceeds the maximum 4Kb read for a single AdminQ command. + * + * Returns a status code on failure. Note that the data pointer may be + * partially updated if some reads succeed before a failure. */ -static enum ice_status -ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words) +enum ice_status +ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, + bool read_shadow_ram) { - if ((offset + words) > hw->nvm.sr_words) { - ice_debug(hw, ICE_DBG_NVM, - "NVM error: offset beyond SR lmt.\n"); - return ICE_ERR_PARAM; - } + enum ice_status status; + u32 inlen = *length; + u32 bytes_read = 0; + bool last_cmd; - if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) { - /* We can access only up to 4KB (one sector), in one AQ write */ - ice_debug(hw, ICE_DBG_NVM, - "NVM error: tried to access %d words, limit is %d.\n", - words, ICE_SR_SECTOR_SIZE_IN_WORDS); - return ICE_ERR_PARAM; - } + *length = 0; - if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) != - (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) { - /* A single access cannot spread over two sectors */ + /* Verify the length of the read if this is for the Shadow RAM */ + if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) { ice_debug(hw, ICE_DBG_NVM, - "NVM error: cannot spread over two sectors.\n"); + "NVM error: requested offset is beyond Shadow RAM limit\n"); return ICE_ERR_PARAM; } - return 0; -} + do { + u32 read_size, sector_offset; -/** - * ice_read_sr_aq - Read Shadow RAM. - * @hw: pointer to the HW structure - * @offset: offset in words from module start - * @words: number of words to read - * @data: buffer for words reads from Shadow RAM - * @last_command: tells the AdminQ that this is the last command - * - * Reads 16-bit word buffers from the Shadow RAM using the admin command. 
- */ -static enum ice_status -ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data, - bool last_command) -{ - enum ice_status status; + /* ice_aq_read_nvm cannot read more than 4Kb at a time. + * Additionally, a read from the Shadow RAM may not cross over + * a sector boundary. Conveniently, the sector size is also + * 4Kb. + */ + sector_offset = offset % ICE_AQ_MAX_BUF_LEN; + read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset, + inlen - bytes_read); - status = ice_check_sr_access_params(hw, offset, words); + last_cmd = !(bytes_read + read_size < inlen); - /* values in "offset" and "words" parameters are sized as words - * (16 bits) but ice_aq_read_nvm expects these values in bytes. - * So do this conversion while calling ice_aq_read_nvm. - */ - if (!status) - status = ice_aq_read_nvm(hw, 0, 2 * offset, 2 * words, data, - last_command, NULL); + status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, + offset, read_size, + data + bytes_read, last_cmd, + read_shadow_ram, NULL); + if (status) + break; + bytes_read += read_size; + offset += read_size; + } while (!last_cmd); + + *length = bytes_read; return status; } @@ -110,75 +113,25 @@ ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data, * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * - * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_aq method. + * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm. */ static enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) { + u32 bytes = sizeof(u16); enum ice_status status; + __le16 data_local; - status = ice_read_sr_aq(hw, offset, 1, data, true); - if (!status) - *data = le16_to_cpu(*(__force __le16 *)data); - - return status; -} - -/** - * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ - * @hw: pointer to the HW structure - * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) - * @words: (in) number of words to read; (out) number of words actually read - * @data: words read from the Shadow RAM - * - * Reads 16 bit words (data buf) from the SR using the ice_read_sr_aq - * method. Ownership of the NVM is taken before reading the buffer and later - * released. - */ -static enum ice_status -ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) -{ - enum ice_status status; - bool last_cmd = false; - u16 words_read = 0; - u16 i = 0; - - do { - u16 read_size, off_w; - - /* Calculate number of bytes we should read in this step. - * It's not allowed to read more than one page at a time or - * to cross page boundaries. - */ - off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS; - read_size = off_w ? - min_t(u16, *words, - (ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) : - min_t(u16, (*words - words_read), - ICE_SR_SECTOR_SIZE_IN_WORDS); - - /* Check if this is last command, if so set proper flag */ - if ((words_read + read_size) >= *words) - last_cmd = true; - - status = ice_read_sr_aq(hw, offset, read_size, - data + words_read, last_cmd); - if (status) - goto read_nvm_buf_aq_exit; - - /* Increment counter for words already read and move offset to - * new read location - */ - words_read += read_size; - offset += read_size; - } while (words_read < *words); - - for (i = 0; i < *words; i++) - data[i] = le16_to_cpu(((__force __le16 *)data)[i]); + /* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and + * Shadow RAM sector restrictions necessary when reading from the NVM. 
+ */ + status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes, + (u8 *)&data_local, true); + if (status) + return status; -read_nvm_buf_aq_exit: - *words = words_read; - return status; + *data = le16_to_cpu(data_local); + return 0; } /** @@ -188,7 +141,7 @@ read_nvm_buf_aq_exit: * * This function will request NVM ownership. */ -static enum ice_status +enum ice_status ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) { if (hw->nvm.blank_nvm_mode) @@ -203,7 +156,7 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) * * This function will release NVM ownership. */ -static void ice_release_nvm(struct ice_hw *hw) +void ice_release_nvm(struct ice_hw *hw) { if (hw->nvm.blank_nvm_mode) return; @@ -233,6 +186,239 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) } /** + * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA + * @hw: pointer to hardware structure + * @module_tlv: pointer to module TLV to return + * @module_tlv_len: pointer to module TLV length to return + * @module_type: module type requested + * + * Finds the requested sub module TLV type from the Preserved Field + * Area (PFA) and returns the TLV pointer and length. The caller can + * use these to read the variable length TLV value. + */ +enum ice_status +ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + u16 module_type) +{ + enum ice_status status; + u16 pfa_len, pfa_ptr; + u16 next_tlv; + + status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n"); + return status; + } + status = ice_read_sr_word(hw, pfa_ptr, &pfa_len); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); + return status; + } + /* Starting with first TLV after PFA length, iterate through the list + * of TLVs to find the requested one. + */ + next_tlv = pfa_ptr + 1; + while (next_tlv < pfa_ptr + pfa_len) { + u16 tlv_sub_module_type; + u16 tlv_len; + + /* Read TLV type */ + status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); + break; + } + /* Read TLV length */ + status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); + break; + } + if (tlv_sub_module_type == module_type) { + if (tlv_len) { + *module_tlv = next_tlv; + *module_tlv_len = tlv_len; + return 0; + } + return ICE_ERR_INVAL_SIZE; + } + /* Check next TLV, i.e. current TLV pointer + length + 2 words + * (for current TLV's type and length) + */ + next_tlv = next_tlv + tlv_len + 2; + } + /* Module does not exist */ + return ICE_ERR_DOES_NOT_EXIST; +} + +/** + * ice_read_pba_string - Reads part number string from NVM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the NVM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the NVM. 
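ice_get_pfa_module_tlv above walks the Preserved Field Area as a flat type/length list measured in 16-bit words. A self-contained sketch of the same traversal, with a hypothetical word array standing in for the Shadow RAM reads:

```c
#include <stdint.h>
#include <stdio.h>

/* Each TLV is [type word][length word][length value words]; word 0 of
 * the array stands in for the PFA length word read from Shadow RAM.
 */
static int find_tlv(const uint16_t *pfa, unsigned int pfa_len,
		    unsigned int want_type,
		    unsigned int *tlv_idx, unsigned int *tlv_len)
{
	unsigned int next = 1; /* first TLV follows the PFA length word */

	while (next < pfa_len) {
		if (pfa[next] == want_type) {
			*tlv_idx = next;
			*tlv_len = pfa[next + 1];
			return 0;
		}
		/* skip the type and length words plus the value words */
		next += pfa[next + 1] + 2;
	}
	return -1; /* module does not exist */
}

int main(void)
{
	/* hypothetical PFA: length 7, TLV(type 1, len 1), TLV(type 0x16, len 2) */
	const uint16_t pfa[] = { 7, 1, 1, 0xAAAA, 0x16, 2, 0x1111, 0x2222 };
	unsigned int idx, len;

	if (!find_tlv(pfa, pfa[0], 0x16, &idx, &len))
		printf("found TLV at word %u, %u value words\n", idx, len);
	return 0;
}
```

The driver additionally rejects a matching TLV whose length word is zero; the sketch omits that check for brevity.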
+ */ +enum ice_status +ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + u16 pba_tlv, pba_tlv_len; + enum ice_status status; + u16 pba_word, pba_size; + u16 i; + + status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, + ICE_SR_PBA_BLOCK_PTR); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n"); + return status; + } + + /* pba_size is the next word */ + status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n"); + return status; + } + + if (pba_tlv_len < pba_size) { + ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); + return ICE_ERR_INVAL_SIZE; + } + + /* Subtract one to get PBA word count (PBA Size word is included in + * total size) + */ + pba_size--; + if (pba_num_size < (((u32)pba_size * 2) + 1)) { + ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); + return ICE_ERR_PARAM; + } + + for (i = 0; i < pba_size; i++) { + status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i); + return status; + } + + pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; + pba_num[(i * 2) + 1] = pba_word & 0xFF; + } + pba_num[(pba_size * 2)] = '\0'; + + return status; +} + +/** + * ice_get_orom_ver_info - Read Option ROM version information + * @hw: pointer to the HW struct + * + * Read the Combo Image version data from the Boot Configuration TLV and fill + * in the option ROM version data. + */ +static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw) +{ + u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len; + struct ice_orom_info *orom = &hw->nvm.orom; + enum ice_status status; + u32 combo_ver; + + status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len, + ICE_SR_BOOT_CFG_PTR); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read Boot Configuration Block TLV.\n"); + return status; + } + + /* Boot Configuration Block must have length at least 2 words + * (Combo Image Version High and Combo Image Version Low) + */ + if (boot_cfg_tlv_len < 2) { + ice_debug(hw, ICE_DBG_INIT, + "Invalid Boot Configuration Block TLV size.\n"); + return ICE_ERR_INVAL_SIZE; + } + + status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF), + &combo_hi); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n"); + return status; + } + + status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1), + &combo_lo); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n"); + return status; + } + + combo_ver = ((u32)combo_hi << 16) | combo_lo; + + orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> + ICE_OROM_VER_SHIFT); + orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK); + orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> + ICE_OROM_VER_BUILD_SHIFT); + + return 0; +} + +/** + * ice_discover_flash_size - Discover the available flash size. + * @hw: pointer to the HW struct + * + * The device flash could be up to 16MB in size. However, it is possible that + * the actual size is smaller. Use bisection to determine the accessible size + * of flash memory. 
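ice_discover_flash_size, described above and implemented just below, binary-searches for the first unreadable byte: firmware fails a one-byte read past the end of flash with EINVAL, while reads inside succeed. A userspace sketch of the probe loop, with a hypothetical probe() callback in place of ice_read_flat_nvm:

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_OFFSET 0x00FFFFFFu /* mirrors ICE_AQC_NVM_MAX_OFFSET (16 MB space) */

/* Hypothetical stand-in for a one-byte flash read: true on success. */
static bool probe(unsigned int offset, unsigned int flash_end)
{
	return offset < flash_end;
}

int main(void)
{
	unsigned int min_size = 0, max_size = MAX_OFFSET + 1;
	unsigned int actual = 8 * 1024 * 1024; /* pretend 8 MB of flash */

	while (max_size - min_size > 1) {
		unsigned int offset = (max_size + min_size) / 2;

		if (probe(offset, actual))
			min_size = offset; /* readable: size is above this */
		else
			max_size = offset; /* EINVAL: size is at or below */
	}
	printf("discovered flash size: %u bytes\n", max_size);
	return 0;
}
```

The loop converges on max_size equal to the first offset that fails, which is exactly the byte count the driver stores in hw->nvm.flash_size.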
+ */ +static enum ice_status ice_discover_flash_size(struct ice_hw *hw) +{ + u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1; + enum ice_status status; + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (status) + return status; + + while ((max_size - min_size) > 1) { + u32 offset = (max_size + min_size) / 2; + u32 len = 1; + u8 data; + + status = ice_read_flat_nvm(hw, offset, &len, &data, false); + if (status == ICE_ERR_AQ_ERROR && + hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { + ice_debug(hw, ICE_DBG_NVM, + "%s: New upper bound of %u bytes\n", + __func__, offset); + status = 0; + max_size = offset; + } else if (!status) { + ice_debug(hw, ICE_DBG_NVM, + "%s: New lower bound of %u bytes\n", + __func__, offset); + min_size = offset; + } else { + /* an unexpected error occurred */ + goto err_read_flat_nvm; + } + } + + ice_debug(hw, ICE_DBG_NVM, + "Predicted flash size is %u bytes\n", max_size); + + hw->nvm.flash_size = max_size; + +err_read_flat_nvm: + ice_release_nvm(hw); + + return status; +} + +/** * ice_init_nvm - initializes NVM setting * @hw: pointer to the HW struct * @@ -241,9 +427,8 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) */ enum ice_status ice_init_nvm(struct ice_hw *hw) { - u16 oem_hi, oem_lo, boot_cfg_tlv, boot_cfg_tlv_len; struct ice_nvm_info *nvm = &hw->nvm; - u16 eetrack_lo, eetrack_hi; + u16 eetrack_lo, eetrack_hi, ver; enum ice_status status; u32 fla, gens_stat; u8 sr_size; @@ -269,12 +454,14 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) return ICE_ERR_NVM_BLANK_MODE; } - status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &nvm->ver); + status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n"); return status; } + nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; + nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo); if (status) { @@ -289,80 +476,49 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) nvm->eetrack = (eetrack_hi << 16) | eetrack_lo; - /* the following devices do not have boot_cfg_tlv yet */ - if (hw->device_id == ICE_DEV_ID_E822C_BACKPLANE || - hw->device_id == ICE_DEV_ID_E822C_QSFP || - hw->device_id == ICE_DEV_ID_E822C_10G_BASE_T || - hw->device_id == ICE_DEV_ID_E822C_SGMII || - hw->device_id == ICE_DEV_ID_E822C_SFP || - hw->device_id == ICE_DEV_ID_E822X_BACKPLANE || - hw->device_id == ICE_DEV_ID_E822L_SFP || - hw->device_id == ICE_DEV_ID_E822L_10G_BASE_T || - hw->device_id == ICE_DEV_ID_E822L_SGMII) - return status; - - status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len, - ICE_SR_BOOT_CFG_PTR); + status = ice_discover_flash_size(hw); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read Boot Configuration Block TLV.\n"); + ice_debug(hw, ICE_DBG_NVM, + "NVM init error: failed to discover flash size.\n"); return status; } - /* Boot Configuration Block must have length at least 2 words - * (Combo Image Version High and Combo Image Version Low) - */ - if (boot_cfg_tlv_len < 2) { - ice_debug(hw, ICE_DBG_INIT, - "Invalid Boot Configuration Block TLV size.\n"); - return ICE_ERR_INVAL_SIZE; - } - - status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF), - &oem_hi); - if (status) { - ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER hi.\n"); + switch (hw->device_id) { + /* the following devices do not have boot_cfg_tlv yet */ + case ICE_DEV_ID_E823C_BACKPLANE: + case 
ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_SGMII: + case ICE_DEV_ID_E822C_BACKPLANE: + case ICE_DEV_ID_E822C_QSFP: + case ICE_DEV_ID_E822C_10G_BASE_T: + case ICE_DEV_ID_E822C_SGMII: + case ICE_DEV_ID_E822C_SFP: + case ICE_DEV_ID_E822L_BACKPLANE: + case ICE_DEV_ID_E822L_SFP: + case ICE_DEV_ID_E822L_10G_BASE_T: + case ICE_DEV_ID_E822L_SGMII: + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_QSFP: return status; + default: + break; } - status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF + 1), - &oem_lo); + status = ice_get_orom_ver_info(hw); if (status) { - ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER lo.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n"); return status; } - nvm->oem_ver = ((u32)oem_hi << 16) | oem_lo; - return 0; } /** - * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary - * @hw: pointer to the HW structure - * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) - * @words: (in) number of words to read; (out) number of words actually read - * @data: words read from the Shadow RAM - * - * Reads 16 bit words (data buf) from the SR using the ice_read_nvm_buf_aq - * method. The buf read is preceded by the NVM ownership take - * and followed by the release. - */ -enum ice_status -ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) -{ - enum ice_status status; - - status = ice_acquire_nvm(hw, ICE_RES_READ); - if (!status) { - status = ice_read_sr_buf_aq(hw, offset, words, data); - ice_release_nvm(hw); - } - - return status; -} - -/** * ice_nvm_validate_checksum * @hw: pointer to the HW struct * diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index a9fa011c22c6..999f273ba6ad 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -4,5 +4,17 @@ #ifndef _ICE_NVM_H_ #define _ICE_NVM_H_ +enum ice_status +ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); +void ice_release_nvm(struct ice_hw *hw); +enum ice_status +ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, + bool read_shadow_ram); +enum ice_status +ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + u16 module_type); +enum ice_status +ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); +enum ice_status ice_init_nvm(struct ice_hw *hw); enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); #endif /* _ICE_NVM_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index d2db0d04e117..554f567476f3 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -121,9 +121,7 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) speed = (u32)VIRTCHNL_LINK_SPEED_25GB; break; case ICE_AQ_LINK_SPEED_40GB: - /* fall through */ case ICE_AQ_LINK_SPEED_50GB: - /* fall through */ case ICE_AQ_LINK_SPEED_100GB: speed = (u32)VIRTCHNL_LINK_SPEED_40GB; break; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 431266081a80..51825a203e35 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -578,7 +578,7 @@ enum ice_status 
ice_get_initial_sw_cfg(struct ice_hw *hw) struct ice_aqc_get_sw_cfg_resp_elem *ele; u16 pf_vf_num, swid, vsi_port_num; bool is_vf = false; - u8 type; + u8 res_type; ele = rbuf[i].elements; vsi_port_num = le16_to_cpu(ele->vsi_port_num) & @@ -593,16 +593,16 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) ICE_AQC_GET_SW_CONF_RESP_IS_VF) is_vf = true; - type = le16_to_cpu(ele->vsi_port_num) >> + res_type = le16_to_cpu(ele->vsi_port_num) >> ICE_AQC_GET_SW_CONF_RESP_TYPE_S; - if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) { + if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) { /* FW VSI is not needed. Just continue. */ continue; } ice_init_port_info(hw->port_info, vsi_port_num, - type, swid, pf_vf_num, is_vf); + res_type, swid, pf_vf_num, is_vf); } } while (req_desc && !status); @@ -760,7 +760,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, break; case ICE_SW_LKUP_ETHERTYPE_MAC: daddr = f_info->l_data.ethertype_mac.mac_addr; - /* fall-through */ + fallthrough; case ICE_SW_LKUP_ETHERTYPE: off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype); @@ -771,7 +771,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, break; case ICE_SW_LKUP_PROMISC_VLAN: vlan_id = f_info->l_data.mac_vlan.vlan_id; - /* fall-through */ + fallthrough; case ICE_SW_LKUP_PROMISC: daddr = f_info->l_data.mac_vlan.mac_addr; break; @@ -958,7 +958,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, struct ice_aqc_sw_rules_elem *s_rule; enum ice_status status; u16 s_rule_size; - u16 type; + u16 rule_type; int i; if (!num_vsi) @@ -970,11 +970,11 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN) - type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : - ICE_AQC_SW_RULES_T_VSI_LIST_SET; + rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : + ICE_AQC_SW_RULES_T_VSI_LIST_SET; else if (lkup_type == ICE_SW_LKUP_VLAN) - type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : - ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; + rule_type = remove ? 
ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : + ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; else return ICE_ERR_PARAM; @@ -992,7 +992,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i])); } - s_rule->type = cpu_to_le16(type); + s_rule->type = cpu_to_le16(rule_type); s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi); s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 4de61dbedd36..f67e8362958c 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -453,10 +453,10 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, break; default: bpf_warn_invalid_xdp_action(act); - /* fallthrough -- not supported action */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - /* fallthrough -- handle aborts by dropping frame */ + fallthrough; case XDP_DROP: result = ICE_XDP_CONSUMED; break; @@ -1188,7 +1188,6 @@ ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, avg_pkt_size + 640); break; case ICE_AQ_LINK_SPEED_10GB: - /* fall through */ default: itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), avg_pkt_size + 640); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index db0ef6ba907f..4ce5f92fca4a 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -239,12 +239,21 @@ struct ice_fc_info { enum ice_fc_mode req_mode; /* FC mode requested by caller */ }; +/* Option ROM version information */ +struct ice_orom_info { + u8 major; /* Major version of OROM */ + u8 patch; /* Patch version of OROM */ + u16 build; /* Build version of OROM */ +}; + /* NVM Information */ struct ice_nvm_info { - u32 eetrack; /* NVM data version */ - u32 oem_ver; /* OEM version info */ - u16 sr_words; /* Shadow RAM size in words */ - u16 ver; /* NVM package version */ + struct ice_orom_info orom; /* Option ROM version info */ + u32 eetrack; /* NVM data version */ + u16 sr_words; /* Shadow RAM size in words */ + u32 flash_size; /* Size of available flash in bytes */ + u8 major_ver; /* major version of NVM package */ + u8 minor_ver; /* minor version of dev starter */ u8 blank_nvm_mode; /* is NVM empty (no FW present) */ }; @@ -626,7 +635,8 @@ struct ice_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define ICE_SR_BOOT_CFG_PTR 0x132 -#define ICE_NVM_OEM_VER_OFF 0x02 +#define ICE_NVM_OROM_VER_OFF 0x02 +#define ICE_SR_PBA_BLOCK_PTR 0x16 #define ICE_SR_NVM_DEV_STARTER_VER 0x18 #define ICE_SR_NVM_EETRACK_LO 0x2D #define ICE_SR_NVM_EETRACK_HI 0x2E @@ -634,12 +644,12 @@ struct ice_hw_port_stats { #define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT) #define ICE_NVM_VER_HI_SHIFT 12 #define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT) -#define ICE_OEM_VER_PATCH_SHIFT 0 -#define ICE_OEM_VER_PATCH_MASK (0xff << ICE_OEM_VER_PATCH_SHIFT) -#define ICE_OEM_VER_BUILD_SHIFT 8 -#define ICE_OEM_VER_BUILD_MASK (0xffff << ICE_OEM_VER_BUILD_SHIFT) -#define ICE_OEM_VER_SHIFT 24 -#define ICE_OEM_VER_MASK (0xff << ICE_OEM_VER_SHIFT) +#define ICE_OROM_VER_PATCH_SHIFT 0 +#define ICE_OROM_VER_PATCH_MASK (0xff << ICE_OROM_VER_PATCH_SHIFT) +#define ICE_OROM_VER_BUILD_SHIFT 8 +#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT) +#define ICE_OROM_VER_SHIFT 24 +#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) #define ICE_SR_PFA_PTR 0x40 
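Given the ICE_OROM_VER_* masks and shifts defined above, decoding the combo image version word (assembled earlier as (combo_hi << 16) | combo_lo from two Shadow RAM words) is three shifts. A quick illustration with a made-up version word:

```c
#include <stdio.h>

/* Layout per the ICE_OROM_VER_* macros above: major in bits 31:24,
 * build in bits 23:8, patch in bits 7:0 of the combo version word.
 */
int main(void)
{
	unsigned int combo_ver = 0x01203904; /* hypothetical version word */
	unsigned int major = (combo_ver >> 24) & 0xFF;
	unsigned int build = (combo_ver >> 8) & 0xFFFF;
	unsigned int patch = combo_ver & 0xFF;

	printf("OROM %u.%u.%u\n", major, build, patch); /* OROM 1.8249.4 */
	return 0;
}
```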
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 #define ICE_SR_WORDS_IN_1KB 512 diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 75c70d432c72..15191a325918 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -91,6 +91,39 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe, } /** + * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled + * @vf: the VF to check + * + * Returns true if the VF has no Rx and no Tx queues enabled and returns false + * otherwise + */ +static bool ice_vf_has_no_qs_ena(struct ice_vf *vf) +{ + return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) && + !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF)); +} + +/** + * ice_is_vf_link_up - check if the VF's link is up + * @vf: VF to check if link is up + */ +static bool ice_is_vf_link_up(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + + if (ice_check_vf_init(pf, vf)) + return false; + + if (ice_vf_has_no_qs_ena(vf)) + return false; + else if (vf->link_forced) + return vf->link_up; + else + return pf->hw.port_info->phy.link_info.link_info & + ICE_AQ_LINK_UP; +} + +/** * ice_vc_notify_vf_link_state - Inform a VF of link status * @vf: pointer to the VF structure * @@ -99,28 +132,16 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe, static void ice_vc_notify_vf_link_state(struct ice_vf *vf) { struct virtchnl_pf_event pfe = { 0 }; - struct ice_link_status *ls; - struct ice_pf *pf = vf->pf; - struct ice_hw *hw; - - hw = &pf->hw; - ls = &hw->port_info->phy.link_info; + struct ice_hw *hw = &vf->pf->hw; pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; - /* Always report link is down if the VF queues aren't enabled */ - if (!vf->num_qs_ena) { + if (ice_is_vf_link_up(vf)) + ice_set_pfe_link(vf, &pfe, + hw->port_info->phy.link_info.link_speed, true); + else ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false); - } else if (vf->link_forced) { - u16 link_speed = vf->link_up ? - ls->link_speed : ICE_AQ_LINK_SPEED_UNKNOWN; - - ice_set_pfe_link(vf, &pfe, link_speed, vf->link_up); - } else { - ice_set_pfe_link(vf, &pfe, ls->link_speed, - ls->link_info & ICE_AQ_LINK_UP); - } ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, @@ -149,7 +170,12 @@ static void ice_free_vf_res(struct ice_vf *vf) vf->num_mac = 0; } - last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1; + last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1; + + /* clear VF MDD event information */ + memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); + memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); + /* Disable interrupts so that VF starts in a known state */ for (i = vf->first_vector_idx; i <= last_vector_idx; i++) { wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); @@ -180,7 +206,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); first = vf->first_vector_idx; - last = first + pf->num_vf_msix - 1; + last = first + pf->num_msix_per_vf - 1; for (v = first; v <= last; v++) { u32 reg; @@ -206,11 +232,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) * ice_sriov_free_msix_res - Reset/free any used MSIX resources * @pf: pointer to the PF structure * - * If MSIX entries from the pf->irq_tracker were needed then we need to - * reset the irq_tracker->end and give back the entries we needed to - * num_avail_sw_msix. 
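The new ice_is_vf_link_up helper introduced earlier in this file's changes reduces the link-state decision to a fixed precedence: an uninitialized VF or one with no enabled queues is always reported down, a forced state overrides the PHY, and only then does the real link status apply. A compact sketch of that precedence (the field names here are illustrative, not the driver's):

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the state ice_is_vf_link_up consults. */
struct vf_state {
	bool init_done;   /* VF finished initialization */
	bool any_q_ena;   /* any Rx or Tx queue bit set */
	bool link_forced; /* admin forced the link state */
	bool link_up;     /* forced value, used when link_forced */
	bool phy_link_up; /* real PHY status */
};

static bool vf_link_up(const struct vf_state *vf)
{
	if (!vf->init_done)
		return false;
	if (!vf->any_q_ena)  /* no queues enabled: always report down */
		return false;
	if (vf->link_forced) /* forced state overrides the PHY */
		return vf->link_up;
	return vf->phy_link_up;
}

int main(void)
{
	struct vf_state vf = {
		.init_done = true, .any_q_ena = true,
		.link_forced = true, .link_up = false, .phy_link_up = true,
	};

	/* reported down even though the PHY is up, because it is forced */
	printf("link %s\n", vf_link_up(&vf) ? "up" : "down");
	return 0;
}
```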
- * - * If no MSIX entries were taken from the pf->irq_tracker then just clear + * Since no MSIX entries are taken from the pf->irq_tracker then just clear * the pf->sriov_base_vector. * * Returns 0 on success, and -EINVAL on error. @@ -227,11 +249,7 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf) return -EINVAL; /* give back irq_tracker resources used */ - if (pf->sriov_base_vector < res->num_entries) { - res->end = res->num_entries; - pf->num_avail_sw_msix += - res->num_entries - pf->sriov_base_vector; - } + WARN_ON(pf->sriov_base_vector < res->num_entries); pf->sriov_base_vector = 0; @@ -245,9 +263,8 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf) void ice_set_vf_state_qs_dis(struct ice_vf *vf) { /* Clear Rx/Tx enabled queues flag */ - bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF); - bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF); - vf->num_qs_ena = 0; + bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); + bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); } @@ -263,7 +280,7 @@ static void ice_dis_vf_qs(struct ice_vf *vf) vsi = pf->vsi[vf->lan_vsi_idx]; ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); - ice_vsi_stop_rx_rings(vsi); + ice_vsi_stop_all_rx_rings(vsi); ice_set_vf_state_qs_dis(vf); } @@ -283,11 +300,6 @@ void ice_free_vfs(struct ice_pf *pf) while (test_and_set_bit(__ICE_VF_DIS, pf->state)) usleep_range(1000, 2000); - /* Avoid wait time by stopping all VFs at the same time */ - ice_for_each_vf(pf, i) - if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states)) - ice_dis_vf_qs(&pf->vf[i]); - /* Disable IOV before freeing resources. This lets any VF drivers * running in the host get themselves cleaned up before we yank * the carpet out from underneath their feet. @@ -297,8 +309,13 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); + /* Avoid wait time by stopping all VFs at the same time */ + ice_for_each_vf(pf, i) + if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states)) + ice_dis_vf_qs(&pf->vf[i]); + tmp = pf->num_alloc_vfs; - pf->num_vf_qps = 0; + pf->num_qps_per_vf = 0; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { @@ -407,43 +424,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) } /** - * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID - * @ctxt: the VSI ctxt to fill - * @vid: the VLAN ID to set as a PVID - */ -static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid) -{ - ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED | - ICE_AQ_VSI_PVLAN_INSERT_PVID | - ICE_AQ_VSI_VLAN_EMOD_STR); - ctxt->info.pvid = cpu_to_le16(vid); - ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; - ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | - ICE_AQ_VSI_PROP_SW_VALID); -} - -/** - * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID - * @ctxt: the VSI ctxt to fill - */ -static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt) -{ - ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; - ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; - ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; - ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | - ICE_AQ_VSI_PROP_SW_VALID); -} - -/** * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI * @vsi: the VSI to update - * @vid: the VLAN ID to set as a PVID + * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field 
* @enable: true for enable PVID false for disable */ -static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable) +static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable) { struct ice_hw *hw = &vsi->back->hw; + struct ice_aqc_vsi_props *info; struct ice_vsi_ctx *ctxt; enum ice_status status; int ret = 0; @@ -453,20 +442,33 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable) return -ENOMEM; ctxt->info = vsi->info; - if (enable) - ice_vsi_set_pvid_fill_ctxt(ctxt, vid); - else - ice_vsi_kill_pvid_fill_ctxt(ctxt); + info = &ctxt->info; + if (enable) { + info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED | + ICE_AQ_VSI_PVLAN_INSERT_PVID | + ICE_AQ_VSI_VLAN_EMOD_STR; + info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } else { + info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING | + ICE_AQ_VSI_VLAN_MODE_ALL; + info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } + + info->pvid = cpu_to_le16(pvid_info); + info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | + ICE_AQ_VSI_PROP_SW_VALID); status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n", + dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n", status, hw->adminq.sq_last_status); ret = -EIO; goto out; } - vsi->info = ctxt->info; + vsi->info.vlan_flags = info->vlan_flags; + vsi->info.sw_flags2 = info->sw_flags2; + vsi->info.pvid = info->pvid; out: kfree(ctxt); return ret; @@ -501,7 +503,7 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id) */ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) { - return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix; + return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf; } /** @@ -533,9 +535,20 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) vf->lan_vsi_num = vsi->vsi_num; /* Check if port VLAN exist before, and restore it accordingly */ - if (vf->port_vlan_id) { - ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true); - ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M); + if (vf->port_vlan_info) { + ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true); + if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK)) + dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n", + vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id); + } else { + /* set VLAN 0 filter by default when no port VLAN is + * enabled. If a port VLAN is enabled we don't want + * untagged broadcast/multicast traffic seen on the VF + * interface. + */ + if (ice_vsi_add_vlan(vsi, 0)) + dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. 
Reset the VF, disable spoofchk, or enable 8021q module on the guest\n", + vf->vf_id); } eth_broadcast_addr(broadcast); @@ -583,7 +596,7 @@ static int ice_alloc_vf_res(struct ice_vf *vf) */ tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf), ice_get_avail_rxq_count(pf)); - tx_rx_queue_left += ICE_DFLT_QS_PER_VF; + tx_rx_queue_left += pf->num_qps_per_vf; if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left && vf->num_req_qs != vf->num_vf_qs) vf->num_vf_qs = vf->num_req_qs; @@ -629,9 +642,9 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) hw = &pf->hw; vsi = pf->vsi[vf->lan_vsi_idx]; first = vf->first_vector_idx; - last = (first + pf->num_vf_msix) - 1; + last = (first + pf->num_msix_per_vf) - 1; abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id; - abs_last = (abs_first + pf->num_vf_msix) - 1; + abs_last = (abs_first + pf->num_msix_per_vf) - 1; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; /* VF Vector allocation */ @@ -749,7 +762,7 @@ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) pf = vf->pf; /* always add one to account for the OICR being the first MSIX */ - return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id + + return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id + q_vector->v_idx + 1; } @@ -782,127 +795,112 @@ static int ice_get_max_valid_res_idx(struct ice_res_tracker *res) * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs * * This function allows SR-IOV resources to be taken from the end of the PF's - * allowed HW MSIX vectors so in many cases the irq_tracker will not - * be needed. In these cases we just set the pf->sriov_base_vector and return - * success. + * allowed HW MSIX vectors so that the irq_tracker will not be affected. We + * just set the pf->sriov_base_vector and return success. * - * If SR-IOV needs to use any pf->irq_tracker entries it updates the - * irq_tracker->end based on the first entry needed for SR-IOV. This makes it - * so any calls to ice_get_res() using the irq_tracker will not try to use - * resources at or beyond the newly set value. + * If there are not enough resources available, return an error. This should + * always be caught by ice_set_per_vf_res(). * * Return 0 on success, and -EINVAL when there are not enough MSIX vectors * in the PF's space available for SR-IOV.
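The updated ice_sriov_set_msix_res, whose body follows, now only carves the SR-IOV vectors off the tail of the PF's MSI-X space instead of dipping into the irq_tracker. The essential check fits in a few lines; this sketch uses illustrative vector counts:

```c
#include <stdio.h>

/* Carve SR-IOV vectors from the tail of the PF's MSI-X space: valid
 * only if the base lands past everything the irq_tracker handed out.
 * All vector counts below are illustrative.
 */
static int set_sriov_base(int total_vectors, int vectors_used,
			  int num_msix_needed, int *sriov_base)
{
	int base = total_vectors - num_msix_needed;

	if (base < vectors_used)
		return -1; /* would collide with tracked entries: -EINVAL */

	*sriov_base = base;
	return 0;
}

int main(void)
{
	int base;

	/* 1024 total vectors, 96 in use, 8 VFs x 17 vectors needed */
	if (!set_sriov_base(1024, 96, 8 * 17, &base))
		printf("sriov_base_vector = %d\n", base); /* 888 */
	return 0;
}
```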
*/ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) { - int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); - u16 pf_total_msix_vectors = - pf->hw.func_caps.common_cap.num_msix_vectors; - struct ice_res_tracker *res = pf->irq_tracker; + u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; + int vectors_used = pf->irq_tracker->num_entries; int sriov_base_vector; - if (max_valid_res_idx < 0) - return max_valid_res_idx; - - sriov_base_vector = pf_total_msix_vectors - num_msix_needed; + sriov_base_vector = total_vectors - num_msix_needed; /* make sure we only grab irq_tracker entries from the list end and * that we have enough available MSIX vectors */ - if (sriov_base_vector <= max_valid_res_idx) + if (sriov_base_vector < vectors_used) return -EINVAL; pf->sriov_base_vector = sriov_base_vector; - /* dip into irq_tracker entries and update used resources */ - if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) { - pf->num_avail_sw_msix -= - res->num_entries - pf->sriov_base_vector; - res->end = pf->sriov_base_vector; - } - return 0; } /** - * ice_check_avail_res - check if vectors and queues are available + * ice_set_per_vf_res - check if vectors and queues are available * @pf: pointer to the PF structure * - * This function is where we calculate actual number of resources for VF VSIs, - * we don't reserve ahead of time during probe. Returns success if vectors and - * queues resources are available, otherwise returns error code + * First, determine HW interrupts from common pool. If we allocate fewer VFs, we + * get more vectors and can enable more queues per VF. Note that this does not + * grab any vectors from the SW pool already allocated. Also note, that all + * vector counts include one for each VF's miscellaneous interrupt vector + * (i.e. OICR). + * + * Minimum VFs - 2 vectors, 1 queue pair + * Small VFs - 5 vectors, 4 queue pairs + * Medium VFs - 17 vectors, 16 queue pairs + * + * Second, determine number of queue pairs per VF by starting with a pre-defined + * maximum each VF supports. If this is not possible, then we adjust based on + * queue pairs available on the device. + * + * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used + * by each VF during VF initialization and reset. */ -static int ice_check_avail_res(struct ice_pf *pf) +static int ice_set_per_vf_res(struct ice_pf *pf) { int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); - u16 num_msix, num_txq, num_rxq, num_avail_msix; + int msix_avail_per_vf, msix_avail_for_sriov; struct device *dev = ice_pf_to_dev(pf); + u16 num_msix_per_vf, num_txq, num_rxq; if (!pf->num_alloc_vfs || max_valid_res_idx < 0) return -EINVAL; - /* add 1 to max_valid_res_idx to account for it being 0-based */ - num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors - - (max_valid_res_idx + 1); - - /* Grab from HW interrupts common pool - * Note: By the time the user decides it needs more vectors in a VF - * its already too late since one must decide this prior to creating the - * VF interface. So the best we can do is take a guess as to what the - * user might want. - * - * We have two policies for vector allocation: - * 1. if num_alloc_vfs is from 1 to 16, then we consider this as small - * number of NFV VFs used for NFV appliances, since this is a special - * case, we try to assign maximum vectors per VF (65) as much as - * possible, based on determine_resources algorithm. - * 2. 
if num_alloc_vfs is from 17 to 256, then its large number of - * regular VFs which are not used for any special purpose. Hence try to - * grab default interrupt vectors (5 as supported by AVF driver). - */ - if (pf->num_alloc_vfs <= 16) { - num_msix = ice_determine_res(pf, num_avail_msix, - ICE_MAX_INTR_PER_VF, - ICE_MIN_INTR_PER_VF); - } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) { - num_msix = ice_determine_res(pf, num_avail_msix, - ICE_DFLT_INTR_PER_VF, - ICE_MIN_INTR_PER_VF); + /* determine MSI-X resources per VF */ + msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - + pf->irq_tracker->num_entries; + msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs; + if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) { + num_msix_per_vf = ICE_NUM_VF_MSIX_MED; + } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) { + num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL; + } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) { + num_msix_per_vf = ICE_MIN_INTR_PER_VF; } else { - dev_err(dev, "Number of VFs %d exceeds max VF count %d\n", - pf->num_alloc_vfs, ICE_MAX_VF_COUNT); + dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n", + msix_avail_for_sriov, ICE_MIN_INTR_PER_VF, + pf->num_alloc_vfs); return -EIO; } - if (!num_msix) - return -EIO; - - /* Grab from the common pool - * start by requesting Default queues (4 as supported by AVF driver), - * Note that, the main difference between queues and vectors is, latter - * can only be reserved at init time but queues can be requested by VF - * at runtime through Virtchnl, that is the reason we start by reserving - * few queues. - */ + /* determine queue resources per VF */ num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf), - ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); + min_t(u16, + num_msix_per_vf - ICE_NONQ_VECS_VF, + ICE_MAX_RSS_QS_PER_VF), + ICE_MIN_QS_PER_VF); num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf), - ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); - - if (!num_txq || !num_rxq) + min_t(u16, + num_msix_per_vf - ICE_NONQ_VECS_VF, + ICE_MAX_RSS_QS_PER_VF), + ICE_MIN_QS_PER_VF); + + if (!num_txq || !num_rxq) { + dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n", + ICE_MIN_QS_PER_VF, pf->num_alloc_vfs); return -EIO; + } - if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs)) + if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) { + dev_err(dev, "Unable to set MSI-X resources for %d VFs\n", + pf->num_alloc_vfs); return -EINVAL; + } - /* since AVF driver works with only queue pairs which means, it expects - * to have equal number of Rx and Tx queues, so take the minimum of - * available Tx or Rx queues - */ - pf->num_vf_qps = min_t(int, num_txq, num_rxq); - pf->num_vf_msix = num_msix; + /* only allow equal Tx/Rx queue count (i.e. 
queue pairs) */ + pf->num_qps_per_vf = min_t(int, num_txq, num_rxq); + pf->num_msix_per_vf = num_msix_per_vf; + dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", + pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf); return 0; } @@ -943,17 +941,9 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf) /* reallocate VF resources to finish resetting the VSI state */ if (!ice_alloc_vf_res(vf)) { - struct ice_vsi *vsi; - ice_ena_vf_mappings(vf); set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); clear_bit(ICE_VF_STATE_DIS, vf->vf_states); - - vsi = pf->vsi[vf->lan_vsi_idx]; - if (ice_vsi_add_vlan(vsi, 0)) - dev_warn(ice_pf_to_dev(pf), - "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest", - vf->vf_id); } /* Tell the VF driver the reset is done. This needs to be done only @@ -985,13 +975,13 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, if (vsi->num_vlan) { status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, rm_promisc); - } else if (vf->port_vlan_id) { + } else if (vf->port_vlan_info) { if (rm_promisc) status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, - vf->port_vlan_id); + vf->port_vlan_info); else status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, - vf->port_vlan_id); + vf->port_vlan_info); } else { if (rm_promisc) status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, @@ -1019,7 +1009,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; int v; - if (ice_check_avail_res(pf)) { + if (ice_set_per_vf_res(pf)) { dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n"); return false; } @@ -1032,7 +1022,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf) ice_for_each_vf(pf, v) { struct ice_vf *vf = &pf->vf[v]; - vf->num_vf_qs = pf->num_vf_qps; + vf->num_vf_qs = pf->num_qps_per_vf; dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id, vf->num_vf_qs); ice_cleanup_and_realloc_vf(vf); @@ -1165,9 +1155,10 @@ static bool ice_is_vf_disabled(struct ice_vf *vf) * @vf: pointer to the VF structure * @is_vflr: true if VFLR was issued, false if not * - * Returns true if the VF is reset, false otherwise. + * Returns true if the VF is currently in reset, resets successfully, or resets + * are disabled and false otherwise. 
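The MSI-X sizing in ice_set_per_vf_res above divides the spare vectors evenly across VFs, then snaps the result down to one of three fixed tiers. A standalone rendering of the tier selection, where the VF_MSIX_* macros mirror ICE_NUM_VF_MSIX_MED, ICE_NUM_VF_MSIX_SMALL, and ICE_MIN_INTR_PER_VF (one queue vector plus the OICR):

```c
#include <stdio.h>

#define VF_MSIX_MED 17
#define VF_MSIX_SMALL 5
#define VF_MSIX_MIN 2

static int msix_per_vf(int msix_avail_for_sriov, int num_vfs)
{
	int per_vf = msix_avail_for_sriov / num_vfs;

	if (per_vf >= VF_MSIX_MED)
		return VF_MSIX_MED;
	if (per_vf >= VF_MSIX_SMALL)
		return VF_MSIX_SMALL;
	if (per_vf >= VF_MSIX_MIN)
		return VF_MSIX_MIN;
	return -1; /* cannot support even the minimum per VF */
}

int main(void)
{
	/* 160 spare vectors: 8 VFs land in the medium tier, 32 in small */
	printf("8 VFs  -> %d vectors each\n", msix_per_vf(160, 8));
	printf("32 VFs -> %d vectors each\n", msix_per_vf(160, 32));
	return 0;
}
```

Fewer VFs therefore get more vectors each, and the queue-pair count per VF is then bounded by the vectors left after the OICR.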
*/ -static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) +bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) { struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; @@ -1180,6 +1171,12 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) dev = ice_pf_to_dev(pf); + if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { + dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n", + vf->vf_id); + return true; + } + if (ice_is_vf_disabled(vf)) { dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", vf->vf_id); @@ -1231,7 +1228,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) */ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { - if (vf->port_vlan_id || vsi->num_vlan) + if (vf->port_vlan_info || vsi->num_vlan) promisc_m = ICE_UCAST_VLAN_PROMISC_BITS; else promisc_m = ICE_UCAST_PROMISC_BITS; @@ -1432,7 +1429,7 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) if (num_vfs > pf->num_vfs_supported) { dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", num_vfs, pf->num_vfs_supported); - return -ENOTSUPP; + return -EOPNOTSUPP; } dev_info(dev, "Allocating %d VFs\n", num_vfs); @@ -1518,6 +1515,72 @@ static void ice_vc_reset_vf(struct ice_vf *vf) } /** + * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in + * @pf: PF used to index all VFs + * @pfq: queue index relative to the PF's function space + * + * If no VF is found who owns the pfq then return NULL, otherwise return a + * pointer to the VF who owns the pfq + */ +static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) +{ + int vf_id; + + ice_for_each_vf(pf, vf_id) { + struct ice_vf *vf = &pf->vf[vf_id]; + struct ice_vsi *vsi; + u16 rxq_idx; + + vsi = pf->vsi[vf->lan_vsi_idx]; + + ice_for_each_rxq(vsi, rxq_idx) + if (vsi->rxq_map[rxq_idx] == pfq) + return vf; + } + + return NULL; +} + +/** + * ice_globalq_to_pfq - convert from global queue index to PF space queue index + * @pf: PF used for conversion + * @globalq: global queue index used to convert to PF space queue index + */ +static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq) +{ + return globalq - pf->hw.func_caps.common_cap.rxq_first_id; +} + +/** + * ice_vf_lan_overflow_event - handle LAN overflow event for a VF + * @pf: PF that the LAN overflow event happened on + * @event: structure holding the event information for the LAN overflow event + * + * Determine if the LAN overflow event was caused by a VF queue. If it was not + * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a + * reset on the offending VF. 
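The helpers above rebase a device-global Rx queue number into the PF's queue space and scan each VF's rxq_map for the owner; the LAN overflow handler that follows wires them to the queue number reported by the event. A sketch of the lookup with hypothetical queue IDs and a two-VF map:

```c
#include <stdio.h>

#define NUM_VFS 2
#define QS_PER_VF 4

/* Hypothetical per-VF rxq_map: VSI queue index -> PF-space queue. */
static const unsigned int rxq_map[NUM_VFS][QS_PER_VF] = {
	{ 64, 65, 66, 67 },
	{ 68, 69, 70, 71 },
};

/* Mirror of ice_globalq_to_pfq: global IDs start at rxq_first_id. */
static unsigned int globalq_to_pfq(unsigned int globalq,
				   unsigned int rxq_first_id)
{
	return globalq - rxq_first_id;
}

/* Mirror of ice_get_vf_from_pfq: scan every VF's Rx queue map. */
static int vf_owning_pfq(unsigned int pfq)
{
	int vf, q;

	for (vf = 0; vf < NUM_VFS; vf++)
		for (q = 0; q < QS_PER_VF; q++)
			if (rxq_map[vf][q] == pfq)
				return vf;
	return -1; /* not a VF queue, so the event is ignored */
}

int main(void)
{
	/* hypothetical: global queue 2118, function's first Rx ID 2048 */
	unsigned int pfq = globalq_to_pfq(2118, 2048);

	printf("PF queue %u owned by VF %d\n", pfq, vf_owning_pfq(pfq));
	return 0;
}
```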
+ */ +void +ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) +{ + u32 gldcb_rtctq, queue; + struct ice_vf *vf; + + gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq); + dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); + + /* event returns device global Rx queue number */ + queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >> + GLDCB_RTCTQ_RXQNUM_S; + + vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); + if (!vf) + return; + + ice_vc_reset_vf(vf); +} + +/** * ice_vc_send_msg_to_vf - Send message to VF * @vf: pointer to the VF info * @v_opcode: virtual channel opcode @@ -1675,7 +1738,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->num_vsis = 1; /* Tx and Rx queue are equal for VF */ vfres->num_queue_pairs = vsi->num_txq; - vfres->max_vectors = pf->num_vf_msix; + vfres->max_vectors = pf->num_msix_per_vf; vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; @@ -1981,7 +2044,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); if (status) { - dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d", + dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n", ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status); ret = -EIO; goto out; @@ -2039,6 +2102,22 @@ error_param: } /** + * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL + * @vqs: virtchnl_queue_select structure containing bitmaps to validate + * + * Return true on successful validation, else false + */ +static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) +{ + if ((!vqs->rx_queues && !vqs->tx_queues) || + vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) || + vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF)) + return false; + + return true; +} + +/** * ice_vc_ena_qs_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2065,13 +2144,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - if (!vqs->rx_queues && !vqs->tx_queues) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; - } - - if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || - vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_validate_vqs_bitmaps(vqs)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -2087,7 +2160,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) * programmed using ice_vsi_cfg_txqs */ q_map = vqs->rx_queues; - for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2097,7 +2170,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) if (test_bit(vf_q_id, vf->rxq_ena)) continue; - if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) { + if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) { dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n", vf_q_id, vsi->vsi_num); v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2105,12 +2178,11 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) } set_bit(vf_q_id, vf->rxq_ena); - vf->num_qs_ena++; } vsi = pf->vsi[vf->lan_vsi_idx]; q_map = vqs->tx_queues; - for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto 
error_param; @@ -2121,7 +2193,6 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) continue; set_bit(vf_q_id, vf->txq_ena); - vf->num_qs_ena++; } /* Set flag to indicate that queues are enabled */ @@ -2163,13 +2234,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - if (!vqs->rx_queues && !vqs->tx_queues) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; - } - - if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || - vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_validate_vqs_bitmaps(vqs)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -2183,7 +2248,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) if (vqs->tx_queues) { q_map = vqs->tx_queues; - for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { struct ice_ring *ring = vsi->tx_rings[vf_q_id]; struct ice_txq_meta txq_meta = { 0 }; @@ -2208,14 +2273,23 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) /* Clear enabled queues flag */ clear_bit(vf_q_id, vf->txq_ena); - vf->num_qs_ena--; } } - if (vqs->rx_queues) { - q_map = vqs->rx_queues; + q_map = vqs->rx_queues; + /* speed up Rx queue disable by batching them if possible */ + if (q_map && + bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) { + if (ice_vsi_stop_all_rx_rings(vsi)) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n", + vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } - for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); + } else if (q_map) { + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2225,7 +2299,8 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) if (!test_bit(vf_q_id, vf->rxq_ena)) continue; - if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) { + if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id, + true)) { dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n", vf_q_id, vsi->vsi_num); v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2234,12 +2309,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) /* Clear enabled queues flag */ clear_bit(vf_q_id, vf->rxq_ena); - vf->num_qs_ena--; } } /* Clear enabled queues flag */ - if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena) + if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf)) clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); error_param: @@ -2249,6 +2323,57 @@ error_param: } /** + * ice_cfg_interrupt + * @vf: pointer to the VF info + * @vsi: the VSI being configured + * @vector_id: vector ID + * @map: vector map for mapping vectors to queues + * @q_vector: structure for interrupt vector + * configure the IRQ to queue map + */ +static int +ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, + struct virtchnl_vector_map *map, + struct ice_q_vector *q_vector) +{ + u16 vsi_q_id, vsi_q_id_idx; + unsigned long qmap; + + q_vector->num_ring_rx = 0; + q_vector->num_ring_tx = 0; + + qmap = map->rxq_map; + for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { + vsi_q_id = vsi_q_id_idx; + + if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) + return VIRTCHNL_STATUS_ERR_PARAM; + + q_vector->num_ring_rx++; + q_vector->rx.itr_idx = map->rxitr_idx; + vsi->rx_rings[vsi_q_id]->q_vector = q_vector; + ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id, + q_vector->rx.itr_idx); 
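ice_vc_validate_vqs_bitmaps, introduced in the hunk above and used by both the enable and disable paths, rejects an empty request and any bitmap with a bit at or above ICE_MAX_RSS_QS_PER_VF; since that limit is 16, BIT(16) is the first invalid bit. A standalone version of the check:

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_QS_PER_VF 16 /* ICE_MAX_RSS_QS_PER_VF */

/* Same shape as ice_vc_validate_vqs_bitmaps: reject an empty request
 * and any bitmap with a bit at or above the per-VF queue limit.
 */
static bool vqs_bitmaps_valid(unsigned long rx_queues,
			      unsigned long tx_queues)
{
	if (!rx_queues && !tx_queues)
		return false;
	if (rx_queues >= (1UL << MAX_QS_PER_VF) ||
	    tx_queues >= (1UL << MAX_QS_PER_VF))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", vqs_bitmaps_valid(0x000F, 0x000F)); /* 1: queues 0-3 */
	printf("%d\n", vqs_bitmaps_valid(0, 0));           /* 0: empty */
	printf("%d\n", vqs_bitmaps_valid(1UL << 16, 0));   /* 0: queue 16 */
	return 0;
}
```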
+ } + + qmap = map->txq_map; + for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { + vsi_q_id = vsi_q_id_idx; + + if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) + return VIRTCHNL_STATUS_ERR_PARAM; + + q_vector->num_ring_tx++; + q_vector->tx.itr_idx = map->txitr_idx; + vsi->tx_rings[vsi_q_id]->q_vector = q_vector; + ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id, + q_vector->tx.itr_idx); + } + + return VIRTCHNL_STATUS_SUCCESS; +} + +/** * ice_vc_cfg_irq_map_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2258,13 +2383,11 @@ error_param: static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + u16 num_q_vectors_mapped, vsi_id, vector_id; struct virtchnl_irq_map_info *irqmap_info; - u16 vsi_id, vsi_q_id, vector_id; struct virtchnl_vector_map *map; struct ice_pf *pf = vf->pf; - u16 num_q_vectors_mapped; struct ice_vsi *vsi; - unsigned long qmap; int i; irqmap_info = (struct virtchnl_irq_map_info *)msg; @@ -2275,8 +2398,8 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) * there is actually at least a single VF queue vector mapped */ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || - pf->num_vf_msix < num_q_vectors_mapped || - !irqmap_info->num_vectors) { + pf->num_msix_per_vf < num_q_vectors_mapped || + !num_q_vectors_mapped) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -2297,7 +2420,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) /* vector_id is always 0-based for each VF, and can never be * larger than or equal to the max allowed interrupts per VF */ - if (!(vector_id < ICE_MAX_INTR_PER_VF) || + if (!(vector_id < pf->num_msix_per_vf) || !ice_vc_isvalid_vsi_id(vf, vsi_id) || (!vector_id && (map->rxq_map || map->txq_map))) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2318,33 +2441,10 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) } /* lookout for the invalid queue index */ - qmap = map->rxq_map; - q_vector->num_ring_rx = 0; - for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { - if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; - } - q_vector->num_ring_rx++; - q_vector->rx.itr_idx = map->rxitr_idx; - vsi->rx_rings[vsi_q_id]->q_vector = q_vector; - ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id, - q_vector->rx.itr_idx); - } - - qmap = map->txq_map; - q_vector->num_ring_tx = 0; - for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { - if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; - } - q_vector->num_ring_tx++; - q_vector->tx.itr_idx = map->txitr_idx; - vsi->tx_rings[vsi_q_id]->q_vector = q_vector; - ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id, - q_vector->tx.itr_idx); - } + v_ret = (enum virtchnl_status_code) + ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector); + if (v_ret) + goto error_param; } error_param: @@ -2387,7 +2487,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF || + if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF || qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); @@ -2694,16 +2794,16 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) if (!req_queues) { dev_err(dev, "VF %d tried to request 0 queues. 
Ignoring.\n", vf->vf_id); - } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) { + } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) { dev_err(dev, "VF %d tried to request more than %d queues.\n", - vf->vf_id, ICE_MAX_BASE_QS_PER_VF); - vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF; + vf->vf_id, ICE_MAX_RSS_QS_PER_VF); + vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF; } else if (req_queues > cur_queues && req_queues - cur_queues > tx_rx_queue_left) { dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues, - ICE_MAX_BASE_QS_PER_VF); + ICE_MAX_RSS_QS_PER_VF); } else { /* request is successful, then reset VF */ vf->num_req_qs = req_queues; @@ -2733,19 +2833,20 @@ int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, __be16 vlan_proto) { - u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S); struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_vsi *vsi; struct device *dev; struct ice_vf *vf; + u16 vlanprio; int ret; dev = ice_pf_to_dev(pf); if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; - if (vlan_id > ICE_MAX_VLANID || qos > 7) { - dev_err(dev, "Invalid VF Parameters\n"); + if (vlan_id >= VLAN_N_VID || qos > 7) { + dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n", + vf_id, vlan_id, qos); return -EINVAL; } @@ -2761,40 +2862,52 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, if (ret) return ret; - if (le16_to_cpu(vsi->info.pvid) == vlanprio) { + vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT); + + if (vf->port_vlan_info == vlanprio) { /* duplicate request, so just return success */ dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio); return 0; } - /* If PVID, then remove all filters on the old VLAN */ - if (vsi->info.pvid) - ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & - VLAN_VID_MASK)); - if (vlan_id || qos) { + /* remove VLAN 0 filter set by default when transitioning from + * no port VLAN to a port VLAN. No change to old port VLAN on + * failure. + */ + ret = ice_vsi_kill_vlan(vsi, 0); + if (ret) + return ret; ret = ice_vsi_manage_pvid(vsi, vlanprio, true); if (ret) return ret; } else { - ice_vsi_manage_pvid(vsi, 0, false); - vsi->info.pvid = 0; + /* add VLAN 0 filter back when transitioning from port VLAN to + * no port VLAN. No change to old port VLAN on failure. + */ + ret = ice_vsi_add_vlan(vsi, 0); + if (ret) + return ret; + ret = ice_vsi_manage_pvid(vsi, 0, false); + if (ret) + return ret; } if (vlan_id) { dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n", vlan_id, qos, vf_id); - /* add new VLAN filter for each MAC */ + /* add VLAN filter for the port VLAN */ ret = ice_vsi_add_vlan(vsi, vlan_id); if (ret) return ret; } + /* remove old port VLAN filter with valid VLAN ID or QoS fields */ + if (vf->port_vlan_info) + ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK); - /* The Port VLAN needs to be saved across resets the same as the - * default LAN MAC address. 
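ice_set_vf_port_vlan now packs the VLAN ID and QoS into vf->port_vlan_info exactly like an 802.1Q TCI, using VLAN_PRIO_SHIFT from linux/if_vlan.h in place of the removed ICE_VLAN_PRIORITY_S. A worked example of the packing and unpacking:

```c
#include <stdio.h>

/* Packing per linux/if_vlan.h: VID in bits 11:0, QoS (PCP) in bits
 * 15:13, which is why the driver checks vlan_id < VLAN_N_VID (4096)
 * and qos <= 7.
 */
#define VLAN_VID_MASK 0x0FFF
#define VLAN_PRIO_SHIFT 13
#define VLAN_PRIO_MASK 0xE000

int main(void)
{
	unsigned int vlanprio = 100 | (5u << VLAN_PRIO_SHIFT);

	printf("port_vlan_info = 0x%04X\n", vlanprio); /* 0xA064 */
	printf("vid=%u qos=%u\n", vlanprio & VLAN_VID_MASK,
	       (vlanprio & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
	return 0;
}
```

The same VLAN_VID_MASK/VLAN_PRIO_MASK pair is what ice_get_vf_cfg uses to report the VID and QoS back through the ifla_vf_info structure.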
- */ - vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); + /* keep port VLAN information persistent on resets */ + vf->port_vlan_info = le16_to_cpu(vsi->info.pvid); return 0; } @@ -2849,7 +2962,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) } for (i = 0; i < vfl->num_elements; i++) { - if (vfl->vlan_id[i] > ICE_MAX_VLANID) { + if (vfl->vlan_id[i] >= VLAN_N_VID) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; dev_err(dev, "invalid VF VLAN id %d\n", vfl->vlan_id[i]); @@ -2911,9 +3024,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) goto error_param; } - vsi->num_vlan++; - /* Enable VLAN pruning when VLAN is added */ - if (!vlan_promisc) { + /* Enable VLAN pruning when non-zero VLAN is added */ + if (!vlan_promisc && vid && + !ice_vsi_is_vlan_pruning_ena(vsi)) { status = ice_cfg_vlan_pruning(vsi, true, false); if (status) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2921,7 +3034,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) vid, status); goto error_param; } - } else { + } else if (vlan_promisc) { /* Enable Ucast/Mcast VLAN promiscuous mode */ promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX; @@ -2965,9 +3078,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) goto error_param; } - vsi->num_vlan--; - /* Disable VLAN pruning when the last VLAN is removed */ - if (!vsi->num_vlan) + /* Disable VLAN pruning when only VLAN 0 is left */ + if (vsi->num_vlan == 1 && + ice_vsi_is_vlan_pruning_ena(vsi)) ice_cfg_vlan_pruning(vsi, false, false); /* Disable Unicast/Multicast VLAN promiscuous mode */ @@ -3246,14 +3359,12 @@ int ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) { struct ice_pf *pf = ice_netdev_to_pf(netdev); - struct ice_vsi *vsi; struct ice_vf *vf; if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; vf = &pf->vf[vf_id]; - vsi = pf->vsi[vf->lan_vsi_idx]; if (ice_check_vf_init(pf, vf)) return -EBUSY; @@ -3262,9 +3373,8 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr); /* VF configuration for VLAN and applicable QoS */ - ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M; - ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >> - ICE_VLAN_PRIORITY_S; + ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK; + ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; ivi->trusted = vf->trusted; ivi->spoofchk = vf->spoofchk; @@ -3442,3 +3552,52 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id, return 0; } + +/** + * ice_print_vfs_mdd_event - print VFs malicious driver detect event + * @pf: pointer to the PF structure + * + * Called from ice_handle_mdd_event to rate limit and print VFs MDD events. 
+ */ +void ice_print_vfs_mdd_events(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + int i; + + /* check that there are pending MDD events to print */ + if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state)) + return; + + /* VF MDD event logs are rate limited to one second intervals */ + if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1)) + return; + + pf->last_printed_mdd_jiffies = jiffies; + + ice_for_each_vf(pf, i) { + struct ice_vf *vf = &pf->vf[i]; + + /* only print Rx MDD event message if there are new events */ + if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { + vf->mdd_rx_events.last_printed = + vf->mdd_rx_events.count; + + dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", + vf->mdd_rx_events.count, hw->pf_id, i, + vf->dflt_lan_addr.addr, + test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) + ? "on" : "off"); + } + + /* only print Tx MDD event message if there are new events */ + if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { + vf->mdd_tx_events.last_printed = + vf->mdd_tx_events.count; + + dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", + vf->mdd_tx_events.count, hw->pf_id, i, + vf->dflt_lan_addr.addr); + } + } +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index ac67982751df..3f9464269bd2 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -5,11 +5,6 @@ #define _ICE_VIRTCHNL_PF_H_ #include "ice.h" -#define ICE_MAX_VLANID 4095 -#define ICE_VLAN_PRIORITY_S 12 -#define ICE_VLAN_M 0xFFF -#define ICE_PRIORITY_M 0x7000 - /* Restrict number of MAC Addr and VLAN that non-trusted VF can program */ #define ICE_MAX_VLAN_PER_VF 8 #define ICE_MAX_MACADDR_PER_VF 12 @@ -26,18 +21,15 @@ #define ICE_PCI_CIAD_WAIT_COUNT 100 #define ICE_PCI_CIAD_WAIT_DELAY_US 1 -/* VF resources default values and limitation */ +/* VF resource constraints */ #define ICE_MAX_VF_COUNT 256 -#define ICE_MAX_QS_PER_VF 256 #define ICE_MIN_QS_PER_VF 1 -#define ICE_DFLT_QS_PER_VF 4 #define ICE_NONQ_VECS_VF 1 #define ICE_MAX_SCATTER_QS_PER_VF 16 -#define ICE_MAX_BASE_QS_PER_VF 16 -#define ICE_MAX_INTR_PER_VF 65 -#define ICE_MAX_POLICY_INTR_PER_VF 33 +#define ICE_MAX_RSS_QS_PER_VF 16 +#define ICE_NUM_VF_MSIX_MED 17 +#define ICE_NUM_VF_MSIX_SMALL 5 #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) -#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) #define ICE_MAX_VF_RESET_TRIES 40 #define ICE_MAX_VF_RESET_SLEEP_MS 20 @@ -61,6 +53,13 @@ enum ice_virtchnl_cap { ICE_VIRTCHNL_VF_CAP_PRIVILEGE, }; +/* VF MDD events print structure */ +struct ice_mdd_vf_events { + u16 count; /* total count of Rx|Tx events */ + /* count number of the last printed event */ + u16 last_printed; +}; + /* VF information structure */ struct ice_vf { struct ice_pf *pf; @@ -73,9 +72,9 @@ struct ice_vf { struct virtchnl_version_info vf_ver; u32 driver_caps; /* reported by VF driver */ struct virtchnl_ether_addr dflt_lan_addr; - DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF); - DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF); - u16 port_vlan_id; + DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF); + DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF); + u16 port_vlan_info; /* Port VLAN ID and QoS */ u8 pf_set_mac:1; /* VF MAC address set by VMM admin */ u8 trusted:1; u8 spoofchk:1; @@ -89,14 +88,14 @@ struct ice_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ - u64 num_mdd_events; /* number of MDD events detected */ u64 num_inval_msgs; /* number of continuous invalid msgs */ u64 num_valid_msgs; /* number of valid msgs detected */ unsigned long vf_caps; /* VF's adv. capabilities */ u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; u16 num_vf_qs; /* num of queue configured per VF */ - u16 num_qs_ena; /* total num of Tx/Rx queue enabled */ + struct ice_mdd_vf_events mdd_rx_events; + struct ice_mdd_vf_events mdd_tx_events; }; #ifdef CONFIG_PCI_IOV @@ -111,6 +110,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_vc_notify_link_state(struct ice_pf *pf); void ice_vc_notify_reset(struct ice_pf *pf); bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); +bool ice_reset_vf(struct ice_vf *vf, bool is_vflr); int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, @@ -128,6 +128,9 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf); int ice_get_vf_stats(struct net_device *netdev, int vf_id, struct ifla_vf_stats *vf_stats); +void +ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); +void ice_print_vfs_mdd_events(struct ice_pf *pf); #else /* CONFIG_PCI_IOV */ #define ice_process_vflr_event(pf) do {} while (0) #define ice_free_vfs(pf) do {} while (0) @@ -135,6 +138,8 @@ ice_get_vf_stats(struct net_device *netdev, int vf_id, #define ice_vc_notify_link_state(pf) do {} while (0) #define ice_vc_notify_reset(pf) do {} while (0) #define ice_set_vf_state_qs_dis(vf) do {} while (0) +#define ice_vf_lan_overflow_event(pf, event) do {} while (0) +#define ice_print_vfs_mdd_events(pf) do {} while (0) static inline bool ice_reset_all_vfs(struct ice_pf __always_unused *pf, @@ -143,6 +148,12 @@ ice_reset_all_vfs(struct ice_pf __always_unused *pf, return true; } +static inline bool +ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr) +{ + return true; +} + static inline int ice_sriov_configure(struct pci_dev __always_unused *pdev, int __always_unused num_vfs) diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 4d3407bbd4c4..8279db15e870 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -183,7 +183,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) if (err) return err; } - err = ice_vsi_ctrl_rx_ring(vsi, false, q_idx); + err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true); if (err) return err; @@ -243,7 +243,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) ice_qvec_cfg_msix(vsi, q_vector); - err = ice_vsi_ctrl_rx_ring(vsi, true, q_idx); + err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true); if (err) goto free_buf; @@ -457,7 +457,7 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) if (if_running) { ret = ice_qp_dis(vsi, qid); if (ret) { - netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret); + netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret); goto xsk_umem_if_up; } } @@ -471,11 +471,11 @@ xsk_umem_if_up: if (!ret && umem_present) napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi); else if (ret) - netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret); + netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret); } if (umem_failure) { - netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d", + netdev_err(vsi->netdev, "Could not %sable UMEM, error 
= %d\n", umem_present ? "en" : "dis", umem_failure); return umem_failure; } @@ -609,7 +609,7 @@ ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) */ static bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count, - bool alloc(struct ice_ring *, struct ice_rx_buf *)) + bool (*alloc)(struct ice_ring *, struct ice_rx_buf *)) { union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; @@ -816,10 +816,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) break; default: bpf_warn_invalid_xdp_action(act); - /* fallthrough -- not supported action */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - /* fallthrough -- handle aborts by dropping frame */ + fallthrough; case XDP_DROP: result = ICE_XDP_CONSUMED; break; @@ -841,8 +841,8 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); unsigned int xdp_xmit = 0; + bool failure = false; struct xdp_buff xdp; - bool failure = 0; xdp.rxq = &rx_ring->xdp_rxq; @@ -937,6 +937,15 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) ice_finalize_xdp_rx(rx_ring, xdp_xmit); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); + if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) { + if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) + xsk_set_rx_need_wakeup(rx_ring->xsk_umem); + else + xsk_clear_rx_need_wakeup(rx_ring->xsk_umem); + + return (int)total_rx_packets; + } + return failure ? budget : (int)total_rx_packets; } @@ -988,6 +997,8 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) if (tx_desc) { ice_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) + xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); } return budget > 0 && work_done; @@ -1063,6 +1074,13 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget) if (xsk_frames) xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames); + if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) { + if (xdp_ring->next_to_clean == xdp_ring->next_to_use) + xsk_set_tx_need_wakeup(xdp_ring->xsk_umem); + else + xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); + } + ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes); xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h index 3479e1de98fe..8a4ba7c6d549 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.h +++ b/drivers/net/ethernet/intel/ice/ice_xsk.h @@ -24,7 +24,7 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi, struct xdp_umem __always_unused *umem, u16 __always_unused qid) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline void @@ -63,7 +63,7 @@ static inline int ice_xsk_wakeup(struct net_device __always_unused *netdev, u32 __always_unused queue_id, u32 __always_unused flags) { - return -ENOTSUPP; + return -EOPNOTSUPP; } #define ice_xsk_clean_rx_ring(rx_ring) do {} while (0) diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 49b5fa9d4783..0c9282e2aaec 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -306,7 +306,7 @@ struct igb_q_vector { char name[IFNAMSIZ + 9]; /* for dynamic allocation of rings associated with this q_vector */ - struct igb_ring ring[0] ____cacheline_internodealigned_in_smp; + struct igb_ring ring[] 
____cacheline_internodealigned_in_smp; }; enum e1000_ring_flags_t { diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index f96ffa83efbe..39d3b76a6f5d 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2183,27 +2183,6 @@ static int igb_set_coalesce(struct net_device *netdev, struct igb_adapter *adapter = netdev_priv(netdev); int i; - if (ec->rx_max_coalesced_frames || - ec->rx_coalesce_usecs_irq || - ec->rx_max_coalesced_frames_irq || - ec->tx_max_coalesced_frames || - ec->tx_coalesce_usecs_irq || - ec->stats_block_coalesce_usecs || - ec->use_adaptive_rx_coalesce || - ec->use_adaptive_tx_coalesce || - ec->pkt_rate_low || - ec->rx_coalesce_usecs_low || - ec->rx_max_coalesced_frames_low || - ec->tx_coalesce_usecs_low || - ec->tx_max_coalesced_frames_low || - ec->pkt_rate_high || - ec->rx_coalesce_usecs_high || - ec->rx_max_coalesced_frames_high || - ec->tx_coalesce_usecs_high || - ec->tx_max_coalesced_frames_high || - ec->rate_sample_interval) - return -ENOTSUPP; - if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || ((ec->rx_coalesce_usecs > 3) && (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || @@ -3477,6 +3456,7 @@ static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags) } static const struct ethtool_ops igb_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = igb_get_drvinfo, .get_regs_len = igb_get_regs_len, .get_regs = igb_get_regs, diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index 3b83747b2700..21a29a0ca7f4 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c @@ -198,11 +198,11 @@ int igb_sysfs_init(struct igb_adapter *adapter) } /* init i2c_client */ - client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); - if (client == NULL) { + client = i2c_new_client_device(&adapter->i2c_adap, &i350_sensor_info); + if (IS_ERR(client)) { dev_info(&adapter->pdev->dev, "Failed to create new i2c device.\n"); - rc = -ENODEV; + rc = PTR_ERR(client); goto exit; } adapter->i2c_client = client; diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 3ae358b35227..9217d150e286 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -424,6 +424,7 @@ static void igbvf_get_strings(struct net_device *netdev, u32 stringset, } static const struct ethtool_ops igbvf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, .get_drvinfo = igbvf_get_drvinfo, .get_regs_len = igbvf_get_regs_len, .get_regs = igbvf_get_regs, diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile index 49fb1e1965cd..e3c164c12e10 100644 --- a/drivers/net/ethernet/intel/igc/Makefile +++ b/drivers/net/ethernet/intel/igc/Makefile @@ -8,4 +8,4 @@ obj-$(CONFIG_IGC) += igc.o igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \ -igc_ethtool.o igc_ptp.o +igc_ethtool.o igc_ptp.o igc_dump.o diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 52066bdbbad0..a1f845a2aa80 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -42,6 +42,10 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter, const u8 *addr, u8 queue, u8 flags); void igc_update_stats(struct igc_adapter *adapter); +/* igc_dump declarations 
*/ +void igc_rings_dump(struct igc_adapter *adapter); +void igc_regs_dump(struct igc_adapter *adapter); + extern char igc_driver_name[]; extern char igc_driver_version[]; @@ -53,10 +57,13 @@ extern char igc_driver_version[]; /* Interrupt defines */ #define IGC_START_ITR 648 /* ~6000 ints/sec */ + +/* Flags definitions */ #define IGC_FLAG_HAS_MSI BIT(0) #define IGC_FLAG_QUEUE_PAIRS BIT(3) #define IGC_FLAG_DMAC BIT(4) #define IGC_FLAG_PTP BIT(8) +#define IGC_FLAG_WOL_SUPPORTED BIT(8) #define IGC_FLAG_NEED_LINK_UPDATE BIT(9) #define IGC_FLAG_MEDIA_RESET BIT(10) #define IGC_FLAG_MAS_ENABLE BIT(12) @@ -108,7 +115,7 @@ extern char igc_driver_version[]; #define IGC_RX_HDR_LEN IGC_RXBUFFER_256 /* Transmit and receive latency (for PTP timestamps) */ -/* FIXME: These values were estimated using the ones that i210 has as +/* FIXME: These values were estimated using the ones that i225 has as * basis, they seem to provide good numbers with ptp4l/phc2sys, but we * need to confirm them. */ @@ -319,7 +326,7 @@ struct igc_q_vector { struct net_device poll_dev; /* for dynamic allocation of rings associated with this q_vector */ - struct igc_ring ring[0] ____cacheline_internodealigned_in_smp; + struct igc_ring ring[] ____cacheline_internodealigned_in_smp; }; #define MAX_ETYPE_FILTER (4 - 1) @@ -552,6 +559,7 @@ int igc_erase_filter(struct igc_adapter *adapter, void igc_ptp_init(struct igc_adapter *adapter); void igc_ptp_reset(struct igc_adapter *adapter); +void igc_ptp_suspend(struct igc_adapter *adapter); void igc_ptp_stop(struct igc_adapter *adapter); void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb); void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 58efa7a02c68..4ddccccf42cc 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -16,7 +16,10 @@ /* Wake Up Filter Control */ #define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ #define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ #define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ @@ -259,6 +262,9 @@ #define IGC_GPIE_EIAME 0x40000000 #define IGC_GPIE_PBA 0x80000000 +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */ + /* Transmit Descriptor bit definitions */ #define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ #define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c new file mode 100644 index 000000000000..657ab50ae296 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_dump.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc.h" + +struct igc_reg_info { + u32 ofs; + char *name; +}; + +static const struct igc_reg_info igc_reg_info_tbl[] = { + /* General Registers */ + {IGC_CTRL, "CTRL"}, + {IGC_STATUS, "STATUS"}, + {IGC_CTRL_EXT, "CTRL_EXT"}, + {IGC_MDIC, "MDIC"}, + + /* Interrupt Registers */ + {IGC_ICR, "ICR"}, + + /* RX Registers */ + {IGC_RCTL, "RCTL"}, + {IGC_RDLEN(0), "RDLEN"}, + {IGC_RDH(0), "RDH"}, + {IGC_RDT(0), "RDT"}, + {IGC_RXDCTL(0), "RXDCTL"}, + {IGC_RDBAL(0), "RDBAL"}, + {IGC_RDBAH(0), "RDBAH"}, + + 
/* TX Registers */ + {IGC_TCTL, "TCTL"}, + {IGC_TDBAL(0), "TDBAL"}, + {IGC_TDBAH(0), "TDBAH"}, + {IGC_TDLEN(0), "TDLEN"}, + {IGC_TDH(0), "TDH"}, + {IGC_TDT(0), "TDT"}, + {IGC_TXDCTL(0), "TXDCTL"}, + {IGC_TDFH, "TDFH"}, + {IGC_TDFT, "TDFT"}, + {IGC_TDFHS, "TDFHS"}, + {IGC_TDFPC, "TDFPC"}, + + /* List Terminator */ + {} +}; + +/* igc_regdump - register printout routine */ +static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case IGC_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDLEN(n)); + break; + case IGC_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDH(n)); + break; + case IGC_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDT(n)); + break; + case IGC_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RXDCTL(n)); + break; + case IGC_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAL(n)); + break; + case IGC_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAH(n)); + break; + case IGC_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAL(n)); + break; + case IGC_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAH(n)); + break; + case IGC_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDLEN(n)); + break; + case IGC_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDH(n)); + break; + case IGC_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDT(n)); + break; + case IGC_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TXDCTL(n)); + break; + default: + pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igc_rings_dump - Tx-rings and Rx-rings */ +void igc_rings_dump(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct my_u0 { u64 a; u64 b; } *u0; + union igc_adv_tx_desc *tx_desc; + union igc_adv_rx_desc *rx_desc; + struct igc_ring *tx_ring; + struct igc_ring *rx_ring; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igc_tx_buffer *buffer_info; + + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + + pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + *
+--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igc_tx_buffer *buffer_info; + + tx_desc = IGC_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info(" %5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igc_rx_buffer *buffer_info; + + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGC_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if 
(staterr & IGC_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address + (buffer_info->page) + + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + true); + } + } + } + } + +exit: + return; +} + +/* igc_regs_dump - registers dump */ +void igc_regs_dump(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + struct igc_reg_info *reginfo; + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl; + reginfo->name; reginfo++) { + igc_regdump(hw, reginfo); + } +} diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index ee07011e13e9..f530fc29b074 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -308,6 +308,65 @@ static void igc_get_regs(struct net_device *netdev, regs_buff[168 + i] = rd32(IGC_TXDCTL(i)); } +static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & IGC_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IGC_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IGC_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & IGC_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & IGC_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IGC_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IGC_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IGC_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IGC_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= IGC_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + static u32 igc_get_msglevel(struct net_device *netdev) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -802,27 +861,6 @@ static int igc_set_coalesce(struct net_device *netdev, struct igc_adapter *adapter = netdev_priv(netdev); int i; - if (ec->rx_max_coalesced_frames || - ec->rx_coalesce_usecs_irq || - ec->rx_max_coalesced_frames_irq || - ec->tx_max_coalesced_frames || - ec->tx_coalesce_usecs_irq || - ec->stats_block_coalesce_usecs || - ec->use_adaptive_rx_coalesce || - ec->use_adaptive_tx_coalesce || - ec->pkt_rate_low || - ec->rx_coalesce_usecs_low || - ec->rx_max_coalesced_frames_low || - ec->tx_coalesce_usecs_low || - ec->tx_max_coalesced_frames_low || - ec->pkt_rate_high || - ec->rx_coalesce_usecs_high || - ec->rx_max_coalesced_frames_high || - ec->tx_coalesce_usecs_high || - ec->tx_max_coalesced_frames_high || - ec->rate_sample_interval) - return -ENOTSUPP; - if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || (ec->rx_coalesce_usecs > 3 && ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || @@ -1856,9 +1894,12 @@ static int igc_set_link_ksettings(struct net_device *netdev, } static const struct ethtool_ops igc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = igc_get_drvinfo, .get_regs_len = igc_get_regs_len, .get_regs = igc_get_regs, + .get_wol = igc_get_wol, + .set_wol = igc_set_wol, .get_msglevel = igc_get_msglevel, .set_msglevel = igc_set_msglevel, .nway_reset = igc_nway_reset, diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index d9d5425fe8d9..69fa1ce1f927 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3546,6 +3546,8 @@ static void igc_reset_task(struct work_struct *work) adapter = container_of(work, struct igc_adapter, reset_task); + igc_rings_dump(adapter); + igc_regs_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igc_reinit_locked(adapter); } @@ -4029,6 +4031,9 @@ static void igc_watchdog_task(struct work_struct *work) } } if (link) { + /* Cancel scheduled suspend requests. 
*/ + pm_runtime_resume(netdev->dev.parent); + if (!netif_carrier_ok(netdev)) { u32 ctrl; @@ -4114,6 +4119,8 @@ no_wait: return; } } + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); /* also check for alternate media here */ } else if (!netif_carrier_ok(netdev) && @@ -4337,6 +4344,7 @@ request_done: static int __igc_open(struct net_device *netdev, bool resuming) { struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; struct igc_hw *hw = &adapter->hw; int err = 0; int i = 0; @@ -4348,6 +4356,9 @@ static int __igc_open(struct net_device *netdev, bool resuming) return -EBUSY; } + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + netif_carrier_off(netdev); /* allocate transmit descriptors */ @@ -4386,6 +4397,9 @@ static int __igc_open(struct net_device *netdev, bool resuming) rd32(IGC_ICR); igc_irq_enable(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + netif_tx_start_all_queues(netdev); /* start the watchdog. */ @@ -4404,6 +4418,8 @@ err_setup_rx: igc_free_all_tx_resources(adapter); err_setup_tx: igc_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); return err; } @@ -4428,9 +4444,13 @@ static int igc_open(struct net_device *netdev) static int __igc_close(struct net_device *netdev, bool suspending) { struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + igc_down(adapter); igc_release_hw_control(adapter); @@ -4440,6 +4460,9 @@ static int __igc_close(struct net_device *netdev, bool suspending) igc_free_all_tx_resources(adapter); igc_free_all_rx_resources(adapter); + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + return 0; } @@ -4766,6 +4789,16 @@ static int igc_probe(struct pci_dev *pdev, hw->fc.requested_mode = igc_fc_default; hw->fc.current_mode = igc_fc_default; + /* By default, support wake on port A */ + adapter->flags |= IGC_FLAG_WOL_SUPPORTED; + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) + adapter->wol |= IGC_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGC_FLAG_WOL_SUPPORTED); + /* reset the hardware with the new settings */ igc_reset(adapter); @@ -4792,6 +4825,10 @@ static int igc_probe(struct pci_dev *pdev, pcie_print_link_status(pdev); netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); + + pm_runtime_put_noidle(&pdev->dev); + return 0; err_register: @@ -4826,6 +4863,8 @@ static void igc_remove(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct igc_adapter *adapter = netdev_priv(netdev); + pm_runtime_get_noresume(&pdev->dev); + igc_ptp_stop(adapter); set_bit(__IGC_DOWN, &adapter->state); @@ -4870,6 +4909,8 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, if (netif_running(netdev)) __igc_close(netdev, true); + igc_ptp_suspend(adapter); + igc_clear_interrupt_scheme(adapter); rtnl_unlock(); @@ -5045,6 +5086,108 @@ static void igc_shutdown(struct pci_dev *pdev) } } +/** + * igc_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current PCI connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ **/ +static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igc_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igc_io_slot_reset - called after the PCI bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igc_resume routine. + **/ +static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Could not re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter loses its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igc_reset(adapter); + wr32(IGC_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igc_io_resume - called when traffic can start to flow again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation. Implementation resembles the + * second-half of the igc_resume routine. + */ +static void igc_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + if (igc_open(netdev)) { + dev_err(&pdev->dev, "igc_open failed after reset\n"); + rtnl_unlock(); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + rtnl_unlock(); +} + +static const struct pci_error_handlers igc_err_handler = { + .error_detected = igc_io_error_detected, + .slot_reset = igc_io_slot_reset, + .resume = igc_io_resume, +}; + #ifdef CONFIG_PM static const struct dev_pm_ops igc_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) @@ -5062,6 +5205,7 @@ static struct pci_driver igc_driver = { .driver.pm = &igc_pm_ops, #endif .shutdown = igc_shutdown, + .err_handler = &igc_err_handler, }; /** diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 693506587198..f99c514ad0f4 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -509,7 +509,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) * This work function polls the TSYNCTXCTL valid bit to determine when a * timestamp has been taken for the current stored skb.
*/ -void igc_ptp_tx_work(struct work_struct *work) +static void igc_ptp_tx_work(struct work_struct *work) { struct igc_adapter *adapter = container_of(work, struct igc_adapter, ptp_tx_work); diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index c9029b549b90..d4af53a80f11 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -17,6 +17,11 @@ /* Internal Packet Buffer Size Registers */ #define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ #define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ +#define IGC_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define IGC_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define IGC_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define IGC_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define IGC_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ /* NVM Register Descriptions */ #define IGC_EERD 0x12014 /* EEprom mode read - RW */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 39e73ad60352..2833e4f041ce 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -462,7 +462,7 @@ struct ixgbe_q_vector { char name[IFNAMSIZ + 9]; /* for dynamic allocation of rings associated with this q_vector */ - struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; + struct ixgbe_ring ring[] ____cacheline_internodealigned_in_smp; }; #ifdef CONFIG_IXGBE_HWMON diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 7c52ae8ac005..c6bf0a50ee63 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -3444,6 +3444,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) } static const struct ethtool_ops ixgbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = ixgbe_get_drvinfo, .get_regs_len = ixgbe_get_regs_len, .get_regs = ixgbe_get_regs, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index ccd852ad62a4..ec7a11d13fdc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -968,8 +968,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - int i, pos; - u8 buf[8]; + u64 dsn; if (!info) return -EINVAL; @@ -985,17 +984,11 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, /* Serial Number */ /* Get the PCI-e Device Serial Number Capability */ - pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); - if (pos) { - pos += 4; - for (i = 0; i < 8; i++) - pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); - + dsn = pci_get_dsn(adapter->pdev); + if (dsn) snprintf(info->serial_number, sizeof(info->serial_number), - "%02X%02X%02X%02X%02X%02X%02X%02X", - buf[7], buf[6], buf[5], buf[4], - buf[3], buf[2], buf[1], buf[0]); - } else + "%016llX", dsn); + else snprintf(info->serial_number, sizeof(info->serial_number), "Unknown"); diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index f7f309c96fa8..988fa49fa99a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -968,6 +968,7 @@ static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 
priv_flags) } static const struct ethtool_ops ixgbevf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = ixgbevf_get_drvinfo, .get_regs_len = ixgbevf_get_regs_len, .get_regs = ixgbevf_get_regs,
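The .supported_coalesce_params entries added to the e1000, e1000e, igb, igbvf, igc, ixgbe and ixgbevf ethtool_ops above all follow one pattern: the driver declares which struct ethtool_coalesce fields it honors, and the ethtool core rejects everything else up front, which is why the hand-rolled field checks in igb_set_coalesce() and igc_set_coalesce() are deleted. A minimal sketch of the pattern for a hypothetical driver follows; the foo_* names are illustrative only (not part of this patch), and a real set_coalesce would program hardware instead of returning 0.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* hypothetical helper: write the Rx interrupt throttle rate register */
static int foo_program_rx_itr(struct net_device *netdev, u32 usecs)
{
	return 0;	/* hardware write elided in this sketch */
}

static int foo_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	ec->rx_coalesce_usecs = 50;	/* report the current setting */
	return 0;
}

static int foo_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	/* the ethtool core has already rejected any field outside
	 * supported_coalesce_params, so only rx_coalesce_usecs can
	 * be non-zero here
	 */
	return foo_program_rx_itr(netdev, ec->rx_coalesce_usecs);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_coalesce = foo_get_coalesce,
	.set_coalesce = foo_set_coalesce,
};

A side effect worth noting: unsupported coalesce fields now fail with -EOPNOTSUPP from the ethtool core rather than the non-UAPI -ENOTSUPP the drivers used to return, consistent with the ENOTSUPP to EOPNOTSUPP conversions in ice_xsk.h above.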