From d7840976e3915669382c62ddd1700960f348328e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 22 Jul 2019 20:08:25 -0700 Subject: net: Use skb accessors in network drivers In preparation for unifying the skb_frag and bio_vec, use the fine accessors which already exist and use skb_frag_t instead of struct skb_frag_struct. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ice/ice_txrx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 3c83230434b6..dd7392f293bf 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1521,7 +1521,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, { u64 td_offset, td_tag, td_cmd; u16 i = tx_ring->next_to_use; - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned int data_len, size; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; @@ -1923,7 +1923,7 @@ static unsigned int ice_txd_use_count(unsigned int size) */ static unsigned int ice_xmit_desc_count(struct sk_buff *skb) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int count = 0, size = skb_headlen(skb); @@ -1954,7 +1954,7 @@ static unsigned int ice_xmit_desc_count(struct sk_buff *skb) */ static bool __ice_chk_linearize(struct sk_buff *skb) { - const struct skb_frag_struct *frag, *stale; + const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ -- cgit v1.2.3 From 5a056cd7ead2b72b00fea8a6819fb93eeb12e313 Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Wed, 26 Jun 2019 02:20:12 -0700 Subject: ice: add lp_advertising flow control support Add support for reporting link partner advertising when ETHTOOL_GLINKSETTINGS defined. Get pause param reports the Tx/Rx pause configured, and then ethtool issues ETHTOOL_GSET ioctl and ice_get_settings_link_up reports the negotiated Tx/Rx pause. Negotiated pause frame report per IEEE 802.3-2005 table 288-3. $ ethtool --show-pause ens6f0 Pause parameters for ens6f0: Autonegotiate: on RX: on TX: on RX negotiated: on TX negotiated: on $ ethtool ens6f0 Settings for ens6f0: Supported ports: [ FIBRE ] Supported link modes: 25000baseCR/Full Supported pause frame use: Symmetric Supports auto-negotiation: Yes Supported FEC modes: None BaseR RS Advertised link modes: 25000baseCR/Full Advertised pause frame use: Symmetric Receive-only Advertised auto-negotiation: Yes Advertised FEC modes: None BaseR RS Link partner advertised link modes: Not reported Link partner advertised pause frame use: Symmetric Link partner advertised auto-negotiation: Yes Link partner advertised FEC modes: Not reported Speed: 25000Mb/s Duplex: Full Port: Direct Attach Copper PHYAD: 0 Transceiver: internal Auto-negotiation: on Supports Wake-on: g Wake-on: g Current message level: 0x00000007 (7) drv probe link Link detected: yes When ETHTOOL_GLINKSETTINGS is not defined, get pause param reports the negotiated Tx/Rx pause. 
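The negotiated pause state reported here follows the resolution rules of IEEE 802.3-2005 Annex 28B (Table 28B-3), which combine the locally advertised and link-partner advertised PAUSE/ASM_DIR bits. As a hedged illustration only (this helper is hypothetical and not part of the patch), the resolution can be sketched as:

/* Illustrative sketch of IEEE 802.3-2005 Table 28B-3 pause resolution.
 * Inputs are the PAUSE and ASM_DIR bits advertised locally and by the
 * link partner; outputs are the negotiated Rx/Tx pause for the local port.
 */
static void resolve_pause_28b3(bool loc_pause, bool loc_asm,
			       bool lp_pause, bool lp_asm,
			       bool *rx_pause, bool *tx_pause)
{
	*rx_pause = false;
	*tx_pause = false;

	if (loc_pause && lp_pause) {
		/* both ends advertise symmetric pause */
		*rx_pause = true;
		*tx_pause = true;
	} else if (!loc_pause && loc_asm && lp_pause && lp_asm) {
		/* local end sends PAUSE frames but does not honor them */
		*tx_pause = true;
	} else if (loc_pause && loc_asm && !lp_pause && lp_asm) {
		/* local end honors received PAUSE frames but sends none */
		*rx_pause = true;
	}
}

Reporting the link partner's pause advertisement via lp_advertising is what lets ethtool perform this resolution and print the "RX negotiated" / "TX negotiated" lines shown in the example output above.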
Signed-off-by: Paul Greenwalt Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_ethtool.c | 104 ++++++++++++++++++--------- 1 file changed, 72 insertions(+), 32 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 52083a63dee6..d3ba535bd65a 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1716,6 +1716,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_port_info *pi = np->vsi->port_info; struct ethtool_link_ksettings cap_ksettings; struct ice_link_status *link_info; struct ice_vsi *vsi = np->vsi; @@ -2040,6 +2041,33 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, break; } ks->base.duplex = DUPLEX_FULL; + + if (link_info->an_info & ICE_AQ_AN_COMPLETED) + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Autoneg); + + /* Set flow control negotiated Rx/Tx pause */ + switch (pi->fc.current_mode) { + case ICE_FC_FULL: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause); + break; + case ICE_FC_TX_PAUSE: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + break; + case ICE_FC_RX_PAUSE: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + break; + case ICE_FC_PFC: + /* fall through */ + default: + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause); + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, + Asym_Pause); + break; + } } /** @@ -2078,9 +2106,12 @@ ice_get_link_ksettings(struct net_device *netdev, struct ice_aqc_get_phy_caps_data *caps; struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; + enum ice_status status; + int err = 0; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); hw_link_info = &vsi->port_info->phy.link_info; /* set speed and duplex */ @@ -2125,48 +2156,36 @@ ice_get_link_ksettings(struct net_device *netdev, /* flow control is symmetric and always supported */ ethtool_link_ksettings_add_link_mode(ks, supported, Pause); - switch (vsi->port_info->fc.req_mode) { - case ICE_FC_FULL: + caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + if (!caps) + return -ENOMEM; + + status = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_SW_CFG, caps, NULL); + if (status) { + err = -EIO; + goto done; + } + + /* Set the advertised flow control based on the PHY capability */ + if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && + (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) { ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); - break; - case ICE_FC_TX_PAUSE: ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); - break; - case ICE_FC_RX_PAUSE: + } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) { ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); - break; - case ICE_FC_PFC: - default: + } else { ethtool_link_ksettings_del_link_mode(ks, advertising, Pause); ethtool_link_ksettings_del_link_mode(ks, advertising, 
Asym_Pause); - break; } - caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); - if (!caps) - goto done; - - if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP, - caps, NULL)) - netdev_info(netdev, "Get phy capability failed.\n"); - - /* Set supported FEC modes based on PHY capability */ - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - - if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN || - caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN) - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - - if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG, - caps, NULL)) - netdev_info(netdev, "Get phy capability failed.\n"); - /* Set advertised FEC modes based on PHY capability */ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); @@ -2178,9 +2197,25 @@ ice_get_link_ksettings(struct net_device *netdev, caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + status = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_TOPO_CAP, caps, NULL); + if (status) { + err = -EIO; + goto done; + } + + /* Set supported FEC modes based on PHY capability */ + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + + if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + done: devm_kfree(&vsi->back->pdev->dev, caps); - return 0; + return err; } /** @@ -2763,6 +2798,11 @@ static int ice_nway_reset(struct net_device *netdev) * ice_get_pauseparam - Get Flow Control status * @netdev: network interface device structure * @pause: ethernet pause (flow control) parameters + * + * Get requested flow control status from PHY capability. + * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which + * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report + * the negotiated Rx/Tx pause via lp_advertising. */ static void ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) -- cgit v1.2.3 From 36517fd397f124acfa396e770468530136f4207d Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 26 Jun 2019 02:20:13 -0700 Subject: ice: track hardware stat registers past rollover Currently, ice_stat_update32 and ice_stat_update40 will limit the value of the software statistic to 32 or 40 bits wide, depending on which register is being read. This means that if a driver is running for a long time, the displayed software register values will roll over to zero at 40 bits or 32 bits. This occurs because the functions directly assign the difference between the previous value and current value of the hardware statistic. Instead, add this value to the current software statistic, and then update the previous value. In this way, each time ice_stat_update40 or ice_stat_update32 are called, they will increment the software tracking value by the difference of the hardware register from its last read. The software tracking value will correctly count up until it overflows a u64. 
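As a hedged sketch (not the patch itself, which follows below), the accumulation scheme described above looks roughly like this, with reg_width being 40 or 32 depending on the register:

/* Sketch of the rollover-safe accumulation. The software counter is only
 * ever advanced by the delta between two hardware reads, so it keeps
 * counting past the 32/40-bit hardware rollover until the u64 itself
 * would overflow. The first read only seeds the previous value.
 */
static void stat_accumulate(u64 hw_now, bool prev_loaded,
			    u64 *hw_prev, u64 *sw_stat,
			    unsigned int reg_width)
{
	if (!prev_loaded) {
		*hw_prev = hw_now;
		return;
	}

	if (hw_now >= *hw_prev)
		*sw_stat += hw_now - *hw_prev;
	else	/* hardware counter wrapped since the last read */
		*sw_stat += (hw_now + BIT_ULL(reg_width)) - *hw_prev;

	*hw_prev = hw_now;	/* remember this read for the next delta */
}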
The only requirement is that the ice_stat_update functions be called at least once each time the hardware register overflows. While we're fixing ice_stat_update40, modify it to use rd64 instead of two calls to rd32. Additionally, drop the now unnecessary hireg function parameter. Signed-off-by: Jacob Keller Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 57 ++++++++++------ drivers/net/ethernet/intel/ice/ice_common.h | 4 +- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 30 --------- drivers/net/ethernet/intel/ice/ice_lib.c | 40 +++++------ drivers/net/ethernet/intel/ice/ice_main.c | 90 +++++++++++-------------- 5 files changed, 91 insertions(+), 130 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2e0731c1e1a3..4be3559de207 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -3240,40 +3240,44 @@ void ice_replay_post(struct ice_hw *hw) /** * ice_stat_update40 - read 40 bit stat from the chip and update stat values * @hw: ptr to the hardware info - * @hireg: high 32 bit HW register to read from - * @loreg: low 32 bit HW register to read from + * @reg: offset of 64 bit HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ void -ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) +ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat) { - u64 new_data; - - new_data = rd32(hw, loreg); - new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; + u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1); /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. + * when the driver starts. Thus, save the value from the first read + * without adding to the statistic value so that we report stats which + * count up from zero. */ - if (!prev_stat_loaded) + if (!prev_stat_loaded) { *prev_stat = new_data; + return; + } + + /* Calculate the difference between the new and old values, and then + * add it to the software stat value. + */ if (new_data >= *prev_stat) - *cur_stat = new_data - *prev_stat; + *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; - *cur_stat &= 0xFFFFFFFFFFULL; + *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat; + + /* Update the previously stored value to prepare for next read */ + *prev_stat = new_data; } /** * ice_stat_update32 - read 32 bit stat from the chip and update stat values * @hw: ptr to the hardware info - * @reg: HW register to read from + * @reg: offset of HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value @@ -3287,17 +3291,26 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, new_data = rd32(hw, reg); /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. 
So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. + * when the driver starts. Thus, save the value from the first read + * without adding to the statistic value so that we report stats which + * count up from zero. */ - if (!prev_stat_loaded) + if (!prev_stat_loaded) { *prev_stat = new_data; + return; + } + + /* Calculate the difference between the new and old values, and then + * add it to the software stat value. + */ if (new_data >= *prev_stat) - *cur_stat = new_data - *prev_stat; + *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; + *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; + + /* Update the previously stored value to prepare for next read */ + *prev_stat = new_data; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index d1f8353fe6bb..68218e63afc2 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -123,8 +123,8 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); void -ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat); void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 6c5ce05742b1..3250dfc00002 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -281,14 +281,10 @@ #define GL_PWR_MODE_CTL 0x000B820C #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) -#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) #define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) -#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) #define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) #define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) -#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) #define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) -#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) #define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) #define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) #define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) @@ -296,38 +292,22 @@ #define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) #define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) #define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) -#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) #define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) -#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) #define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) #define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) -#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) #define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) -#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) #define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) -#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) #define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) -#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) #define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) -#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) #define 
GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) -#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) #define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) -#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) #define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) -#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) #define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) -#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) #define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) -#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) #define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) -#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) #define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) -#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) #define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) -#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) #define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) -#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) #define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) #define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64)) #define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64)) @@ -340,27 +320,17 @@ #define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) #define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64)) #define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) -#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) #define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) -#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) #define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) -#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) #define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) -#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) #define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) -#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) #define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) -#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) #define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) -#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) #define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) -#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) #define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) #define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) -#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) -#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) #define PF_VT_PFALLOC_HIF 0x0009DD80 #define VSIQF_HKEY_MAX_INDEX 12 diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index a19f5920733b..e9e8340b1ab7 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1477,40 +1477,32 @@ void ice_update_eth_stats(struct ice_vsi *vsi) prev_es = &vsi->eth_stats_prev; cur_es = &vsi->eth_stats; - ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_bytes, - &cur_es->rx_bytes); + ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_bytes, &cur_es->rx_bytes); - ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_unicast, - &cur_es->rx_unicast); + ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_unicast, &cur_es->rx_unicast); - ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_multicast, - &cur_es->rx_multicast); + ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, + 
&prev_es->rx_multicast, &cur_es->rx_multicast); - ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_broadcast, - &cur_es->rx_broadcast); + ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_broadcast, &cur_es->rx_broadcast); ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, &prev_es->rx_discards, &cur_es->rx_discards); - ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_bytes, - &cur_es->tx_bytes); + ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_bytes, &cur_es->tx_bytes); - ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_unicast, - &cur_es->tx_unicast); + ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_unicast, &cur_es->tx_unicast); - ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_multicast, - &cur_es->tx_multicast); + ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_multicast, &cur_es->tx_multicast); - ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_broadcast, - &cur_es->tx_broadcast); + ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_broadcast, &cur_es->tx_broadcast); ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, &prev_es->tx_errors, &cur_es->tx_errors); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 63db08d9bafa..f490e65c64bc 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3176,96 +3176,82 @@ static void ice_update_pf_stats(struct ice_pf *pf) cur_ps = &pf->stats; pf_id = hw->pf_id; - ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, + ice_stat_update40(hw, GLPRT_GORCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.rx_bytes, &cur_ps->eth.rx_bytes); - ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, + ice_stat_update40(hw, GLPRT_UPRCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.rx_unicast, &cur_ps->eth.rx_unicast); - ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, + ice_stat_update40(hw, GLPRT_MPRCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.rx_multicast, &cur_ps->eth.rx_multicast); - ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, + ice_stat_update40(hw, GLPRT_BPRCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.rx_broadcast, &cur_ps->eth.rx_broadcast); - ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, + ice_stat_update40(hw, GLPRT_GOTCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.tx_bytes, &cur_ps->eth.tx_bytes); - ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, + ice_stat_update40(hw, GLPRT_UPTCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.tx_unicast, &cur_ps->eth.tx_unicast); - ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, + ice_stat_update40(hw, GLPRT_MPTCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.tx_multicast, &cur_ps->eth.tx_multicast); - 
ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, + ice_stat_update40(hw, GLPRT_BPTCL(pf_id), pf->stat_prev_loaded, + &prev_ps->eth.tx_broadcast, &cur_ps->eth.tx_broadcast); ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, &prev_ps->tx_dropped_link_down, &cur_ps->tx_dropped_link_down); - ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_64, - &cur_ps->rx_size_64); + ice_stat_update40(hw, GLPRT_PRC64L(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_size_64, &cur_ps->rx_size_64); - ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_127, - &cur_ps->rx_size_127); + ice_stat_update40(hw, GLPRT_PRC127L(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_size_127, &cur_ps->rx_size_127); - ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_255, - &cur_ps->rx_size_255); + ice_stat_update40(hw, GLPRT_PRC255L(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_size_255, &cur_ps->rx_size_255); - ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_511, - &cur_ps->rx_size_511); + ice_stat_update40(hw, GLPRT_PRC511L(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_size_511, &cur_ps->rx_size_511); - ice_stat_update40(hw, GLPRT_PRC1023H(pf_id), - GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); - ice_stat_update40(hw, GLPRT_PRC1522H(pf_id), - GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); - ice_stat_update40(hw, GLPRT_PRC9522H(pf_id), - GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, &prev_ps->rx_size_big, &cur_ps->rx_size_big); - ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_64, - &cur_ps->tx_size_64); + ice_stat_update40(hw, GLPRT_PTC64L(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_size_64, &cur_ps->tx_size_64); - ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_127, - &cur_ps->tx_size_127); + ice_stat_update40(hw, GLPRT_PTC127L(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_size_127, &cur_ps->tx_size_127); - ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_255, - &cur_ps->tx_size_255); + ice_stat_update40(hw, GLPRT_PTC255L(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_size_255, &cur_ps->tx_size_255); - ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_511, - &cur_ps->tx_size_511); + ice_stat_update40(hw, GLPRT_PTC511L(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_size_511, &cur_ps->tx_size_511); - ice_stat_update40(hw, GLPRT_PTC1023H(pf_id), - GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); - ice_stat_update40(hw, GLPRT_PTC1522H(pf_id), - GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); - ice_stat_update40(hw, GLPRT_PTC9522H(pf_id), - GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, + 
ice_stat_update40(hw, GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, &prev_ps->tx_size_big, &cur_ps->tx_size_big); ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, -- cgit v1.2.3 From 17bc6d07212c8bc4521056a7f871d143192d385c Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 26 Jun 2019 02:20:14 -0700 Subject: ice: Move vector base setup to PF VSI When interrupt tracking was refactored, during rebuild, the call to ice_vsi_setup_vector_base() was inadvertently removed from the PF VSI instead of being removed from the VF VSI. During reset, the failure to properly setup the vector base generates a call trace. Correct this so that resets/rebuilds properly complete. Fixes: cbe66bfee6a0 ("ice: Refactor interrupt tracking") Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index e9e8340b1ab7..01f38abd4277 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2978,6 +2978,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_rings; + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto err_vectors; + ret = ice_vsi_set_q_vectors_reg_idx(vsi); if (ret) goto err_vectors; @@ -2999,10 +3003,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_rings; - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_vectors; - ret = ice_vsi_set_q_vectors_reg_idx(vsi); if (ret) goto err_vectors; -- cgit v1.2.3 From c31a5c25bb19c62d1cea69d3abcc7e0405bd4596 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Wed, 26 Jun 2019 02:20:15 -0700 Subject: ice: Always set prefena when configuring an Rx queue Currently we are always setting prefena to 0. This is causing the hardware to only fetch descriptors when there are none free in the cache for a received packet instead of prefetching when it has used the last descriptor regardless of incoming packets. Fix this by allowing the hardware to prefetch Rx descriptors. 
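With this change a caller of ice_write_rxq_ctx() no longer has to remember to enable prefetching itself; the prefena field is forced to 1 just before the context is written to hardware. A hedged usage sketch (the caller below is hypothetical):

/* Hypothetical caller: only the queue-specific fields need to be filled in;
 * ice_write_rxq_ctx() validates the pointer and sets prefena = 1 before
 * converting the context to its dense form and writing it to hardware.
 */
static enum ice_status example_cfg_rxq(struct ice_hw *hw, u32 rxq_index)
{
	struct ice_rlan_ctx rlan_ctx = { 0 };

	/* base address, queue length, buffer sizes, etc. would be set here;
	 * prefena is intentionally left untouched by the caller.
	 */

	return ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
}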
Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 9 ++++++++- drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 4be3559de207..01e5ecaaa322 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1078,6 +1078,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195), ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), + ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201), { 0 } }; @@ -1088,7 +1089,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { * @rxq_index: the index of the Rx queue * * Converts rxq context from sparse to dense structure and then writes - * it to HW register space + * it to HW register space and enables the hardware to prefetch descriptors + * instead of only fetching them on demand */ enum ice_status ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, @@ -1096,6 +1098,11 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, { u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; + if (!rlan_ctx) + return ICE_ERR_BAD_PTR; + + rlan_ctx->prefena = 1; + ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); } diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 510a8c900e61..57ea6811fe2c 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -290,6 +290,7 @@ struct ice_rlan_ctx { u8 tphdata_ena; u8 tphhead_ena; u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 prefena; /* NOTE: normally must be set to 1 at init */ }; struct ice_ctx_ele { -- cgit v1.2.3 From 5c91ecfda5a8bfbc4697b35fe875cbc43a0f9100 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 26 Jun 2019 02:20:16 -0700 Subject: ice: separate out control queue lock creation The ice_init_all_ctrlq and ice_shutdown_all_ctrlq functions create and destroy the locks used to protect the send and receive process of each control queue. This is problematic, as the driver may use these functions to shutdown and re-initialize the control queues at run time. For example, it may do this in response to a device reset. If the driver failed to recover from a reset, it might leave the control queues offline. In this case, the locks will no longer be initialized. A later call to ice_sq_send_cmd will then attempt to acquire a lock that has been destroyed. It is incorrect behavior to access a lock that has been destroyed. Indeed, ice_aq_send_cmd already tries to avoid accessing an offline control queue, but the check occurs inside the lock. The root of the problem is that the locks are destroyed at run time. Modify ice_init_all_ctrlq and ice_shutdown_all_ctrlq such that they no longer create or destroy the locks. Introduce new functions, ice_create_all_ctrlq and ice_destroy_all_ctrlq. Call these functions in ice_init_hw and ice_deinit_hw. Now, the control queue locks will remain valid for the life of the driver, and will not be destroyed until the driver unloads. This also allows removing a duplicate check of the sq.count and rq.count values when shutting down the controlqs. 
The ice_shutdown_ctrlq function already checks this value under the lock. Previously commit dec64ff10ed9 ("ice: use [sr]q.count when checking if queue is initialized") needed this check to happen outside the lock, because it prevented duplicate attempts at destroying the locks. The driver may now safely use ice_init_all_ctrlq and ice_shutdown_all_ctrlq while handling reset events, without causing the locks to be invalid. Signed-off-by: Jacob Keller Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 6 +- drivers/net/ethernet/intel/ice/ice_common.h | 2 + drivers/net/ethernet/intel/ice/ice_controlq.c | 112 ++++++++++++++++++++------ 3 files changed, 91 insertions(+), 29 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 01e5ecaaa322..5f9dc76699d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -740,7 +740,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ice_get_itr_intrl_gran(hw); - status = ice_init_all_ctrlq(hw); + status = ice_create_all_ctrlq(hw); if (status) goto err_unroll_cqinit; @@ -855,7 +855,7 @@ err_unroll_sched: err_unroll_alloc: devm_kfree(ice_hw_to_dev(hw), hw->port_info); err_unroll_cqinit: - ice_shutdown_all_ctrlq(hw); + ice_destroy_all_ctrlq(hw); return status; } @@ -881,7 +881,7 @@ void ice_deinit_hw(struct ice_hw *hw) /* Attempt to disable FW logging before shutting down control queues */ ice_cfg_fw_log(hw, false); - ice_shutdown_all_ctrlq(hw); + ice_destroy_all_ctrlq(hw); /* Clear VSI contexts if not already cleared */ ice_clear_all_vsi_ctx(hw); diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 68218e63afc2..e376d1eadba4 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -17,8 +17,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw); enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); +enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); void ice_shutdown_all_ctrlq(struct ice_hw *hw); +void ice_destroy_all_ctrlq(struct ice_hw *hw); enum ice_status ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index e91ac4df0242..2353166c654e 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -310,7 +310,7 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @cq: pointer to the specific Control queue * * This is the main initialization routine for the Control Send Queue - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->sq_buf_size @@ -369,7 +369,7 @@ init_ctrlq_exit: * @cq: pointer to the specific Control queue * * The main initialization routine for the Admin Receive (Event) Queue. 
- * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_rq_entries * - cq->rq_buf_size @@ -569,14 +569,8 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) return 0; init_ctrlq_free_rq: - if (cq->rq.count) { - ice_shutdown_rq(hw, cq); - mutex_destroy(&cq->rq_lock); - } - if (cq->sq.count) { - ice_shutdown_sq(hw, cq); - mutex_destroy(&cq->sq_lock); - } + ice_shutdown_rq(hw, cq); + ice_shutdown_sq(hw, cq); return status; } @@ -585,12 +579,14 @@ init_ctrlq_free_rq: * @hw: pointer to the hardware structure * @q_type: specific Control queue type * - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size + * + * NOTE: this function does not initialize the controlq locks */ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { @@ -616,8 +612,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) !cq->rq_buf_size || !cq->sq_buf_size) { return ICE_ERR_CFG; } - mutex_init(&cq->sq_lock); - mutex_init(&cq->rq_lock); /* setup SQ command write back timeout */ cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT; @@ -625,7 +619,7 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) /* allocate the ATQ */ ret_code = ice_init_sq(hw, cq); if (ret_code) - goto init_ctrlq_destroy_locks; + return ret_code; /* allocate the ARQ */ ret_code = ice_init_rq(hw, cq); @@ -637,9 +631,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) init_ctrlq_free_sq: ice_shutdown_sq(hw, cq); -init_ctrlq_destroy_locks: - mutex_destroy(&cq->sq_lock); - mutex_destroy(&cq->rq_lock); return ret_code; } @@ -647,12 +638,14 @@ init_ctrlq_destroy_locks: * ice_init_all_ctrlq - main initialization routine for all control queues * @hw: pointer to the hardware structure * - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver MUST* set the following fields * in the cq->structure for all control queues: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size + * + * NOTE: this function does not initialize the controlq locks. */ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) { @@ -671,10 +664,48 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX); } +/** + * ice_init_ctrlq_locks - Initialize locks for a control queue + * @cq: pointer to the control queue + * + * Initializes the send and receive queue locks for a given control queue. + */ +static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) +{ + mutex_init(&cq->sq_lock); + mutex_init(&cq->rq_lock); +} + +/** + * ice_create_all_ctrlq - main initialization routine for all control queues + * @hw: pointer to the hardware structure + * + * Prior to calling this function, the driver *MUST* set the following fields + * in the cq->structure for all control queues: + * - cq->num_sq_entries + * - cq->num_rq_entries + * - cq->rq_buf_size + * - cq->sq_buf_size + * + * This function creates all the control queue locks and then calls + * ice_init_all_ctrlq. It should be called once during driver load. 
If the + * driver needs to re-initialize control queues at run time it should call + * ice_init_all_ctrlq instead. + */ +enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) +{ + ice_init_ctrlq_locks(&hw->adminq); + ice_init_ctrlq_locks(&hw->mailboxq); + + return ice_init_all_ctrlq(hw); +} + /** * ice_shutdown_ctrlq - shutdown routine for any control queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type + * + * NOTE: this function does not destroy the control queue locks. */ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { @@ -693,19 +724,17 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) return; } - if (cq->sq.count) { - ice_shutdown_sq(hw, cq); - mutex_destroy(&cq->sq_lock); - } - if (cq->rq.count) { - ice_shutdown_rq(hw, cq); - mutex_destroy(&cq->rq_lock); - } + ice_shutdown_sq(hw, cq); + ice_shutdown_rq(hw, cq); } /** * ice_shutdown_all_ctrlq - shutdown routine for all control queues * @hw: pointer to the hardware structure + * + * NOTE: this function does not destroy the control queue locks. The driver + * may call this at runtime to shutdown and later restart control queues, such + * as in response to a reset event. */ void ice_shutdown_all_ctrlq(struct ice_hw *hw) { @@ -715,6 +744,37 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw) ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); } +/** + * ice_destroy_ctrlq_locks - Destroy locks for a control queue + * @cq: pointer to the control queue + * + * Destroys the send and receive queue locks for a given control queue. + */ +static void +ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) +{ + mutex_destroy(&cq->sq_lock); + mutex_destroy(&cq->rq_lock); +} + +/** + * ice_destroy_all_ctrlq - exit routine for all control queues + * @hw: pointer to the hardware structure + * + * This function shuts down all the control queues and then destroys the + * control queue locks. It should be called once during driver unload. The + * driver should call ice_shutdown_all_ctrlq if it needs to shut down and + * reinitialize control queues, such as in response to a reset event. + */ +void ice_destroy_all_ctrlq(struct ice_hw *hw) +{ + /* shut down all the control queues first */ + ice_shutdown_all_ctrlq(hw); + + ice_destroy_ctrlq_locks(&hw->adminq); + ice_destroy_ctrlq_locks(&hw->mailboxq); +} + /** * ice_clean_sq - cleans Admin send queue (ATQ) * @hw: pointer to the hardware structure -- cgit v1.2.3 From 6d5999467df03d1d7fd64ac761ffa20d00ce9e52 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 26 Jun 2019 02:20:17 -0700 Subject: ice: Do not configure port with no media The firmware reports an error when trying to configure a port with no media. Instead of always configuring the port, check for media before attempting to configure it. In the absence of media, turn off link and poll for media to become available before re-enabling link. Move ice_force_phys_link_state() up to avoid forward declaration. 
Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 1 + drivers/net/ethernet/intel/ice/ice_main.c | 239 ++++++++++++++++++++---------- 2 files changed, 158 insertions(+), 82 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 9ee6b55553c0..596b09a905aa 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -337,6 +337,7 @@ enum ice_pf_flags { ICE_FLAG_DCB_CAPABLE, ICE_FLAG_DCB_ENA, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, + ICE_FLAG_NO_MEDIA, ICE_FLAG_ENABLE_FW_LLDP, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_PF_FLAGS_NBITS /* must be last */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f490e65c64bc..91334d1e83ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -810,6 +810,20 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (!vsi || !vsi->port_info) return -EINVAL; + /* turn off PHY if media was removed */ + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && + !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); + + result = ice_aq_set_link_restart_an(pi, false, NULL); + if (result) { + dev_dbg(&pf->pdev->dev, + "Failed to set link down, VSI %d error %d\n", + vsi->vsi_num, result); + return result; + } + } + ice_vsi_link_event(vsi, link_up); ice_print_link_msg(vsi, link_up); @@ -1314,6 +1328,124 @@ static void ice_handle_mdd_event(struct ice_pf *pf) } } +/** + * ice_force_phys_link_state - Force the physical link state + * @vsi: VSI to force the physical link state to up/down + * @link_up: true/false indicates to set the physical link to up/down + * + * Force the physical link state by getting the current PHY capabilities from + * hardware and setting the PHY config based on the determined capabilities. If + * link changes a link event will be triggered because both the Enable Automatic + * Link Update and LESM Enable bits are set when setting the PHY capabilities. 
+ * + * Returns 0 on success, negative on failure + */ +static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_aqc_set_phy_cfg_data *cfg; + struct ice_port_info *pi; + struct device *dev; + int retcode; + + if (!vsi || !vsi->port_info || !vsi->back) + return -EINVAL; + if (vsi->type != ICE_VSI_PF) + return 0; + + dev = &vsi->back->pdev->dev; + + pi = vsi->port_info; + + pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (retcode) { + dev_err(dev, + "Failed to get phy capabilities, VSI %d error %d\n", + vsi->vsi_num, retcode); + retcode = -EIO; + goto out; + } + + /* No change in link */ + if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && + link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) + goto out; + + cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + retcode = -ENOMEM; + goto out; + } + + cfg->phy_type_low = pcaps->phy_type_low; + cfg->phy_type_high = pcaps->phy_type_high; + cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + cfg->low_power_ctrl = pcaps->low_power_ctrl; + cfg->eee_cap = pcaps->eee_cap; + cfg->eeer_value = pcaps->eeer_value; + cfg->link_fec_opt = pcaps->link_fec_options; + if (link_up) + cfg->caps |= ICE_AQ_PHY_ENA_LINK; + else + cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; + + retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); + if (retcode) { + dev_err(dev, "Failed to set phy config, VSI %d error %d\n", + vsi->vsi_num, retcode); + retcode = -EIO; + } + + devm_kfree(dev, cfg); +out: + devm_kfree(dev, pcaps); + return retcode; +} + +/** + * ice_check_media_subtask - Check for media; bring link up if detected. + * @pf: pointer to PF struct + */ +static void ice_check_media_subtask(struct ice_pf *pf) +{ + struct ice_port_info *pi; + struct ice_vsi *vsi; + int err; + + vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); + if (!vsi) + return; + + /* No need to check for media if it's already present or the interface + * is down + */ + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) || + test_bit(__ICE_DOWN, vsi->state)) + return; + + /* Refresh link info and check if media is present */ + pi = vsi->port_info; + err = ice_update_link_info(pi); + if (err) + return; + + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { + err = ice_force_phys_link_state(vsi, true); + if (err) + return; + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); + + /* A Link Status Event will be generated; the event handler + * will complete bringing the interface up + */ + } +} + /** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct @@ -1336,6 +1468,7 @@ static void ice_service_task(struct work_struct *work) return; } + ice_check_media_subtask(pf); ice_check_for_hang_subtask(pf); ice_sync_fltr_subtask(pf); ice_handle_mdd_event(pf); @@ -3357,85 +3490,6 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) } } -/** - * ice_force_phys_link_state - Force the physical link state - * @vsi: VSI to force the physical link state to up/down - * @link_up: true/false indicates to set the physical link to up/down - * - * Force the physical link state by getting the current PHY capabilities from - * hardware and setting the PHY config based on the determined capabilities. 
If - * link changes a link event will be triggered because both the Enable Automatic - * Link Update and LESM Enable bits are set when setting the PHY capabilities. - * - * Returns 0 on success, negative on failure - */ -static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) -{ - struct ice_aqc_get_phy_caps_data *pcaps; - struct ice_aqc_set_phy_cfg_data *cfg; - struct ice_port_info *pi; - struct device *dev; - int retcode; - - if (!vsi || !vsi->port_info || !vsi->back) - return -EINVAL; - if (vsi->type != ICE_VSI_PF) - return 0; - - dev = &vsi->back->pdev->dev; - - pi = vsi->port_info; - - pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); - if (!pcaps) - return -ENOMEM; - - retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, - NULL); - if (retcode) { - dev_err(dev, - "Failed to get phy capabilities, VSI %d error %d\n", - vsi->vsi_num, retcode); - retcode = -EIO; - goto out; - } - - /* No change in link */ - if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && - link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) - goto out; - - cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); - if (!cfg) { - retcode = -ENOMEM; - goto out; - } - - cfg->phy_type_low = pcaps->phy_type_low; - cfg->phy_type_high = pcaps->phy_type_high; - cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - cfg->low_power_ctrl = pcaps->low_power_ctrl; - cfg->eee_cap = pcaps->eee_cap; - cfg->eeer_value = pcaps->eeer_value; - cfg->link_fec_opt = pcaps->link_fec_options; - if (link_up) - cfg->caps |= ICE_AQ_PHY_ENA_LINK; - else - cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; - - retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); - if (retcode) { - dev_err(dev, "Failed to set phy config, VSI %d error %d\n", - vsi->vsi_num, retcode); - retcode = -EIO; - } - - devm_kfree(dev, cfg); -out: - devm_kfree(dev, pcaps); - return retcode; -} - /** * ice_down - Shutdown the connection * @vsi: The VSI being stopped @@ -4281,6 +4335,7 @@ int ice_open(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + struct ice_port_info *pi; int err; if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { @@ -4290,13 +4345,33 @@ int ice_open(struct net_device *netdev) netif_carrier_off(netdev); - err = ice_force_phys_link_state(vsi, true); + pi = vsi->port_info; + err = ice_update_link_info(pi); if (err) { - netdev_err(netdev, - "Failed to set physical link up, error %d\n", err); + netdev_err(netdev, "Failed to get link info, error %d\n", + err); return err; } + /* Set PHY if there is media, otherwise, turn off PHY */ + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { + err = ice_force_phys_link_state(vsi, true); + if (err) { + netdev_err(netdev, + "Failed to set physical link up, error %d\n", + err); + return err; + } + } else { + err = ice_aq_set_link_restart_an(pi, false, NULL); + if (err) { + netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n", + vsi->vsi_num, err); + return err; + } + set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags); + } + err = ice_vsi_open(vsi); if (err) netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", -- cgit v1.2.3 From c7aeb4d1b9bfdb4e07da1c77cb095f02e912a83f Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Wed, 26 Jun 2019 02:20:18 -0700 Subject: ice: Disable VFs until reset is completed This patch adds code to clear VFs enable status until reset is completed, and Tx/Rx rings are setup. 
Without this patch, the code flow request Tx queues to be disabled after reset, especially PFR - where VF VSI Tx rings have already been wiped off in the NVM and result to adminq error based on the call to disable Tx LAN queue in ice_reset_all_vfs function call. Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 91334d1e83ed..d13f56803658 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -488,6 +488,7 @@ static void ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; + u8 i; /* already prepared for reset */ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) @@ -497,6 +498,10 @@ ice_prepare_for_reset(struct ice_pf *pf) if (ice_check_sq_alive(hw, &hw->mailboxq)) ice_vc_notify_reset(pf); + /* Disable VFs until reset is completed */ + for (i = 0; i < pf->num_alloc_vfs; i++) + clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); + /* disable the VSIs and their queues that are not already DOWN */ ice_pf_dis_all_vsi(pf, false); -- cgit v1.2.3 From cb7db35641c9a508247bdcd73831c855c8758cd3 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Wed, 26 Jun 2019 02:20:19 -0700 Subject: ice: Only bump Rx tail and release buffers once per napi_poll Currently we bump the Rx tail and release/give buffers to hardware every 16 descriptors. This causes us to bump Rx tail up to 4 times per napi_poll call. Also we are always bumping tail on an odd index and this is a problem because hardware ignores the lower 3 bits in the QRX_TAIL register. This is making it so hardware sees tail bumps only every 8 descriptors. Instead lets only bump Rx tail once per napi_poll if the value aligns with hardware's expectations of the lower 3 bits being cleared. Also only release/give Rx buffers once per napi_poll call. Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_txrx.c | 42 ++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index dd7392f293bf..0c459305c12f 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -377,18 +377,28 @@ err: */ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) { + u16 prev_ntu = rx_ring->next_to_use; + rx_ring->next_to_use = val; /* update next to alloc since we have filled the ring */ rx_ring->next_to_alloc = val; - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). + /* QRX_TAIL will be updated with any tail value, but hardware ignores + * the lower 3 bits. This makes it so we only bump tail on meaningful + * boundaries. Also, this allows us to bump tail on intervals of 8 up to + * the budget depending on the current traffic load. */ - wmb(); - writel(val, rx_ring->tail); + val &= ~0x7; + if (prev_ntu != val) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + writel(val, rx_ring->tail); + } } /** @@ -445,7 +455,13 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace * - * Returns false if all allocations were successful, true if any fail + * Returns false if all allocations were successful, true if any fail. Returning + * true signals to the caller that we didn't replace cleaned_count buffers and + * there is more work to do. + * + * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx + * buffers. Then bump tail at most one time. Grouping like this lets us avoid + * multiple tail writes per call. */ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) { @@ -990,7 +1006,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); - bool failure = false; + bool failure; /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { @@ -1002,13 +1018,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) u16 vlan_tag = 0; u8 rx_ptype; - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= ICE_RX_BUF_WRITE) { - failure = failure || - ice_alloc_rx_bufs(rx_ring, cleaned_count); - cleaned_count = 0; - } - /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); @@ -1085,6 +1094,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) total_rx_pkts++; } + /* return up to cleaned_count buffers to hardware */ + failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); + /* update queue and vector specific stats */ u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.pkts += total_rx_pkts; -- cgit v1.2.3 From d5a46359171a00539179aefa04115c9c30002617 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Wed, 26 Jun 2019 02:20:20 -0700 Subject: ice: Set up Tx scheduling tree based on alloc VSI Tx queues This patch uses allocated number of Tx queues per VSI to set up its scheduling tree instead of using total number of available Tx queues. Only PF VSIs have total number of allocated Tx queues equal to number of available Tx queues, other VSIs have different number of queues configured. 
Signed-off-by: Akeem G Abodunrin Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 01f38abd4277..e28478215810 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2511,7 +2511,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, /* configure VSI nodes based on number of queues and TC's */ for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); @@ -3020,7 +3020,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) /* configure VSI nodes based on number of queues and TC's */ for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); @@ -3137,7 +3137,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) if (ena_tc & BIT(i)) num_tc++; /* populate max_txqs per TC */ - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; } vsi->tc_cfg.ena_tc = ena_tc; -- cgit v1.2.3 From 66b29e7a884e53e7e32ffb893d9897b299cd5cdf Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Wed, 26 Jun 2019 02:20:21 -0700 Subject: ice: Update number of VF queue before setting VSI resources In case there is a request from a VF to change its number of queues, and the request was successful, we need to update number of queues configured on the VF before updating corresponding VSI for that VF, especially LAN Tx queue tree and TC update, otherwise, we would continued to use old value of vf->num_vf_qs for allocated Tx/Rx queues... Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 5d24b539648f..00aa6364124a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -567,11 +567,6 @@ static int ice_alloc_vf_res(struct ice_vf *vf) int tx_rx_queue_left; int status; - /* setup VF VSI and necessary resources */ - status = ice_alloc_vsi_res(vf); - if (status) - goto ice_alloc_vf_res_exit; - /* Update number of VF queues, in case VF had requested for queue * changes */ @@ -581,6 +576,11 @@ static int ice_alloc_vf_res(struct ice_vf *vf) vf->num_req_qs != vf->num_vf_qs) vf->num_vf_qs = vf->num_req_qs; + /* setup VF VSI and necessary resources */ + status = ice_alloc_vsi_res(vf); + if (status) + goto ice_alloc_vf_res_exit; + if (vf->trusted) set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); else -- cgit v1.2.3 From 56923ab66467c2edbd1ff97240ff14805e0b1cdc Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Wed, 26 Jun 2019 02:20:22 -0700 Subject: ice: Add stats for Rx drops at the port level Currently we are not reporting dropped counts at the port level to ethtool or netlink. This was found when debugging Rx dropped issues and the total packets sent did not equal the total packets received minus the rx_dropped, which was very confusing. 
To determine dropped counts at the port level we need to read the PRTRPB_RDPC register. To fix reporting we will store the dropped counts in the PF's rx_discards. This will be reported to netlink by storing it in the PF VSI's rx_missed_errors signaling that the receiver missed the packet. Also, we will report this to ethtool in the rx_dropped.nic field. Signed-off-by: Brett Creeley Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 1 + drivers/net/ethernet/intel/ice/ice_main.c | 6 ++++++ 2 files changed, 7 insertions(+) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 3250dfc00002..87652d722a30 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -337,5 +337,6 @@ #define VSIQF_HLUT_MAX_INDEX 15 #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) +#define PRTRPB_RDPC 0x000AC260 #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index d13f56803658..e4be9aa79337 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3297,6 +3297,8 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi) cur_ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; cur_ns->rx_length_errors = pf->stats.rx_len_errors; + /* record drops from the port level */ + cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; } } @@ -3330,6 +3332,10 @@ static void ice_update_pf_stats(struct ice_pf *pf) &prev_ps->eth.rx_broadcast, &cur_ps->eth.rx_broadcast); + ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, + &prev_ps->eth.rx_discards, + &cur_ps->eth.rx_discards); + ice_stat_update40(hw, GLPRT_GOTCL(pf_id), pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, &cur_ps->eth.tx_bytes); -- cgit v1.2.3 From a1e9968593234c179fcb3e71875897ae585c8362 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Wed, 26 Jun 2019 02:20:23 -0700 Subject: ice: Remove duplicate code in ice_alloc_rx_bufs Currently if the call to ice_alloc_mapped_page() fails we jump to the no_buf label, possibly call ice_release_rx_desc(), and return true indicating that there is more work to do. In the success case we just fall out of the while loop, possibly call ice_alloc_mapped_page(), and return false saying we exhausted cleaned_count. This flow can be improved by breaking if ice_alloc_mapped_page() fails and then the flow outside of the while loop is the same for the failure and success case. 
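For illustration only (not part of the patch), the shape of the refill loop after this cleanup - one break on allocation failure and a single shared exit - can be modeled in a few lines. The allocator below is a stub standing in for ice_alloc_mapped_page(), and the function names here are hypothetical.

#include <stdbool.h>
#include <stdio.h>

static bool alloc_page_stub(void)
{
        return true;    /* pretend every allocation succeeds in this model */
}

/* Returns true when buffers are still owed, i.e. the caller must poll again. */
static bool refill_bufs_model(unsigned int cleaned_count)
{
        while (cleaned_count) {
                if (!alloc_page_stub())
                        break;          /* failure joins the common exit path */
                cleaned_count--;
        }

        /* a single tail-bump/cleanup step here serves success and failure alike */
        return cleaned_count != 0;      /* mirrors the driver's !!cleaned_count */
}

int main(void)
{
        printf("work remaining: %d\n", refill_bufs_model(16));
        return 0;
}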
Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_txrx.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 0c459305c12f..020dac283f07 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -478,8 +478,9 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) bi = &rx_ring->rx_buf[ntu]; do { + /* if we fail here, we have work remaining */ if (!ice_alloc_mapped_page(rx_ring, bi)) - goto no_bufs; + break; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, @@ -510,16 +511,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) if (rx_ring->next_to_use != ntu) ice_release_rx_desc(rx_ring, ntu); - return false; - -no_bufs: - if (rx_ring->next_to_use != ntu) - ice_release_rx_desc(rx_ring, ntu); - - /* make sure to come back via polling to try again after - * allocation failure - */ - return true; + return !!cleaned_count; } /** -- cgit v1.2.3 From 992149446353bc22dff8edf231f2a5d8c5a3bd50 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Wed, 26 Jun 2019 02:20:24 -0700 Subject: ice: Don't return error for disabling LAN Tx queue that does exist Since Tx rings are being managed by FW/NVM, Tx rings might have not been set up or driver had already wiped them off - In that case, call to disable LAN Tx queue is being returned as not in existence. This patch makes sure we don't return unnecessary error for such scenario. Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index e28478215810..2add10e02280 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2148,6 +2148,9 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, if (status == ICE_ERR_RESET_ONGOING) { dev_dbg(&pf->pdev->dev, "Reset in progress. LAN Tx queues already disabled\n"); + } else if (status == ICE_ERR_DOES_NOT_EXIST) { + dev_dbg(&pf->pdev->dev, + "LAN Tx queues does not exist, nothing to disabled\n"); } else if (status) { dev_err(&pf->pdev->dev, "Failed to disable LAN Tx queues, error: %d\n", -- cgit v1.2.3 From ba880734ba9c24eca589b3f734d38309568301b2 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Wed, 26 Jun 2019 02:20:25 -0700 Subject: ice: Remove unnecessary flag ICE_FLAG_MSIX_ENA This flag is not needed and is called every time we re-enable interrupts in the hotpath so remove it. Also remove ice_vsi_req_irq() because it was a wrapper function for ice_vsi_req_irq_msix() whose sole purpose was checking the ICE_FLAG_MSIX_ENA flag. 
Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 1 - drivers/net/ethernet/intel/ice/ice_lib.c | 71 ++++++++++++----------------- drivers/net/ethernet/intel/ice/ice_main.c | 74 ++++++++----------------------- drivers/net/ethernet/intel/ice/ice_txrx.c | 4 +- 4 files changed, 48 insertions(+), 102 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 596b09a905aa..794d97460fc7 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -329,7 +329,6 @@ struct ice_q_vector { } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { - ICE_FLAG_MSIX_ENA, ICE_FLAG_FLTR_SYNC, ICE_FLAG_RSS_ENA, ICE_FLAG_SRIOV_ENA, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 2add10e02280..6e34c40e7840 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1129,12 +1129,7 @@ static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) return -EEXIST; } - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - num_q_vectors = vsi->num_q_vectors; - } else { - err = -EINVAL; - goto err_out; - } + num_q_vectors = vsi->num_q_vectors; for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { err = ice_vsi_alloc_q_vector(vsi, v_idx); @@ -1180,9 +1175,6 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) return -EEXIST; } - if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - return -ENOENT; - num_q_vectors = vsi->num_q_vectors; /* reserve slots from OS requested IRQs */ vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors, @@ -2605,39 +2597,36 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; int base = vsi->base_vector; + int i; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - int i; - - if (!vsi->q_vectors || !vsi->irqs_ready) - return; + if (!vsi->q_vectors || !vsi->irqs_ready) + return; - ice_vsi_release_msix(vsi); - if (vsi->type == ICE_VSI_VF) - return; + ice_vsi_release_msix(vsi); + if (vsi->type == ICE_VSI_VF) + return; - vsi->irqs_ready = false; - ice_for_each_q_vector(vsi, i) { - u16 vector = i + base; - int irq_num; + vsi->irqs_ready = false; + ice_for_each_q_vector(vsi, i) { + u16 vector = i + base; + int irq_num; - irq_num = pf->msix_entries[vector].vector; + irq_num = pf->msix_entries[vector].vector; - /* free only the irqs that were actually requested */ - if (!vsi->q_vectors[i] || - !(vsi->q_vectors[i]->num_ring_tx || - vsi->q_vectors[i]->num_ring_rx)) - continue; + /* free only the irqs that were actually requested */ + if (!vsi->q_vectors[i] || + !(vsi->q_vectors[i]->num_ring_tx || + vsi->q_vectors[i]->num_ring_rx)) + continue; - /* clear the affinity notifier in the IRQ descriptor */ - irq_set_affinity_notifier(irq_num, NULL); + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(irq_num, NULL); - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); - synchronize_irq(irq_num); - devm_free_irq(&pf->pdev->dev, irq_num, - vsi->q_vectors[i]); - } + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(irq_num, NULL); + synchronize_irq(irq_num); + devm_free_irq(&pf->pdev->dev, irq_num, + vsi->q_vectors[i]); } } @@ -2816,15 +2805,13 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) } /* disable each interrupt */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - 
ice_for_each_q_vector(vsi, i) - wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); + ice_for_each_q_vector(vsi, i) + wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); - ice_flush(hw); + ice_flush(hw); - ice_for_each_q_vector(vsi, i) - synchronize_irq(pf->msix_entries[i + base].vector); - } + ice_for_each_q_vector(vsi, i) + synchronize_irq(pf->msix_entries[i + base].vector); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index e4be9aa79337..8a8f9170e219 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1547,15 +1547,11 @@ static void ice_irq_affinity_release(struct kref __always_unused *ref) {} */ static int ice_vsi_ena_irq(struct ice_vsi *vsi) { - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - int i; + struct ice_hw *hw = &vsi->back->hw; + int i; - ice_for_each_q_vector(vsi, i) - ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); - } + ice_for_each_q_vector(vsi, i) + ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); ice_flush(hw); return 0; @@ -1803,7 +1799,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, 0); ice_flush(hw); - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { + if (pf->msix_entries) { synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); devm_free_irq(&pf->pdev->dev, pf->msix_entries[pf->oicr_idx].vector, pf); @@ -2229,7 +2225,6 @@ static void ice_deinit_pf(struct ice_pf *pf) static void ice_init_pf(struct ice_pf *pf) { bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); - set_bit(ICE_FLAG_MSIX_ENA, pf->flags); #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.common_cap.sr_iov_1_1) { struct ice_hw *hw = &pf->hw; @@ -2329,7 +2324,6 @@ msix_err: exit_err: pf->num_lan_msix = 0; - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); return err; } @@ -2342,7 +2336,6 @@ static void ice_dis_msix(struct ice_pf *pf) pci_disable_msix(pf->pdev); devm_kfree(&pf->pdev->dev, pf->msix_entries); pf->msix_entries = NULL; - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); } /** @@ -2351,8 +2344,7 @@ static void ice_dis_msix(struct ice_pf *pf) */ static void ice_clear_interrupt_scheme(struct ice_pf *pf) { - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_dis_msix(pf); + ice_dis_msix(pf); if (pf->irq_tracker) { devm_kfree(&pf->pdev->dev, pf->irq_tracker); @@ -2368,10 +2360,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) { int vectors; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - vectors = ice_ena_msix_range(pf); - else - return -ENODEV; + vectors = ice_ena_msix_range(pf); if (vectors < 0) return vectors; @@ -2528,12 +2517,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) * the misc functionality and queue processing is combined in * the same vector and that gets setup at open. 
*/ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - err = ice_req_irq_msix_misc(pf); - if (err) { - dev_err(dev, "setup of misc vector failed: %d\n", err); - goto err_init_interrupt_unroll; - } + err = ice_req_irq_msix_misc(pf); + if (err) { + dev_err(dev, "setup of misc vector failed: %d\n", err); + goto err_init_interrupt_unroll; } /* create switch struct for the switch element created by FW on boot */ @@ -3146,10 +3133,7 @@ static int ice_up_complete(struct ice_vsi *vsi) struct ice_pf *pf = vsi->back; int err; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_vsi_cfg_msix(vsi); - else - return -ENOTSUPP; + ice_vsi_cfg_msix(vsi); /* Enable only Rx rings, Tx rings were enabled by the FW when the * Tx queue group list was configured and the context bits were @@ -3609,24 +3593,6 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) return err; } -/** - * ice_vsi_req_irq - Request IRQ from the OS - * @vsi: The VSI IRQ is being requested for - * @basename: name for the vector - * - * Return 0 on success and a negative value on error - */ -static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) -{ - struct ice_pf *pf = vsi->back; - int err = -EINVAL; - - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - err = ice_vsi_req_irq_msix(vsi, basename); - - return err; -} - /** * ice_vsi_open - Called when a network interface is made active * @vsi: the VSI to open @@ -3656,7 +3622,7 @@ static int ice_vsi_open(struct ice_vsi *vsi) snprintf(int_name, sizeof(int_name) - 1, "%s-%s", dev_driver_string(&pf->pdev->dev), vsi->netdev->name); - err = ice_vsi_req_irq(vsi, int_name); + err = ice_vsi_req_irq_msix(vsi, int_name); if (err) goto err_setup_rx; @@ -3893,12 +3859,10 @@ static void ice_rebuild(struct ice_pf *pf) } /* start misc vector */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - err = ice_req_irq_msix_misc(pf); - if (err) { - dev_err(dev, "misc vector setup failed: %d\n", err); - goto err_vsi_rebuild; - } + err = ice_req_irq_msix_misc(pf); + if (err) { + dev_err(dev, "misc vector setup failed: %d\n", err); + goto err_vsi_rebuild; } /* restart the VSIs that were rebuilt and running before the reset */ @@ -4295,9 +4259,7 @@ static void ice_tx_timeout(struct net_device *netdev) head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) & QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; /* Read interrupt register */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - val = rd32(hw, - GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); + val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", vsi->vsi_num, hung_queue, tx_ring->next_to_clean, diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 020dac283f07..9234fd203929 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1413,7 +1413,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget) struct ice_q_vector *q_vector = container_of(napi, struct ice_q_vector, napi); struct ice_vsi *vsi = q_vector->vsi; - struct ice_pf *pf = vsi->back; bool clean_complete = true; int budget_per_ring = 0; struct ice_ring *ring; @@ -1454,8 +1453,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) * poll us due to busy-polling */ if (likely(napi_complete_done(napi, work_done))) - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_update_ena_itr(vsi, q_vector); + ice_update_ena_itr(vsi, q_vector); return min_t(int, work_done, budget - 1); } -- cgit v1.2.3 From 
b67f25d76e9f5e73024b8782acb47f7e7bd339b8 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Wed, 26 Jun 2019 02:20:26 -0700 Subject: ice: Remove flag to track VF interrupt status As a result of refactoring of VF VSIs interrupts code, there is no need to track its configuration status again with ICE_VF_STATE_CFG_INTR flag - In fact, it is not being checked anywhere in the code right now, so this patch removes the dead code as applicable to the flag. Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 11 ----------- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 5 ----- 2 files changed, 16 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 00aa6364124a..ce01cbe70ea4 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -297,13 +297,6 @@ void ice_free_vfs(struct ice_pf *pf) if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { /* disable VF qp mappings */ ice_dis_vf_mappings(&pf->vf[i]); - - /* Set this state so that assigned VF vectors can be - * reclaimed by PF for reuse in ice_vsi_release(). No - * need to clear this bit since pf->vf array is being - * freed anyways after this for loop - */ - set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states); ice_free_vf_res(&pf->vf[i]); } } @@ -551,7 +544,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) * expect vector assignment to be changed unless there is a request for * more vectors. */ - clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states); ice_alloc_vsi_res_exit: ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); return status; @@ -1283,9 +1275,6 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) /* assign default capabilities */ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; - - /* Set this state so that PF driver does VF vector assignment */ - set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states); } pf->num_alloc_vfs = num_alloc_vfs; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index c3ca522c245a..ada69120ff38 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -30,11 +30,6 @@ enum ice_vf_states { ICE_VF_STATE_DIS, ICE_VF_STATE_MC_PROMISC, ICE_VF_STATE_UC_PROMISC, - /* state to indicate if PF needs to do vector assignment for VF. - * This needs to be set during first time VF initialization or later - * when VF asks for more Vectors through virtchnl OP. 
- */ - ICE_VF_STATE_CFG_INTR, ICE_VF_STATES_NBITS }; -- cgit v1.2.3 From 3015b8fcb60d448a9521b49359e068d021aaec97 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 26 Jun 2019 02:20:27 -0700 Subject: ice: Bump version number Update driver version to 0.7.5 Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 8a8f9170e219..c26e6a102dac 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -9,7 +9,7 @@ #include "ice_lib.h" #include "ice_dcb_lib.h" -#define DRV_VERSION "0.7.4-k" +#define DRV_VERSION "0.7.5-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; -- cgit v1.2.3 From c1ddf1f5c4adc1e885dce8533b2245d979f38280 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 25 Jul 2019 01:55:28 -0700 Subject: ice: Use the software based tail when checking for hung Tx ring Currently in ice_get_tx_pending we try to read a Tx ring's tail. This is then compared with the software based head (next_to_clean) to determine if we have pending work. This will never work because reading of the Tx ring's tail is no longer supported. Fix this by using the software based tail (next_to_use) to determine if there is pending work. Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index c26e6a102dac..1aa7e06ebbdc 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -41,12 +41,12 @@ static void ice_update_pf_stats(struct ice_pf *pf); * ice_get_tx_pending - returns number of Tx descriptors not processed * @ring: the ring of descriptors */ -static u32 ice_get_tx_pending(struct ice_ring *ring) +static u16 ice_get_tx_pending(struct ice_ring *ring) { - u32 head, tail; + u16 head, tail; head = ring->next_to_clean; - tail = readl(ring->tail); + tail = ring->next_to_use; if (head != tail) return (head < tail) ? -- cgit v1.2.3 From 9118fcd5255f6d3891c90e01edd98dfc402ac663 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 25 Jul 2019 01:55:29 -0700 Subject: ice: Assume that more than one Rx queue is rare in ice_napi_poll Currently we divide budget by the number of Rx queues per Rx ring container in ice_napi_poll even if there is only 1. This is an unnecessary divide for the normal case of 1 Rx ring per Rx ring container. Fix this by using an unlikely() call in the case where we actually need to divide. Also, we will always set budget_per_ring even if there are no Rx rings in the Rx ring container so we don't need to initialize it to 0. 
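For illustration only (not part of the patch), the budget-splitting rule described above is small enough to model on its own. In the driver the multi-ring branch is additionally wrapped in unlikely(); that hint is omitted from this user-space sketch and the function name is hypothetical.

/* Give a lone Rx ring the whole NAPI budget; otherwise split it fairly,
 * but never below 1, since a zero budget would end polling early.
 */
int rx_budget_per_ring(int budget, int num_ring_rx)
{
        if (num_ring_rx > 1) {
                int share = budget / num_ring_rx;

                return share > 1 ? share : 1;
        }

        return budget;  /* the common single-ring case skips the divide */
}

With a budget of 64 this returns 64 for one Rx ring and 32 per ring for two, matching the behavior the patch describes.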
Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_txrx.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 9234fd203929..837d6ae2f33b 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1414,8 +1414,8 @@ int ice_napi_poll(struct napi_struct *napi, int budget) container_of(napi, struct ice_q_vector, napi); struct ice_vsi *vsi = q_vector->vsi; bool clean_complete = true; - int budget_per_ring = 0; struct ice_ring *ring; + int budget_per_ring; int work_done = 0; /* Since the actual Tx work is minimal, we can give the Tx a larger @@ -1429,11 +1429,16 @@ int ice_napi_poll(struct napi_struct *napi, int budget) if (budget <= 0) return budget; - /* We attempt to distribute budget to each Rx queue fairly, but don't - * allow the budget to go below 1 because that would exit polling early. - */ - if (q_vector->num_ring_rx) + /* normally we have 1 Rx ring per q_vector */ + if (unlikely(q_vector->num_ring_rx > 1)) + /* We attempt to distribute budget to each Rx queue fairly, but + * don't allow the budget to go below 1 because that would exit + * polling early. + */ budget_per_ring = max(budget / q_vector->num_ring_rx, 1); + else + /* Max of 1 Rx ring in this q_vector so give it the budget */ + budget_per_ring = budget; ice_for_each_ring(ring, q_vector->rx) { int cleaned; -- cgit v1.2.3 From d82dd83df27e31da3f2a838b32d0b82940dcd504 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Thu, 25 Jul 2019 01:55:30 -0700 Subject: ice: Restructure VFs initialization flows This patch restructures how VFs are configured, and resources allocated. Instead of freeing resources that were never allocated, and resetting empty VFs that have never been created - the new flow will just allocate resources for number of requested VFs based on the availability. During VFs initialization process, global interrupt is disabled, and rearmed after getting MSIX vectors for VFs. This allows immediate mailbox communications, instead of delaying it till later and VFs. PF communications resulted to using polling instead of actual interrupt. The issue manifested when creating higher number of VFs (128 VFs) per PF. 
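For illustration only (not part of the patch), the ordering that matters here can be reduced to a small model: mask the global interrupt, carve out the VF MSI-X vectors, then rearm the interrupt (guarded by the new state bit) before the first mailbox exchange so VF-to-PF messages are interrupt driven rather than polled. All names below are stand-ins; only __ICE_OICR_INTR_DIS corresponds to something added by the patch.

#include <stdbool.h>
#include <stdio.h>

static bool oicr_intr_disabled;         /* models the __ICE_OICR_INTR_DIS bit */

static void global_intr_rearm_model(void)
{
        /* rearm only if this flow is the one that masked the interrupt */
        if (oicr_intr_disabled) {
                oicr_intr_disabled = false;
                printf("global interrupt rearmed\n");   /* wr32() in the driver */
        }
}

int main(void)
{
        oicr_intr_disabled = true;      /* set before reserving VF vectors */
        /* ... MSI-X vectors reserved for the VFs here ... */
        global_intr_rearm_model();      /* rearm before mailbox traffic starts */
        return 0;
}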
Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 1 + drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 69 ++++++++++++++++-------- 2 files changed, 48 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 794d97460fc7..bde0b3617d9b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -220,6 +220,7 @@ enum ice_state { __ICE_CFG_BUSY, __ICE_SERVICE_SCHED, __ICE_SERVICE_DIS, + __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ __ICE_STATE_NBITS /* must be last */ }; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index ce01cbe70ea4..93b835a24bb1 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -974,6 +974,47 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, return status; } +/** + * ice_config_res_vfs - Finalize allocation of VFs resources in one go + * @pf: pointer to the PF structure + * + * This function is being called as last part of resetting all VFs, or when + * configuring VFs for the first time, where there is no resource to be freed + * Returns true if resources were properly allocated for all VFs, and false + * otherwise. + */ +static bool ice_config_res_vfs(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + int v; + + if (ice_check_avail_res(pf)) { + dev_err(&pf->pdev->dev, + "Cannot allocate VF resources, try with fewer number of VFs\n"); + return false; + } + + /* rearm global interrupts */ + if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state)) + ice_irq_dynamic_ena(hw, NULL, NULL); + + /* Finish resetting each VF and allocate resources */ + for (v = 0; v < pf->num_alloc_vfs; v++) { + struct ice_vf *vf = &pf->vf[v]; + + vf->num_vf_qs = pf->num_vf_qps; + dev_dbg(&pf->pdev->dev, + "VF-id %d has %d queues configured\n", + vf->vf_id, vf->num_vf_qs); + ice_cleanup_and_realloc_vf(vf); + } + + ice_flush(hw); + clear_bit(__ICE_VF_DIS, pf->state); + + return true; +} + /** * ice_reset_all_vfs - reset all allocated VFs in one go * @pf: pointer to the PF structure @@ -1066,25 +1107,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) dev_err(&pf->pdev->dev, "Failed to free MSIX resources used by SR-IOV\n"); - if (ice_check_avail_res(pf)) { - dev_err(&pf->pdev->dev, - "Cannot allocate VF resources, try with fewer number of VFs\n"); + if (!ice_config_res_vfs(pf)) return false; - } - - /* Finish the reset on each VF */ - for (v = 0; v < pf->num_alloc_vfs; v++) { - vf = &pf->vf[v]; - - vf->num_vf_qs = pf->num_vf_qps; - dev_dbg(&pf->pdev->dev, - "VF-id %d has %d queues configured\n", - vf->vf_id, vf->num_vf_qs); - ice_cleanup_and_realloc_vf(vf); - } - - ice_flush(hw); - clear_bit(__ICE_VF_DIS, pf->state); return true; } @@ -1249,7 +1273,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) /* Disable global interrupt 0 so we don't try to handle the VFLR. 
*/ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); - + set_bit(__ICE_OICR_INTR_DIS, pf->state); ice_flush(hw); ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); @@ -1278,13 +1302,13 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) } pf->num_alloc_vfs = num_alloc_vfs; - /* VF resources get allocated during reset */ - if (!ice_reset_all_vfs(pf, true)) { + /* VF resources get allocated with initialization */ + if (!ice_config_res_vfs(pf)) { ret = -EIO; goto err_unroll_sriov; } - goto err_unroll_intr; + return ret; err_unroll_sriov: pf->vf = NULL; @@ -1296,6 +1320,7 @@ err_pci_disable_sriov: err_unroll_intr: /* rearm interrupts here */ ice_irq_dynamic_ena(hw, NULL, NULL); + clear_bit(__ICE_OICR_INTR_DIS, pf->state); return ret; } -- cgit v1.2.3 From f1a4a66d23102e7681beb8725e7d00f359ef5c87 Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Thu, 25 Jul 2019 01:55:31 -0700 Subject: ice: fix set pause param autoneg check When ETHTOOL_GLINKSETTINGS is defined get pause param pause->autoneg reports SW configured setting, however when not defined get pause param pause->autoneg reports the link status. Set pause param needs to compare pause->autoneg with the same source as get pause param to block the user from changing autoneg with the set pause param option, or the user may be incorrectly blocked from changing Rx|Tx pause settings. Signed-off-by: Paul Greenwalt Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_ethtool.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index d3ba535bd65a..6a97ddbbda76 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2856,6 +2856,7 @@ static int ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_aqc_get_phy_caps_data *pcaps; struct ice_link_status *hw_link_info; struct ice_pf *pf = np->vsi->back; struct ice_dcbx_cfg *dcbx_cfg; @@ -2866,6 +2867,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) u8 aq_failures; bool link_up; int err = 0; + u32 is_an; pi = vsi->port_info; hw_link_info = &pi->phy.link_info; @@ -2880,7 +2882,30 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -EOPNOTSUPP; } - if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) { + /* Get pause param reports configured and negotiated flow control pause + * when ETHTOOL_GLINKSETTINGS is defined. Since ETHTOOL_GLINKSETTINGS is + * defined get pause param pause->autoneg reports SW configured setting, + * so compare pause->autoneg with SW configured to prevent the user from + * using set pause param to chance autoneg. + */ + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + /* Get current PHY config */ + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (status) { + kfree(pcaps); + return -EIO; + } + + is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE); + + kfree(pcaps); + + if (pause->autoneg != is_an) { netdev_info(netdev, "To change autoneg please use: ethtool -s autoneg \n"); return -EOPNOTSUPP; } -- cgit v1.2.3 From 2ab28bb04ce6432f15cd5086e7c7384f3deab639 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 25 Jul 2019 01:55:32 -0700 Subject: ice: Set WB_ON_ITR when we don't re-enable interrupts Currently when busy polling is enabled we aren't setting/enabling WB_ON_ITR in the driver. This doesn't break the driver, but it does cause issues. If we don't enable WB_ON_ITR mode we will still get write-backs from hardware during polling when a cache line has been filled, but if a cache line is not filled we will not get the write-back because WB_ON_ITR is not set. Fix this by enabling WB_ON_ITR in the driver when interrupts are disabled. Signed-off-by: Brett Creeley Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 3 ++ drivers/net/ethernet/intel/ice/ice_txrx.c | 54 +++++++++++++++++++++++++ drivers/net/ethernet/intel/ice/ice_txrx.h | 13 ++++++ 3 files changed, 70 insertions(+) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 87652d722a30..6f78ff5534af 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -127,8 +127,11 @@ #define GLINT_DYN_CTL_CLEARPBA_M BIT(1) #define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2) #define GLINT_DYN_CTL_ITR_INDX_S 3 +#define GLINT_DYN_CTL_ITR_INDX_M ICE_M(0x3, 3) #define GLINT_DYN_CTL_INTERVAL_S 5 +#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5) #define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25) +#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30) #define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) #define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) #define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 837d6ae2f33b..c88e0701e1d7 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1355,6 +1355,23 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) struct ice_ring_container *rx = &q_vector->rx; u32 itr_val; + /* when exiting WB_ON_ITR lets set a low ITR value and trigger + * interrupts to expire right away in case we have more work ready to go + * already + */ + if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) { + itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS); + wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); + /* set target back to last user set value */ + rx->target_itr = rx->itr_setting; + /* set current to what we just wrote and dynamic if needed */ + rx->current_itr = ICE_WB_ON_ITR_USECS | + (rx->itr_setting & ICE_ITR_DYNAMIC); + /* allow normal interrupt flow to start */ + q_vector->itr_countdown = 0; + return; + } + /* This will do nothing if dynamic updates are not enabled */ ice_update_itr(q_vector, tx); ice_update_itr(q_vector, rx); @@ -1399,6 +1416,41 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) itr_val); } +/** + * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector + * @vsi: pointer to the VSI structure + * @q_vector: q_vector to set WB_ON_ITR on + * + * We need to tell hardware to write-back completed descriptors even when + * interrupts are disabled. 
Descriptors will be written back on cache line + * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR + * descriptors may not be written back if they don't fill a cache line until the + * next interrupt. + * + * This sets the write-back frequency to 2 microseconds as that is the minimum + * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to + * make sure hardware knows we aren't meddling with the INTENA_M bit. + */ +static void +ice_set_wb_on_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) +{ + /* already in WB_ON_ITR mode no need to change it */ + if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) + return; + + if (q_vector->num_ring_rx) + wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), + ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, + ICE_RX_ITR)); + + if (q_vector->num_ring_tx) + wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), + ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, + ICE_TX_ITR)); + + q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE; +} + /** * ice_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it @@ -1459,6 +1511,8 @@ int ice_napi_poll(struct napi_struct *napi, int budget) */ if (likely(napi_complete_done(napi, work_done))) ice_update_ena_itr(vsi, q_vector); + else + ice_set_wb_on_itr(vsi, q_vector); return min_t(int, work_done, budget - 1); } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index ec76aba347b9..94a9280193e2 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -144,6 +144,19 @@ enum ice_rx_dtype { #define ICE_DFLT_INTRL 0 #define ICE_MAX_INTRL 236 +#define ICE_WB_ON_ITR_USECS 2 +#define ICE_IN_WB_ON_ITR_MODE 255 +/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows + * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also, + * set the write-back latency to the usecs passed in. + */ +#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \ + ((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \ + GLINT_DYN_CTL_INTERVAL_M) | \ + (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \ + GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \ + GLINT_DYN_CTL_WB_ON_ITR_M) + /* Legacy or Advanced Mode Queue */ #define ICE_TX_ADVANCED 0 #define ICE_TX_LEGACY 1 -- cgit v1.2.3 From 7829570e287d938fc49b8ae151d9af26436967a8 Mon Sep 17 00:00:00 2001 From: Usha Ketineni Date: Thu, 25 Jul 2019 01:55:33 -0700 Subject: ice: Fix kernel hang with DCB reset in CEE mode This patch fixes the set local MIB AQ call failures in the DCB rebuild path by setting the defaults for the ETS recommended DCB configuration. Also, willing bits for the DCB configuration needs to be set correctly. Resets works fine in IEEE mode as the ETS recommended DCB configuration is populated but not in CEE mode. Without this patch, PFR causes the kernel hang in CEE mode. 
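For illustration only (not part of the patch), the ETS "recommended" defaults installed for CEE mode boil down to a single traffic class owning all of the bandwidth. A standalone sketch of that table fill, mirroring what ice_cfg_etsrec_defaults() in the diff that follows does but with simplified types, looks like this; the TSA identifiers assume the IEEE 802.1Qaz encoding of 0 for strict priority and 2 for ETS.

#define MAX_TC          8
#define TSA_STRICT      0
#define TSA_ETS         2

struct etsrec_model {
        unsigned char maxtcs;
        unsigned char tcbw[MAX_TC];     /* per-TC bandwidth share, percent */
        unsigned char tsa[MAX_TC];      /* per-TC transmission selection algorithm */
};

/* Default recommendation: one TC with 100% of the bandwidth, every other
 * TC strict priority with no share. Skip if a recommendation already exists.
 */
void cfg_etsrec_defaults_model(struct etsrec_model *rec)
{
        unsigned int i;

        if (rec->maxtcs)
                return;

        rec->maxtcs = 1;
        for (i = 0; i < MAX_TC; i++) {
                rec->tcbw[i] = i ? 0 : 100;
                rec->tsa[i] = i ? TSA_STRICT : TSA_ETS;
        }
}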
Signed-off-by: Usha Ketineni Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 149 ++++++++++++++++----------- 1 file changed, 88 insertions(+), 61 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index fe88b127ca42..bf6cd4760a48 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -203,16 +203,87 @@ out: return ret; } +/** + * ice_cfg_etsrec_defaults - Set default ETS recommended DCB config + * @pi: port information structure + */ +static void ice_cfg_etsrec_defaults(struct ice_port_info *pi) +{ + struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg; + u8 i; + + /* Ensure ETS recommended DCB configuration is not already set */ + if (dcbcfg->etsrec.maxtcs) + return; + + /* In CEE mode, set the default to 1 TC */ + dcbcfg->etsrec.maxtcs = 1; + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + dcbcfg->etsrec.tcbwtable[i] = i ? 0 : 100; + dcbcfg->etsrec.tsatable[i] = i ? ICE_IEEE_TSA_STRICT : + ICE_IEEE_TSA_ETS; + } +} + +/** + * ice_dcb_need_recfg - Check if DCB needs reconfig + * @pf: board private structure + * @old_cfg: current DCB config + * @new_cfg: new DCB config + */ +static bool +ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, + struct ice_dcbx_cfg *new_cfg) +{ + bool need_reconfig = false; + + /* Check if ETS configuration has changed */ + if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg, + sizeof(new_cfg->etscfg))) { + /* If Priority Table has changed reconfig is needed */ + if (memcmp(&new_cfg->etscfg.prio_table, + &old_cfg->etscfg.prio_table, + sizeof(new_cfg->etscfg.prio_table))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); + } + + if (memcmp(&new_cfg->etscfg.tcbwtable, + &old_cfg->etscfg.tcbwtable, + sizeof(new_cfg->etscfg.tcbwtable))) + dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); + + if (memcmp(&new_cfg->etscfg.tsatable, + &old_cfg->etscfg.tsatable, + sizeof(new_cfg->etscfg.tsatable))) + dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); + } + + /* Check if PFC configuration has changed */ + if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); + } + + /* Check if APP Table has changed */ + if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); + } + + dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); + return need_reconfig; +} + /** * ice_dcb_rebuild - rebuild DCB post reset * @pf: physical function instance */ void ice_dcb_rebuild(struct ice_pf *pf) { + struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg; struct ice_aqc_port_ets_elem buf = { 0 }; - struct ice_dcbx_cfg *prev_cfg; enum ice_status ret; - u8 willing; ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { @@ -224,9 +295,15 @@ void ice_dcb_rebuild(struct ice_pf *pf) if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags)) return; + local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg; + desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg; + /* Save current willing state and force FW to unwilling */ - willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing; - pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0; + local_dcbx_cfg->etscfg.willing = 0x0; + local_dcbx_cfg->pfc.willing = 0x0; + 
local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING; + + ice_cfg_etsrec_defaults(pf->hw.port_info); ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) { dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n"); @@ -234,8 +311,7 @@ void ice_dcb_rebuild(struct ice_pf *pf) } /* Retrieve DCB config and ensure same as current in SW */ - prev_cfg = devm_kmemdup(&pf->pdev->dev, - &pf->hw.port_info->local_dcbx_cfg, + prev_cfg = devm_kmemdup(&pf->pdev->dev, local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL); if (!prev_cfg) { dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n"); @@ -243,22 +319,22 @@ void ice_dcb_rebuild(struct ice_pf *pf) } ice_init_dcb(&pf->hw); - if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg, - sizeof(*prev_cfg))) { + if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) { /* difference in cfg detected - disable DCB till next MIB */ dev_err(&pf->pdev->dev, "Set local MIB not accurate\n"); - devm_kfree(&pf->pdev->dev, prev_cfg); goto dcb_error; } /* fetched config congruent to previous configuration */ devm_kfree(&pf->pdev->dev, prev_cfg); - /* Configuration replayed - reset willing state to previous */ - pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing; + /* Set the local desired config */ + memset(&pf->hw.port_info->local_dcbx_cfg, 0, sizeof(*local_dcbx_cfg)); + memcpy(local_dcbx_cfg, desired_dcbx_cfg, sizeof(*local_dcbx_cfg)); + ice_cfg_etsrec_defaults(pf->hw.port_info); ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) { - dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n"); + dev_err(&pf->pdev->dev, "Failed to set desired config\n"); goto dcb_error; } dev_info(&pf->pdev->dev, "DCB restored after reset\n"); @@ -501,55 +577,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, return 0; } -/** - * ice_dcb_need_recfg - Check if DCB needs reconfig - * @pf: board private structure - * @old_cfg: current DCB config - * @new_cfg: new DCB config - */ -static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, - struct ice_dcbx_cfg *new_cfg) -{ - bool need_reconfig = false; - - /* Check if ETS configuration has changed */ - if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg, - sizeof(new_cfg->etscfg))) { - /* If Priority Table has changed reconfig is needed */ - if (memcmp(&new_cfg->etscfg.prio_table, - &old_cfg->etscfg.prio_table, - sizeof(new_cfg->etscfg.prio_table))) { - need_reconfig = true; - dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); - } - - if (memcmp(&new_cfg->etscfg.tcbwtable, - &old_cfg->etscfg.tcbwtable, - sizeof(new_cfg->etscfg.tcbwtable))) - dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); - - if (memcmp(&new_cfg->etscfg.tsatable, - &old_cfg->etscfg.tsatable, - sizeof(new_cfg->etscfg.tsatable))) - dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); - } - - /* Check if PFC configuration has changed */ - if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { - need_reconfig = true; - dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); - } - - /* Check if APP Table has changed */ - if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) { - need_reconfig = true; - dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); - } - - dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); - return need_reconfig; -} - /** * ice_dcb_process_lldp_set_mib_change - Process MIB change * @pf: ptr to ice_pf -- cgit v1.2.3 From ac6f733a7bd5e23ce9d58ef995f51fbd1ad2fa97 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Thu, 25 Jul 2019 01:55:34 -0700 Subject: ice: allow empty Rx 
descriptors In some circumstances, the hardware will hand us a receive descriptor which has no data attached, but is otherwise valid. The receive code was improperly ignoring these descriptors, which result in an infinite loop. To fix this, change the receive code to process all descriptors, regardless of the size of the associated data. Add checks to the memory-handling functions to allow for zero size. Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_txrx.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index c88e0701e1d7..e5c4c9139e54 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -607,6 +607,8 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb, unsigned int truesize = ICE_RXBUF_2048; #endif + if (!size) + return; skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, rx_buf->page_offset, size, truesize); @@ -662,6 +664,8 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, prefetchw(rx_buf->page); *skb = rx_buf->skb; + if (!size) + return rx_buf; /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, rx_buf->page_offset, size, @@ -745,8 +749,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, */ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) { - /* hand second half of page back to the ring */ + if (!rx_buf) + return; + if (ice_can_reuse_rx_page(rx_buf)) { + /* hand second half of page back to the ring */ ice_reuse_rx_page(rx_ring, rx_buf); rx_ring->rx_stats.page_reuse_count++; } else { @@ -1031,8 +1038,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M; + /* retrieve a buffer from the ring */ rx_buf = ice_get_rx_buf(rx_ring, &skb, size); - /* allocate (if needed) and populate skb */ + if (skb) ice_add_rx_frag(rx_buf, skb, size); else @@ -1041,7 +1049,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buf_failed++; - rx_buf->pagecnt_bias++; + if (rx_buf) + rx_buf->pagecnt_bias++; break; } -- cgit v1.2.3 From e6c45149b88e9dee8a97e6c365fe967c196fd500 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Thu, 25 Jul 2019 01:55:35 -0700 Subject: ice: Do not always bring up PF VSI in ice_ena_vsi() During rebuild ice_ena_vsi() is called to recover the VSI state. This function assumes the PF VSI is always to be enabled, however, it's possible that during reset/rebuild the interface can be brought down. If this occurs, we can attempt to bring up the PF VSI on a downed interface which can lead to various crashes. If the interface is not running, do not bring up the associated VSI. 
Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 1aa7e06ebbdc..7805c2abd4ac 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3701,8 +3701,6 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked) err = netd->netdev_ops->ndo_open(netd); rtnl_unlock(); } - } else { - err = ice_vsi_open(vsi); } } -- cgit v1.2.3 From 1337175deca72c3a1c846344d5743667a4dbedef Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Thu, 25 Jul 2019 01:55:36 -0700 Subject: ice: update GLINT_DYN_CTL and GLINT_VECT2FUNC register access Register access for GLINT_DYN_CTL and GLINT_VECT2FUNC should be within the PF space and not the absolute device space. Signed-off-by: Paul Greenwalt Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 24 ++++++++++++++---------- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 3 ++- 2 files changed, 16 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 93b835a24bb1..54b0e88af4ca 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -474,19 +474,20 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id) } /** - * ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW + * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space * @pf: pointer to PF structure * @vf: pointer to VF that the first MSIX vector index is being calculated for * - * This returns the first MSIX vector index in HW that is used by this VF and - * this will always be the OICR index in the AVF driver so any functionality + * This returns the first MSIX vector index in PF space that is used by this VF. + * This index is used when accessing PF relative registers such as + * GLINT_VECT2FUNC and GLINT_DYN_CTL. + * This will always be the OICR index in the AVF driver so any functionality * using vf->first_vector_idx for queue configuration will have to increment by * 1 to avoid meddling with the OICR index. 
*/ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) { - return pf->hw.func_caps.common_cap.msix_vector_first_id + - pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix; + return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix; } /** @@ -597,27 +598,30 @@ ice_alloc_vf_res_exit: */ static void ice_ena_vf_mappings(struct ice_vf *vf) { + int abs_vf_id, abs_first, abs_last; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int first, last, v; struct ice_hw *hw; - int abs_vf_id; u32 reg; hw = &pf->hw; vsi = pf->vsi[vf->lan_vsi_idx]; first = vf->first_vector_idx; last = (first + pf->num_vf_msix) - 1; + abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id; + abs_last = (abs_first + pf->num_vf_msix) - 1; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; /* VF Vector allocation */ - reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | - ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | + reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | + ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M); wr32(hw, VPINT_ALLOC(vf->vf_id), reg); - reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) | - ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) | + reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S) + & VPINT_ALLOC_PCI_FIRST_M) | + ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M); wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); /* map the interrupts to its functions */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index ada69120ff38..424bc0538956 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -45,7 +45,8 @@ struct ice_vf { s16 vf_id; /* VF ID in the PF space */ u16 lan_vsi_idx; /* index into PF struct */ - int first_vector_idx; /* first vector index of this VF */ + /* first vector index of this VF in the PF space */ + int first_vector_idx; struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ struct virtchnl_version_info vf_ver; u32 driver_caps; /* reported by VF driver */ -- cgit v1.2.3 From 60d628ea27d29499d2f71817a6caf5b20b6e293a Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 25 Jul 2019 01:55:37 -0700 Subject: ice: Reduce wait times during VF bringup/reset Currently there are a couple places where the VF is waiting too long when checking the status of registers. This is causing the AVF driver to spin for longer than necessary in the __IAVF_STARTUP state. Sometimes it causes the AVF to go into the __IAVF_COMM_FAILED, which may retrigger the __IAVF_STARTUP state. Try to reduce the chance of this happening by removing unnecessary wait times in VF bringup/resets. 
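For illustration only (not part of the patch), both hunks converge on the same polling pattern: check the register first, exit as soon as the condition is met, and only delay when another iteration is actually needed, so the fast path costs no sleep at all. A generic user-space model of that loop is below; the register read is stubbed and the 10 microsecond delay simply echoes the order of magnitude used in the patch.

#include <stdbool.h>
#include <unistd.h>

static unsigned int read_status_stub(void)
{
        return 0;       /* pretend the pending/busy bit has already cleared */
}

/* Poll up to max_tries; sleep only after a check that did not succeed. */
bool poll_until_clear(unsigned int mask, unsigned int max_tries)
{
        unsigned int i;

        for (i = 0; i < max_tries; i++) {
                if (!(read_status_stub() & mask))
                        return true;
                usleep(10);     /* short, bounded back-off between reads */
        }

        return false;   /* caller decides whether a timeout is fatal or a warning */
}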
Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 26 ++++++++++++++---------- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 4 ++++ 2 files changed, 19 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 54b0e88af4ca..8a5bf9730fdf 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -382,12 +382,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) wr32(hw, PF_PCI_CIAA, VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); - for (i = 0; i < 100; i++) { + for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { reg = rd32(hw, PF_PCI_CIAD); - if ((reg & VF_TRANS_PENDING_M) != 0) - dev_err(&pf->pdev->dev, - "VF %d PCI transactions stuck\n", vf->vf_id); - udelay(1); + /* no transactions pending so stop polling */ + if ((reg & VF_TRANS_PENDING_M) == 0) + break; + + dev_err(&pf->pdev->dev, + "VF %d PCI transactions stuck\n", vf->vf_id); + udelay(ICE_PCI_CIAD_WAIT_DELAY_US); } } @@ -1068,7 +1071,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) * finished resetting. */ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { - usleep_range(10000, 20000); /* Check each VF in sequence */ while (v < pf->num_alloc_vfs) { @@ -1076,8 +1078,11 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) vf = &pf->vf[v]; reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); - if (!(reg & VPGEN_VFRSTAT_VFRD_M)) + if (!(reg & VPGEN_VFRSTAT_VFRD_M)) { + /* only delay if the check failed */ + usleep_range(10, 20); break; + } /* If the current VF has finished resetting, move on * to the next VF in sequence. @@ -1091,7 +1096,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) */ if (v < pf->num_alloc_vfs) dev_warn(&pf->pdev->dev, "VF reset check timeout\n"); - usleep_range(10000, 20000); /* free VF resources to begin resetting the VSI state */ for (v = 0; v < pf->num_alloc_vfs; v++) { @@ -1165,12 +1169,14 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) * poll the status register to make sure that the reset * completed successfully. 
*/ - usleep_range(10000, 20000); reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); if (reg & VPGEN_VFRSTAT_VFRD_M) { rsd = true; break; } + + /* only sleep if the reset is not done */ + usleep_range(10, 20); } /* Display a warning if VF didn't manage to reset in time, but need to @@ -1180,8 +1186,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n", vf->vf_id); - usleep_range(10000, 20000); - /* disable promiscuous modes in case they were enabled * ignore any error if disabling process failed */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 424bc0538956..79bb47f73879 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -22,6 +22,10 @@ #define VF_DEVICE_STATUS 0xAA #define VF_TRANS_PENDING_M 0x20 +/* wait defines for polling PF_PCI_CIAD register status */ +#define ICE_PCI_CIAD_WAIT_COUNT 100 +#define ICE_PCI_CIAD_WAIT_DELAY_US 1 + /* Specific VF states */ enum ice_vf_states { ICE_VF_STATE_INIT = 0, -- cgit v1.2.3 From 11836214d5b7fbfe9df8aeb5dc2b23a6bedf185c Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 25 Jul 2019 01:55:38 -0700 Subject: ice: Increase size of Mailbox receive queue for many VFs Currently we use the ICE_MBXQ_LEN for both the Mailbox send and receive queues that are used to communicate with VFs. This is fine for the send queue because the PF driver will lock the queue for every single send, but for the Mailbox receive queue every VF is posting to its Mailbox send queue and the hardware is then handing the message to the PF on its Mailbox receive queue. This becomes a problem with many VFs because it seems to overburden the Mailbox receive queue on the PF. Fix this by increasing the Mailbox receive queue for the PF to 512 entries. The number 512 was determined based on the number of VFs supported by the device. We can have a total of 256 VFs so in the worst case this allows the VFs to put 2 messages in the PFs Mailbox receive queue at the same time. 
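As a quick sanity check on the number chosen here (illustrative arithmetic only; the patch defines the literal value 512 directly, and MSGS_PER_VF below is a made-up name for the worst case described above):

	#define ICE_MAX_VF_COUNT	256	/* VFs supported by the device */
	#define MSGS_PER_VF		2	/* assumed worst-case in-flight messages per VF */
	/* 256 VFs * 2 messages each = 512 receive queue entries */
	#define ICE_MBXRQ_LEN		(ICE_MAX_VF_COUNT * MSGS_PER_VF)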
Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 3 ++- drivers/net/ethernet/intel/ice/ice_main.c | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index bde0b3617d9b..b60e64c0036b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -69,7 +69,8 @@ extern const char ice_drv_ver[]; #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) #define ICE_ETHTOOL_FWVER_LEN 32 #define ICE_AQ_LEN 64 -#define ICE_MBXQ_LEN 64 +#define ICE_MBXSQ_LEN 64 +#define ICE_MBXRQ_LEN 512 #define ICE_MIN_MSIX 2 #define ICE_NO_VSI 0xffff #define ICE_MAX_TXQS 2048 diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 7805c2abd4ac..a0d148f590c2 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1507,8 +1507,8 @@ static void ice_set_ctrlq_len(struct ice_hw *hw) hw->adminq.num_sq_entries = ICE_AQ_LEN; hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; - hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; - hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; + hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN; + hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; } -- cgit v1.2.3 From c275684b9250f66e83b1769d76120e03ac964ec4 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Thu, 25 Jul 2019 01:55:39 -0700 Subject: ice: Move VF resources definition to SR-IOV specific file In order to use some of the VF resources definition in the SR-IOV specific virtchnl header file, this patch moves applicable code to ice_virtchnl_pf.h file accordingly... and they should have been defined in the destination file originally. 
Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 10 ---------- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 13 +++++++++++++ 2 files changed, 13 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index b60e64c0036b..9f9c30d29eb5 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -87,16 +87,6 @@ extern const char ice_drv_ver[]; #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) #define ICE_INVAL_Q_INDEX 0xffff #define ICE_INVAL_VFID 256 -#define ICE_MAX_VF_COUNT 256 -#define ICE_MAX_QS_PER_VF 256 -#define ICE_MIN_QS_PER_VF 1 -#define ICE_DFLT_QS_PER_VF 4 -#define ICE_NONQ_VECS_VF 1 -#define ICE_MAX_SCATTER_QS_PER_VF 16 -#define ICE_MAX_BASE_QS_PER_VF 16 -#define ICE_MAX_INTR_PER_VF 65 -#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) -#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) #define ICE_MAX_RESET_WAIT 20 diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 79bb47f73879..4d94853f119a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -26,6 +26,19 @@ #define ICE_PCI_CIAD_WAIT_COUNT 100 #define ICE_PCI_CIAD_WAIT_DELAY_US 1 +/* VF resources default values and limitation */ +#define ICE_MAX_VF_COUNT 256 +#define ICE_MAX_QS_PER_VF 256 +#define ICE_MIN_QS_PER_VF 1 +#define ICE_DFLT_QS_PER_VF 4 +#define ICE_NONQ_VECS_VF 1 +#define ICE_MAX_SCATTER_QS_PER_VF 16 +#define ICE_MAX_BASE_QS_PER_VF 16 +#define ICE_MAX_INTR_PER_VF 65 +#define ICE_MAX_POLICY_INTR_PER_VF 33 +#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) +#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) + /* Specific VF states */ enum ice_vf_states { ICE_VF_STATE_INIT = 0, -- cgit v1.2.3 From cbfe31b5d74dcfba46329813855e28e53d080d57 Mon Sep 17 00:00:00 2001 From: Pawel Kaminski Date: Thu, 25 Jul 2019 01:55:40 -0700 Subject: ice: Change type for queue counts These queue variables are being assigned values that are type u16. Change the local variables to match these types. Since these represent queue counts, they should never be negative. 
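A standalone illustration of why the subtraction in the hunk below is guarded once the counts are unsigned (placeholder values, compiled outside the driver):

	#include <stdio.h>

	typedef unsigned short u16;

	int main(void)
	{
		u16 req_queues = 4, cur_queues = 16, queues_left = 8;

		/* Storing the difference back into a u16 wraps when the VF is
		 * shrinking its request: 4 - 16 becomes 65524.
		 */
		u16 diff = req_queues - cur_queues;

		printf("wrapped diff = %u\n", diff);

		/* Subtracting only after confirming the request grew, as the
		 * patch does, avoids acting on the wrapped value.
		 */
		if (req_queues > cur_queues &&
		    (u16)(req_queues - cur_queues) > queues_left)
			printf("VF asked for more queues than are left\n");
		else
			printf("request can be satisfied or is shrinking\n");

		return 0;
	}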
Signed-off-by: Pawel Kaminski Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 25 ++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 8a5bf9730fdf..61cf1c5af928 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -2337,11 +2337,11 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; - int req_queues = vfres->num_queue_pairs; + u16 req_queues = vfres->num_queue_pairs; struct ice_pf *pf = vf->pf; - int max_allowed_vf_queues; - int tx_rx_queue_left; - int cur_queues; + u16 max_allowed_vf_queues; + u16 tx_rx_queue_left; + u16 cur_queues; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2349,29 +2349,30 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) } cur_queues = vf->num_vf_qs; - tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + tx_rx_queue_left = min_t(u16, pf->q_left_tx, pf->q_left_rx); max_allowed_vf_queues = tx_rx_queue_left + cur_queues; - if (req_queues <= 0) { + if (!req_queues) { dev_err(&pf->pdev->dev, - "VF %d tried to request %d queues. Ignoring.\n", - vf->vf_id, req_queues); + "VF %d tried to request 0 queues. Ignoring.\n", + vf->vf_id); } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) { dev_err(&pf->pdev->dev, "VF %d tried to request more than %d queues.\n", vf->vf_id, ICE_MAX_BASE_QS_PER_VF); vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF; - } else if (req_queues - cur_queues > tx_rx_queue_left) { + } else if (req_queues > cur_queues && + req_queues - cur_queues > tx_rx_queue_left) { dev_warn(&pf->pdev->dev, - "VF %d requested %d more queues, but only %d left.\n", + "VF %d requested %u more queues, but only %u left.\n", vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); - vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues, + vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues, ICE_MAX_BASE_QS_PER_VF); } else { /* request is successful, then reset VF */ vf->num_req_qs = req_queues; ice_vc_dis_vf(vf); dev_info(&pf->pdev->dev, - "VF %d granted request of %d queues.\n", + "VF %d granted request of %u queues.\n", vf->vf_id, req_queues); return 0; } -- cgit v1.2.3 From be6f7ef69cf0d8cfeb6af5ad62c558fe35e13cce Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 25 Jul 2019 01:55:41 -0700 Subject: ice: improve print for VF's when adding/deleting MAC filters When we fail to add/delete MAC filters in the VF, the print doesn't distinguish between the two. Fix that by printing whether or not we failed to add/delete the MAC filter respectively. 
Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 61cf1c5af928..1b1d1ea0c8f9 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -2283,8 +2283,8 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) if (v_ret) { dev_err(&pf->pdev->dev, - "can't update MAC filters for VF %d, error %d\n", - vf->vf_id, v_ret); + "can't %s MAC filters for VF %d, error %d\n", + set ? "add" : "remove", vf->vf_id, v_ret); } else { if (set) vf->num_mac += mac_count; -- cgit v1.2.3 From 0c3a6101ff2df98d5d960bda9b159d7c4a952f27 Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Mon, 29 Jul 2019 02:04:43 -0700 Subject: ice: Allow egress control packets from PF_VSI For control packets (i.e. LLDP packets) to be able to egress from the main VSI, a bit has to be set in the TX_descriptor. This should only be done for the main VSI and only if the FW LLDP agent is disabled. A bit to allow this also has to be set in the VSI context. Add the logic to add the necessary bits in the VSI context for the PF_VSI and the TX_descriptors for control packets egressing the PF_VSI. Signed-off-by: Dave Ertman Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 7 +++++++ drivers/net/ethernet/intel/ice/ice_txrx.c | 11 ++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 6e34c40e7840..d6279dfe029e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1010,6 +1010,13 @@ static int ice_vsi_init(struct ice_vsi *vsi) ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; } + /* Allow control frames out of main VSI */ + if (vsi->type == ICE_VSI_PF) { + ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; + ctxt->info.valid_sections |= + cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + } + ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); if (ret) { dev_err(&pf->pdev->dev, diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index e5c4c9139e54..5bf5c179a738 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2106,6 +2106,7 @@ static netdev_tx_t ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) { struct ice_tx_offload_params offload = { 0 }; + struct ice_vsi *vsi = tx_ring->vsi; struct ice_tx_buf *first; unsigned int count; int tso, csum; @@ -2153,7 +2154,15 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) if (csum < 0) goto out_drop; - if (tso || offload.cd_tunnel_params) { + /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ + if (unlikely(skb->priority == TC_PRIO_CONTROL && + vsi->type == ICE_VSI_PF && + vsi->port_info->is_sw_lldp)) + offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | + ICE_TX_CTX_DESC_SWTCH_UPLINK << + ICE_TXD_CTX_QW1_CMD_S); + + if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { struct ice_tx_ctx_desc *cdesc; int i = tx_ring->next_to_use; -- cgit v1.2.3 From 1b0c3247a092db672bf4599f234f0e90d6e30e8b Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Mon, 29 Jul 2019 
02:04:44 -0700 Subject: ice: Account for all states of FW DCBx and LLDP Currently, only the DCBx status is taken into account to determine if FW LLDP is possible. But there are NVM version coming out with DCBx enabled, and FW LLDP disabled. This is causing errors where the driver sees that DCBx is not disabled, and then tries to register for LLDP MIB change events, and fails. Change the logic to detect both DCBx and LLDP states in the FW engine. Signed-off-by: Dave Ertman Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 34 ++++++++++------------------ 1 file changed, 12 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index bf6cd4760a48..22bdc244c7e0 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -319,6 +319,11 @@ void ice_dcb_rebuild(struct ice_pf *pf) } ice_init_dcb(&pf->hw); + if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS) + pf->hw.port_info->is_sw_lldp = true; + else + pf->hw.port_info->is_sw_lldp = false; + if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) { /* difference in cfg detected - disable DCB till next MIB */ dev_err(&pf->pdev->dev, "Set local MIB not accurate\n"); @@ -440,35 +445,17 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) struct device *dev = &pf->pdev->dev; struct ice_port_info *port_info; struct ice_hw *hw = &pf->hw; - int sw_default = 0; int err; port_info = hw->port_info; err = ice_init_dcb(hw); if (err) { - /* FW LLDP is not active, default to SW DCBX/LLDP */ - dev_info(&pf->pdev->dev, "FW LLDP is not active\n"); - hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED; - hw->port_info->is_sw_lldp = true; - } - - if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS) - dev_info(&pf->pdev->dev, "DCBX disabled\n"); - - /* LLDP disabled in FW */ - if (port_info->is_sw_lldp) { - sw_default = 1; - dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n"); + /* FW LLDP is disabled, activate SW DCBX/LLDP mode */ + dev_info(&pf->pdev->dev, + "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); + port_info->is_sw_lldp = true; clear_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); - } else { - set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); - } - - if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) - dev_info(&pf->pdev->dev, "DCBX not started\n"); - - if (sw_default) { err = ice_dcb_sw_dflt_cfg(pf, locked); if (err) { dev_err(&pf->pdev->dev, @@ -483,6 +470,9 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) return 0; } + port_info->is_sw_lldp = false; + set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); + /* DCBX in FW and LLDP enabled in FW */ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; -- cgit v1.2.3 From da4a9e73d8a58eccef93682c9f2d910c346d2c1e Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Mon, 29 Jul 2019 02:04:45 -0700 Subject: ice: Don't call synchronize_irq() for VF's from the host Currently we will call synchronize_irq() from the host for VF's. This is not correct, so don't allow it. 
Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index d6279dfe029e..c067ef6be7f4 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2817,6 +2817,10 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) ice_flush(hw); + /* don't call synchronize_irq() for VF's from the host */ + if (vsi->type == ICE_VSI_VF) + return; + ice_for_each_q_vector(vsi, i) synchronize_irq(pf->msix_entries[i + base].vector); } -- cgit v1.2.3 From 64bcaec64284e08430db5f00944443ec41a86df2 Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Mon, 29 Jul 2019 02:04:46 -0700 Subject: ice: Treat DCBx state NOT_STARTED as valid When a port is not cabled, but DCBx is enabled in the firmware, the status of DCBx will be NOT_STARTED. This is a valid state for FW enabled and should not be treated as a is_fw_lldp true automatically. Add the code to treat NOT_STARTED as another valid state. Signed-off-by: Dave Ertman Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_dcb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index c2002ded65f6..d60c942249e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -954,7 +954,8 @@ enum ice_status ice_init_dcb(struct ice_hw *hw) pi->dcbx_status = ice_get_dcbx_status(hw); if (pi->dcbx_status == ICE_DCBX_STATUS_DONE || - pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS) { + pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS || + pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) { /* Get current DCBX configuration */ ret = ice_get_dcb_cfg(pi); pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM); -- cgit v1.2.3 From 42a179c80ddd19955ec90f01858b2993145dc874 Mon Sep 17 00:00:00 2001 From: Michal Swiatkowski Date: Mon, 29 Jul 2019 02:04:48 -0700 Subject: ice: Copy dcbx configuration only if mode is correct In rebuild DCB desired_dcbx_cfg was copy to local_dcbx_cfg, but if DCBX mode is IEEE desired_dcbx_cfg is not initialized by DCBX config from FW. Change logic to copy config value only if mode is set to CEE. If driver copy desired_dcbx_cfg to local_dcbx_cfg in IEEE mode there is problem with globr. System is frozen after two or more globr. 
Signed-off-by: Michal Swiatkowski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 22bdc244c7e0..4fc9faf5bc71 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -334,8 +334,10 @@ void ice_dcb_rebuild(struct ice_pf *pf) devm_kfree(&pf->pdev->dev, prev_cfg); /* Set the local desired config */ - memset(&pf->hw.port_info->local_dcbx_cfg, 0, sizeof(*local_dcbx_cfg)); - memcpy(local_dcbx_cfg, desired_dcbx_cfg, sizeof(*local_dcbx_cfg)); + if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE) + memcpy(local_dcbx_cfg, desired_dcbx_cfg, + sizeof(*local_dcbx_cfg)); + ice_cfg_etsrec_defaults(pf->hw.port_info); ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) { -- cgit v1.2.3 From f8af5bf5b45ecbbef2c246865b02c7350db81392 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 29 Jul 2019 02:04:49 -0700 Subject: ice: reject VF attempts to enable head writeback The virtchnl interface provides a mechanism for a VF driver to request head writeback support. This feature is deprecated as of AVF 1.0, but older versions of a VF driver may still attempt to request the mode. Since the ice hardware does not support head writeback, we should not accept Tx queue configuration which attempts to enable it. Currently, the driver simply assumes that the headwb_enabled bit will never be set. If a VF driver does request head writeback, the configuration will return successfully, even though head writeback is not enabled. This leaves the VF driver in a non functional state since it is assuming to be operating in head writeback mode. Fix the PF driver to reject any attempt to setup headwb_enabled. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 1b1d1ea0c8f9..73ab6222d29b 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -2109,6 +2109,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) if (qpi->txq.vsi_id != qci->vsi_id || qpi->rxq.vsi_id != qci->vsi_id || qpi->rxq.queue_id != qpi->txq.queue_id || + qpi->txq.headwb_enabled || !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; -- cgit v1.2.3 From 84a118ab58edddf3d29e9972136a4c4a923ea1fa Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Mon, 29 Jul 2019 02:04:50 -0700 Subject: ice: Rename ethtool private flag for lldp The current flag name of "enable-fw-lldp" is a bit cumbersome. Change priv-flag name to "fw-lldp-agent" with a value of on or off. This is more straight-forward in meaning. 
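With the rename in place the agent is controlled through the usual ethtool private-flag interface, for example 'ethtool --show-priv-flags <ifname>' to read the current setting and 'ethtool --set-priv-flags <ifname> fw-lldp-agent off' to stop the firmware agent, where <ifname> is a placeholder for the port's netdev name.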
Signed-off-by: Dave Ertman Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 2 +- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 4 ++-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 6 +++--- drivers/net/ethernet/intel/ice/ice_lib.c | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 9f9c30d29eb5..99e0febd8e50 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -329,7 +329,7 @@ enum ice_pf_flags { ICE_FLAG_DCB_ENA, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, ICE_FLAG_NO_MEDIA, - ICE_FLAG_ENABLE_FW_LLDP, + ICE_FLAG_FW_LLDP_AGENT, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_PF_FLAGS_NBITS /* must be last */ }; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 4fc9faf5bc71..734cef8eed9e 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -457,7 +457,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) dev_info(&pf->pdev->dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); port_info->is_sw_lldp = true; - clear_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); + clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); err = ice_dcb_sw_dflt_cfg(pf, locked); if (err) { dev_err(&pf->pdev->dev, @@ -473,7 +473,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) } port_info->is_sw_lldp = false; - set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); + set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); /* DCBX in FW and LLDP enabled in FW */ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 6a97ddbbda76..948a33716290 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -155,7 +155,7 @@ struct ice_priv_flag { static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA), - ICE_PRIV_FLAG("enable-fw-lldp", ICE_FLAG_ENABLE_FW_LLDP), + ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT), }; #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) @@ -1201,8 +1201,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS); - if (test_bit(ICE_FLAG_ENABLE_FW_LLDP, change_flags)) { - if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) { + if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) { + if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { enum ice_status status; /* Disable FW LLDP engine */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index c067ef6be7f4..343d0c305423 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2541,7 +2541,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ice_cfg_sw_lldp(vsi, true, true); /* Rx LLDP packets */ - if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) + if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) ice_cfg_sw_lldp(vsi, false, true); } @@ -2888,7 +2888,7 @@ int ice_vsi_release(struct ice_vsi *vsi) /* The Rx rule will only exist to remove if the LLDP FW * engine is currently stopped */ - if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) + if 
(!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) ice_cfg_sw_lldp(vsi, false, false); } -- cgit v1.2.3 From 90e477379e92c110a34ca0d108aa97d5b02076ee Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Mon, 29 Jul 2019 02:04:51 -0700 Subject: ice: silence some bogus error messages In some circumstances, VF devices can be deactivated while a message is in-flight. In that case, a series of scary error message will be printed in the log. Since these are actually harmless, check for this case and suppress them. No harm, no foul. Signed-off-by: Mitch Williams Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 1 + drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 765e3c2ed045..bf9aa533a7c6 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1610,6 +1610,7 @@ enum ice_aq_err { ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ ICE_AQ_RC_EEXIST = 13, /* Object already exists */ ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */ + ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */ }; /* Admin Queue command opcodes */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 73ab6222d29b..83e58e71081e 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -1512,10 +1512,10 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); - if (aq_ret) { + if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { dev_info(&pf->pdev->dev, - "Unable to send the message to VF %d aq_err %d\n", - vf->vf_id, pf->hw.mailboxq.sq_last_status); + "Unable to send the message to VF %d ret %d aq_err %d\n", + vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status); return -EIO; } -- cgit v1.2.3 From 057911ba9b7939a3395e5c3c497fa2af830123e4 Mon Sep 17 00:00:00 2001 From: Chinh T Cao Date: Mon, 29 Jul 2019 02:04:52 -0700 Subject: ice: Fix flag used for module query When checking the PHY for status, by specification, the driver should be using "topology" mode when querying the module type. Signed-off-by: Chinh T Cao Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 5f9dc76699d2..15648d4a8bab 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2031,7 +2031,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); if (!status) memcpy(li->module_type, &pcaps->module_type, -- cgit v1.2.3 From 3747f03115c1c7e577e86c7698e061a21f20576c Mon Sep 17 00:00:00 2001 From: Chinh T Cao Date: Mon, 29 Jul 2019 02:04:53 -0700 Subject: ice: Don't clear auto_fec bit in ice_cfg_phy_fec() The driver should never clear the auto_fec_enable bit. 
Signed-off-by: Chinh T Cao Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 15648d4a8bab..4b43e6de847b 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2181,27 +2181,24 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) { switch (fec) { case ICE_FEC_BASER: - /* Clear auto FEC and RS bits, and AND BASE-R ability + /* Clear RS bits, and AND BASE-R ability * bits and OR request bits. */ - cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | ICE_AQC_PHY_FEC_25G_KR_REQ; break; case ICE_FEC_RS: - /* Clear auto FEC and BASE-R bits, and AND RS ability + /* Clear BASE-R bits, and AND RS ability * bits and OR request bits. */ - cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | ICE_AQC_PHY_FEC_25G_RS_544_REQ; break; case ICE_FEC_NONE: - /* Clear auto FEC and all FEC option bits. */ - cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; + /* Clear all FEC option bits. */ cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; break; case ICE_FEC_AUTO: -- cgit v1.2.3 From 3f416961b0a5000bf5556de1a53cc3cf87a6e744 Mon Sep 17 00:00:00 2001 From: "Amruth G.P" Date: Mon, 29 Jul 2019 02:04:54 -0700 Subject: ice: Add input handlers for virtual channel handlers Move the assignment to local variables after validation. Remove unnecessary checks in ice_vc_process_vf_msg() as the respective functions are now performing the checks. 
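A minimal standalone sketch of the ordering these handlers now follow (every name and the key length here are placeholders, not driver symbols): checks that depend only on the message itself run first, and the VSI lookup is deferred until the request is known to be well formed.

	#include <stdio.h>
	#include <stddef.h>

	struct fake_rss_key_msg { size_t key_len; };

	static int handle_rss_key(const struct fake_rss_key_msg *msg,
				  int rss_enabled, const char *vsi)
	{
		if (msg->key_len != 40)	/* validate the message itself first */
			return -1;
		if (!rss_enabled)	/* then feature support */
			return -1;
		if (!vsi)		/* resource lookup/validation last */
			return -1;
		return 0;		/* only now act on the VSI */
	}

	int main(void)
	{
		struct fake_rss_key_msg bad = { .key_len = 7 };
		struct fake_rss_key_msg good = { .key_len = 40 };

		printf("malformed message -> %d\n", handle_rss_key(&bad, 1, "vsi0"));
		printf("valid message     -> %d\n", handle_rss_key(&good, 1, "vsi0"));
		return 0;
	}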
Signed-off-by: "Amruth G.P" Signed-off-by: Nitesh B Venkatesh Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 64 ++++++++++++------------ 1 file changed, 32 insertions(+), 32 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 83e58e71081e..de0a1ef54e83 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -1734,18 +1734,18 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { + if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1781,18 +1781,18 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { + if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1877,6 +1877,12 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || + vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1932,6 +1938,12 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || + vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1984,12 +1996,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) irqmap_info = (struct virtchnl_irq_map_info *)msg; num_q_vectors_mapped = irqmap_info->num_vectors; - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; - } - /* Check to make sure number of VF vectors mapped is not greater than * number of VF vectors originally allocated, and check that * there is actually at least a single VF queue vector mapped @@ -2001,6 +2007,12 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + for (i = 0; i < num_q_vectors_mapped; i++) { struct ice_q_vector *q_vector; @@ -2092,10 +2104,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) - goto error_param; - if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) { dev_err(&pf->pdev->dev, "VF-%d requesting more than supported number of queues: %d\n", @@ -2104,6 +2112,12 @@ static int ice_vc_cfg_qs_msg(struct 
ice_vf *vf, u8 *msg) goto error_param; } + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + for (i = 0; i < qci->num_queue_pairs; i++) { qpi = &qci->qpair[i]; if (qpi->txq.vsi_id != qci->vsi_id || @@ -2755,20 +2769,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) err = -EPERM; else err = -EINVAL; - goto error_handler; - } - - /* Perform additional checks specific to RSS and Virtchnl */ - if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { - struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; - - if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) - err = -EINVAL; - } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { - struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; - - if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) - err = -EINVAL; } error_handler: -- cgit v1.2.3 From 5a4a8673102761fb87c94ee20633bf1f2a6911ca Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Thu, 25 Jul 2019 02:53:50 -0700 Subject: ice: update ethtool stats on-demand Users expect ethtool statistics to be updated on-demand when invoking 'ethtool -S ' instead of providing a snapshot of statistics taken once a second (the frequency of the watchdog task where stats are currently updated). Update stats every time 'ethtool -S ' is run. Also, fix an indentation style issue and an unnecessary local variable initialization in ice_get_ethtool_stats() discovered while investigating the subject issue. Signed-off-by: Bruce Allan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 2 ++ drivers/net/ethernet/intel/ice/ice_ethtool.c | 7 +++++-- drivers/net/ethernet/intel/ice/ice_main.c | 6 ++---- 3 files changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 99e0febd8e50..97d0f61cf52b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -447,6 +447,8 @@ ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type) int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); +void ice_update_vsi_stats(struct ice_vsi *vsi); +void ice_update_pf_stats(struct ice_pf *pf); int ice_up(struct ice_vsi *vsi); int ice_down(struct ice_vsi *vsi); int ice_vsi_cfg(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 948a33716290..f7dd0bd03d39 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1319,14 +1319,17 @@ ice_get_ethtool_stats(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_ring *ring; - unsigned int j = 0; + unsigned int j; int i = 0; char *p; + ice_update_pf_stats(pf); + ice_update_vsi_stats(vsi); + for (j = 0; j < ICE_VSI_STATS_LEN; j++) { p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset; data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } /* populate per queue stats */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a0d148f590c2..6dd806b763ea 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -34,8 +34,6 @@ static const struct net_device_ops ice_netdev_ops; static void ice_rebuild(struct ice_pf *pf); static void ice_vsi_release_all(struct ice_pf *pf); -static void ice_update_vsi_stats(struct ice_vsi *vsi); -static void ice_update_pf_stats(struct ice_pf *pf); /** * ice_get_tx_pending - returns number of Tx descriptors not processed @@ -3254,7 +3252,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) * ice_update_vsi_stats - Update VSI stats counters * @vsi: the VSI to be updated */ -static void ice_update_vsi_stats(struct ice_vsi *vsi) +void ice_update_vsi_stats(struct ice_vsi *vsi) { struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; struct ice_eth_stats *cur_es = &vsi->eth_stats; @@ -3290,7 +3288,7 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi) * ice_update_pf_stats - Update PF port stats counters * @pf: PF whose stats needs to be updated */ -static void ice_update_pf_stats(struct ice_pf *pf) +void ice_update_pf_stats(struct ice_pf *pf) { struct ice_hw_port_stats *prev_ps, *cur_ps; struct ice_hw *hw = &pf->hw; -- cgit v1.2.3 From bbb968e8b34c2cf5ff99d0dfdb73639070b3dee1 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Thu, 25 Jul 2019 02:53:51 -0700 Subject: ice: Fix issues updating VSI MAC filters VSI, especially VF could request to add or remove filter for another VSI, driver should really guide such request and disallow it. However, instead of returning error for such malicious request, driver can simply return success. In addition, we are not tracking number of MAC filters configured per VF correctly - and this leads to issue updating VF MAC filters whenever they were removed and re-configured via bringing VF interface down and up. Also, since VF could send request to update multiple MAC filters at once, driver should program those filters individually in the switch, in order to determine which action resulted to error, and communicate accordingly to the VF. So, with this changes, we now track number of filters added right from when VF resources allocation is done, and could properly add filters for both trusted and non_trusted VFs, without MAC filters mis-match issue in the switch... Also refactor code, so that driver can use new function to add or remove MAC filters. Signed-off-by: Akeem G Abodunrin Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 30 +++++++++++ drivers/net/ethernet/intel/ice/ice_lib.h | 4 ++ drivers/net/ethernet/intel/ice/ice_main.c | 64 +++++++----------------- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 42 ++++++++-------- 4 files changed, 73 insertions(+), 67 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 343d0c305423..8d5d6635a123 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -3181,3 +3181,33 @@ out: return ret; } #endif /* CONFIG_DCB */ + +/** + * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI + * @vsi: the VSI being configured MAC filter + * @macaddr: the MAC address to be added. 
+ * @set: Add or delete a MAC filter + * + * Adds or removes MAC address filter entry for VF VSI + */ +enum ice_status +ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set) +{ + LIST_HEAD(tmp_add_list); + enum ice_status status; + + /* Update MAC filter list to be added or removed for a VSI */ + if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) { + status = ICE_ERR_NO_MEMORY; + goto cfg_mac_fltr_exit; + } + + if (set) + status = ice_add_mac(&vsi->back->hw, &tmp_add_list); + else + status = ice_remove_mac(&vsi->back->hw, &tmp_add_list); + +cfg_mac_fltr_exit: + ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list); + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 6e43ef03bfc3..969ba27cba95 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -95,4 +95,8 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi); int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran); + +enum ice_status +ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); + #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 6dd806b763ea..f3923dec32b7 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -116,10 +116,9 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) */ static int ice_init_mac_fltr(struct ice_pf *pf) { - LIST_HEAD(tmp_add_list); + enum ice_status status; u8 broadcast[ETH_ALEN]; struct ice_vsi *vsi; - int status; vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); if (!vsi) @@ -130,8 +129,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf) */ /* Add a unicast MAC filter so the VSI can get its packets */ - status = ice_add_mac_to_list(vsi, &tmp_add_list, - vsi->port_info->mac.perm_addr); + status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true); if (status) goto unregister; @@ -139,18 +137,11 @@ static int ice_init_mac_fltr(struct ice_pf *pf) * MAC address to the list as well. */ eth_broadcast_addr(broadcast); - status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); - if (status) - goto free_mac_list; - - /* Program MAC filters for entries in tmp_add_list */ - status = ice_add_mac(&pf->hw, &tmp_add_list); + status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true); if (status) - status = -ENOMEM; - -free_mac_list: - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + goto unregister; + return 0; unregister: /* We aren't useful with no MAC filters, so unregister if we * had an error @@ -164,7 +155,7 @@ unregister: vsi->netdev = NULL; } - return status; + return -EIO; } /** @@ -2834,10 +2825,8 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) struct ice_hw *hw = &pf->hw; struct sockaddr *addr = pi; enum ice_status status; - LIST_HEAD(a_mac_list); - LIST_HEAD(r_mac_list); u8 flags = 0; - int err; + int err = 0; u8 *mac; mac = (u8 *)addr->sa_data; @@ -2860,42 +2849,23 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) /* When we change the MAC address we also have to change the MAC address * based filter rules that were created previously for the old MAC * address. So first, we remove the old filter rule using ice_remove_mac - * and then create a new filter rule using ice_add_mac. 
Note that for - * both these operations, we first need to form a "list" of MAC - * addresses (even though in this case, we have only 1 MAC address to be - * added/removed) and this done using ice_add_mac_to_list. Depending on - * the ensuing operation this "list" of MAC addresses is either to be - * added or removed from the filter. + * and then create a new filter rule using ice_add_mac via + * ice_vsi_cfg_mac_fltr function call for both add and/or remove + * filters. */ - err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr); - if (err) { - err = -EADDRNOTAVAIL; - goto free_lists; - } - - status = ice_remove_mac(hw, &r_mac_list); + status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false); if (status) { err = -EADDRNOTAVAIL; - goto free_lists; - } - - err = ice_add_mac_to_list(vsi, &a_mac_list, mac); - if (err) { - err = -EADDRNOTAVAIL; - goto free_lists; + goto err_update_filters; } - status = ice_add_mac(hw, &a_mac_list); + status = ice_vsi_cfg_mac_fltr(vsi, mac, true); if (status) { err = -EADDRNOTAVAIL; - goto free_lists; + goto err_update_filters; } -free_lists: - /* free list entries */ - ice_free_fltr_list(&pf->pdev->dev, &r_mac_list); - ice_free_fltr_list(&pf->pdev->dev, &a_mac_list); - +err_update_filters: if (err) { netdev_err(netdev, "can't set MAC %pM. filter update failed\n", mac); @@ -2911,8 +2881,8 @@ free_lists: flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; status = ice_aq_manage_mac_write(hw, mac, flags, NULL); if (status) { - netdev_err(netdev, "can't set MAC %pM. write to firmware failed.\n", - mac); + netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", + mac, status); } return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index de0a1ef54e83..86637d99ee77 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -540,7 +540,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) status = ice_add_mac(&pf->hw, &tmp_add_list); if (status) - dev_err(&pf->pdev->dev, "could not add mac filters\n"); + dev_err(&pf->pdev->dev, + "could not add mac filters error %d\n", status); + else + vf->num_mac = 1; /* Clear this bit after VF initialization since we shouldn't reclaim * and reassign interrupts for synchronous or asynchronous VFR events. @@ -2208,7 +2211,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) (struct virtchnl_ether_addr_list *)msg; struct ice_pf *pf = vf->pf; enum virtchnl_ops vc_op; - LIST_HEAD(mac_list); + enum ice_status status; struct ice_vsi *vsi; int mac_count = 0; int i; @@ -2282,33 +2285,32 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) goto handle_mac_exit; } - /* get here if maddr is multicast or if VF can change MAC */ - if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) { - v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + /* program the updated filter list */ + status = ice_vsi_cfg_mac_fltr(vsi, maddr, set); + if (status == ICE_ERR_DOES_NOT_EXIST || + status == ICE_ERR_ALREADY_EXISTS) { + dev_info(&pf->pdev->dev, + "can't %s MAC filters %pM for VF %d, error %d\n", + set ? "add" : "remove", maddr, vf->vf_id, + status); + } else if (status) { + dev_err(&pf->pdev->dev, + "can't %s MAC filters for VF %d, error %d\n", + set ? 
"add" : "remove", vf->vf_id, status); + v_ret = ice_err_to_virt_err(status); goto handle_mac_exit; } + mac_count++; } - /* program the updated filter list */ + /* Track number of MAC filters programmed for the VF VSI */ if (set) - v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list)); + vf->num_mac += mac_count; else - v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list)); - - if (v_ret) { - dev_err(&pf->pdev->dev, - "can't %s MAC filters for VF %d, error %d\n", - set ? "add" : "remove", vf->vf_id, v_ret); - } else { - if (set) - vf->num_mac += mac_count; - else - vf->num_mac -= mac_count; - } + vf->num_mac -= mac_count; handle_mac_exit: - ice_free_fltr_list(&pf->pdev->dev, &mac_list); /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); } -- cgit v1.2.3 From 8b2c858240aca43c59fc7762f10754354406883d Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Thu, 25 Jul 2019 02:53:52 -0700 Subject: ice: Don't allow VSI to remove unassociated ucast filter If a VSI is not using a unicast filter or did not configure that particular unicast filter, driver should not allow it to be removed by the rogue VSI. Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_switch.c | 56 +++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 8271fd651725..99cf527d2b1a 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -2136,6 +2136,38 @@ out: return status; } +/** + * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry + * @hw: pointer to the hardware structure + * @recp_id: lookup type for which the specified rule needs to be searched + * @f_info: rule information + * + * Helper function to search for a unicast rule entry - this is to be used + * to remove unicast MAC filter that is not shared with other VSIs on the + * PF switch. 
+ * + * Returns pointer to entry storing the rule if found + */ +static struct ice_fltr_mgmt_list_entry * +ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id, + struct ice_fltr_info *f_info) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *list_itr; + struct list_head *list_head; + + list_head = &sw->recp_list[recp_id].filt_rules; + list_for_each_entry(list_itr, list_head, list_entry) { + if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, + sizeof(f_info->l_data)) && + f_info->fwd_id.hw_vsi_id == + list_itr->fltr_info.fwd_id.hw_vsi_id && + f_info->flag == list_itr->fltr_info.flag) + return list_itr; + } + return NULL; +} + /** * ice_remove_mac - remove a MAC address based filter rule * @hw: pointer to the hardware structure @@ -2153,15 +2185,39 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_fltr_list_entry *list_itr, *tmp; + struct mutex *rule_lock; /* Lock to protect filter rule list */ if (!m_list) return ICE_ERR_PARAM; + rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; + u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0]; + u16 vsi_handle; if (l_type != ICE_SW_LKUP_MAC) return ICE_ERR_PARAM; + + vsi_handle = list_itr->fltr_info.vsi_handle; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + list_itr->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, vsi_handle); + if (is_unicast_ether_addr(add) && !hw->ucast_shared) { + /* Don't remove the unicast address that belongs to + * another VSI on the switch, since it is not being + * shared... + */ + mutex_lock(rule_lock); + if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, + &list_itr->fltr_info)) { + mutex_unlock(rule_lock); + return ICE_ERR_DOES_NOT_EXIST; + } + mutex_unlock(rule_lock); + } list_itr->status = ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC, list_itr); -- cgit v1.2.3 From 9e7a5d1746222238712df19ce9833e574d4cac8e Mon Sep 17 00:00:00 2001 From: Usha Ketineni Date: Thu, 25 Jul 2019 02:53:53 -0700 Subject: ice: Fix ethtool port and PFC stats for 4x25G cards This patch fixes the issue where port and PFC statistics counters are incrementing at the wrong port with 4x25G cards. Read the GLPRT port registers using lport parameter instead of pf_id to update the statistics otherwise the pf_ids are flipped for ports 2 and 3 when read from the HW register PF_FUNC_RID and this is expected as per hardware specification. 
Signed-off-by: Usha Ketineni Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 13 ++--- drivers/net/ethernet/intel/ice/ice_main.c | 76 ++++++++++++++-------------- 2 files changed, 45 insertions(+), 44 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 734cef8eed9e..d9578919aad8 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -500,30 +500,31 @@ void ice_update_dcb_stats(struct ice_pf *pf) { struct ice_hw_port_stats *prev_ps, *cur_ps; struct ice_hw *hw = &pf->hw; - u8 pf_id = hw->pf_id; + u8 port; int i; + port = hw->port_info->lport; prev_ps = &pf->stats_prev; cur_ps = &pf->stats; for (i = 0; i < 8; i++) { - ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xoff_rx[i], &cur_ps->priority_xoff_rx[i]); - ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXONRXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xon_rx[i], &cur_ps->priority_xon_rx[i]); - ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXONTXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xon_tx[i], &cur_ps->priority_xon_tx[i]); - ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXOFFTXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xoff_tx[i], &cur_ps->priority_xoff_tx[i]); - ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i), + ice_stat_update32(hw, GLPRT_RXON2OFFCNT(port, i), pf->stat_prev_loaded, &prev_ps->priority_xon_2_xoff[i], &cur_ps->priority_xon_2_xoff[i]); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f3923dec32b7..76a647cc2dbd 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3262,25 +3262,25 @@ void ice_update_pf_stats(struct ice_pf *pf) { struct ice_hw_port_stats *prev_ps, *cur_ps; struct ice_hw *hw = &pf->hw; - u8 pf_id; + u8 port; + port = hw->port_info->lport; prev_ps = &pf->stats_prev; cur_ps = &pf->stats; - pf_id = hw->pf_id; - ice_stat_update40(hw, GLPRT_GORCL(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, &cur_ps->eth.rx_bytes); - ice_stat_update40(hw, GLPRT_UPRCL(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, &cur_ps->eth.rx_unicast); - ice_stat_update40(hw, GLPRT_MPRCL(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, &cur_ps->eth.rx_multicast); - ice_stat_update40(hw, GLPRT_BPRCL(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, &cur_ps->eth.rx_broadcast); @@ -3288,109 +3288,109 @@ void ice_update_pf_stats(struct ice_pf *pf) &prev_ps->eth.rx_discards, &cur_ps->eth.rx_discards); - ice_stat_update40(hw, GLPRT_GOTCL(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, &cur_ps->eth.tx_bytes); - ice_stat_update40(hw, GLPRT_UPTCL(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, &cur_ps->eth.tx_unicast); - ice_stat_update40(hw, GLPRT_MPTCL(pf_id), 
pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, &cur_ps->eth.tx_multicast); - ice_stat_update40(hw, GLPRT_BPTCL(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, &cur_ps->eth.tx_broadcast); - ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, &prev_ps->tx_dropped_link_down, &cur_ps->tx_dropped_link_down); - ice_stat_update40(hw, GLPRT_PRC64L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, &prev_ps->rx_size_64, &cur_ps->rx_size_64); - ice_stat_update40(hw, GLPRT_PRC127L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, &prev_ps->rx_size_127, &cur_ps->rx_size_127); - ice_stat_update40(hw, GLPRT_PRC255L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, &prev_ps->rx_size_255, &cur_ps->rx_size_255); - ice_stat_update40(hw, GLPRT_PRC511L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, &prev_ps->rx_size_511, &cur_ps->rx_size_511); - ice_stat_update40(hw, GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); - ice_stat_update40(hw, GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); - ice_stat_update40(hw, GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, &prev_ps->rx_size_big, &cur_ps->rx_size_big); - ice_stat_update40(hw, GLPRT_PTC64L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, &prev_ps->tx_size_64, &cur_ps->tx_size_64); - ice_stat_update40(hw, GLPRT_PTC127L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, &prev_ps->tx_size_127, &cur_ps->tx_size_127); - ice_stat_update40(hw, GLPRT_PTC255L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, &prev_ps->tx_size_255, &cur_ps->tx_size_255); - ice_stat_update40(hw, GLPRT_PTC511L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, &prev_ps->tx_size_511, &cur_ps->tx_size_511); - ice_stat_update40(hw, GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); - ice_stat_update40(hw, GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); - ice_stat_update40(hw, GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, &prev_ps->tx_size_big, &cur_ps->tx_size_big); - ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); - ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); - ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXONTXC(port), 
pf->stat_prev_loaded, &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); - ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); ice_update_dcb_stats(pf); - ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, &prev_ps->crc_errors, &cur_ps->crc_errors); - ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); - ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, &prev_ps->mac_local_faults, &cur_ps->mac_local_faults); - ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, &prev_ps->mac_remote_faults, &cur_ps->mac_remote_faults); - ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); - ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, &prev_ps->rx_undersize, &cur_ps->rx_undersize); - ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, &prev_ps->rx_fragments, &cur_ps->rx_fragments); - ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, &prev_ps->rx_oversize, &cur_ps->rx_oversize); - ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, &prev_ps->rx_jabber, &cur_ps->rx_jabber); pf->stat_prev_loaded = true; -- cgit v1.2.3 From 2935824873890de1503c0798c20833bb72a6c7c6 Mon Sep 17 00:00:00 2001 From: Victor Raj Date: Thu, 25 Jul 2019 02:53:54 -0700 Subject: ice: added sibling head to parse nodes There was a bug in the previous code which never traverses all the children to get the first node of the requested layer. Add a sibling head pointer to point the first node of each layer per TC. This helps traverse easier and quicker and also removes the recursion. 
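For illustration only (not part of this patch), a minimal stand-alone sketch of the idea: caching the first node of every (TC, layer) pair in a small table turns the "first node of a layer" lookup into a constant-time array read, and the cache only needs touching when a node is added or removed. All type, field, and array names below are invented for the sketch and do not match the driver.

/* Hypothetical stand-alone sketch; sizes and names are illustrative only. */
#define SKETCH_MAX_TC     8
#define SKETCH_MAX_LAYER  9

struct sketch_node {
	struct sketch_node *sibling;	/* next node on the same layer */
	unsigned char tc_num;
	unsigned char layer;
};

struct sketch_port {
	/* first node of each layer, per TC */
	struct sketch_node *sib_head[SKETCH_MAX_TC][SKETCH_MAX_LAYER];
};

/* O(1) replacement for a recursive "first node of layer" search */
static struct sketch_node *
sketch_get_first_node(struct sketch_port *pi, unsigned char tc, unsigned char layer)
{
	return pi->sib_head[tc][layer];
}

/* keep the cache consistent when nodes come and go */
static void sketch_node_added(struct sketch_port *pi, struct sketch_node *n)
{
	if (!pi->sib_head[n->tc_num][n->layer])
		pi->sib_head[n->tc_num][n->layer] = n;
}

static void sketch_node_removed(struct sketch_port *pi, struct sketch_node *n)
{
	if (pi->sib_head[n->tc_num][n->layer] == n)
		pi->sib_head[n->tc_num][n->layer] = n->sibling;
}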
Signed-off-by: Victor Raj Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_sched.c | 57 +++++++++++------------------- drivers/net/ethernet/intel/ice/ice_type.h | 2 ++ 2 files changed, 23 insertions(+), 36 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2a232504379d..79d64f9ed609 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -260,33 +260,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, /** * ice_sched_get_first_node - get the first node of the given layer - * @hw: pointer to the HW struct + * @pi: port information structure * @parent: pointer the base node of the subtree * @layer: layer number * * This function retrieves the first node of the given layer from the subtree */ static struct ice_sched_node * -ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent, - u8 layer) +ice_sched_get_first_node(struct ice_port_info *pi, + struct ice_sched_node *parent, u8 layer) { - u8 i; - - if (layer < hw->sw_entry_point_layer) - return NULL; - for (i = 0; i < parent->num_children; i++) { - struct ice_sched_node *node = parent->children[i]; - - if (node) { - if (node->tx_sched_layer == layer) - return node; - /* this recursion is intentional, and wouldn't - * go more than 9 calls - */ - return ice_sched_get_first_node(hw, node, layer); - } - } - return NULL; + return pi->sib_head[parent->tc_num][layer]; } /** @@ -342,7 +326,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) parent = node->parent; /* root has no parent */ if (parent) { - struct ice_sched_node *p, *tc_node; + struct ice_sched_node *p; /* update the parent */ for (i = 0; i < parent->num_children; i++) @@ -354,16 +338,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) break; } - /* search for previous sibling that points to this node and - * remove the reference - */ - tc_node = ice_sched_get_tc_node(pi, node->tc_num); - if (!tc_node) { - ice_debug(hw, ICE_DBG_SCHED, - "Invalid TC number %d\n", node->tc_num); - goto err_exit; - } - p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer); + p = ice_sched_get_first_node(pi, node, node->tx_sched_layer); while (p) { if (p->sibling == node) { p->sibling = node->sibling; @@ -371,8 +346,13 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) } p = p->sibling; } + + /* update the sibling head if head is getting removed */ + if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node) + pi->sib_head[node->tc_num][node->tx_sched_layer] = + node->sibling; } -err_exit: + /* leaf nodes have no children */ if (node->children) devm_kfree(ice_hw_to_dev(hw), node->children); @@ -743,13 +723,17 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, /* add it to previous node sibling pointer */ /* Note: siblings are not linked across branches */ - prev = ice_sched_get_first_node(hw, tc_node, layer); + prev = ice_sched_get_first_node(pi, tc_node, layer); if (prev && prev != new_node) { while (prev->sibling) prev = prev->sibling; prev->sibling = new_node; } + /* initialize the sibling head */ + if (!pi->sib_head[tc_node->tc_num][layer]) + pi->sib_head[tc_node->tc_num][layer] = new_node; + if (i == 0) *first_node_teid = teid; } @@ -1160,7 +1144,7 @@ ice_sched_get_free_qparent(struct 
ice_port_info *pi, u16 vsi_handle, u8 tc, goto lan_q_exit; /* get the first queue group node from VSI sub-tree */ - qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer); + qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); while (qgrp_node) { /* make sure the qgroup node is part of the VSI subtree */ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) @@ -1191,7 +1175,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, u8 vsi_layer; vsi_layer = ice_sched_get_vsi_layer(hw); - node = ice_sched_get_first_node(hw, tc_node, vsi_layer); + node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer); /* Check whether it already exists */ while (node) { @@ -1316,7 +1300,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, /* If intermediate nodes are reached max children * then add a new one. */ - node = ice_sched_get_first_node(hw, tc_node, (u8)i); + node = ice_sched_get_first_node(hw->port_info, tc_node, + (u8)i); /* scan all the siblings */ while (node) { if (node->num_children < hw->max_children[i]) diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 24bbef8bbe69..d76e0cb7ef46 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -347,6 +347,8 @@ struct ice_port_info { struct ice_mac_info mac; struct ice_phy_info phy; struct mutex sched_lock; /* protect access to TXSched tree */ + struct ice_sched_node * + sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM]; struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */ /* DCBX info */ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */ -- cgit v1.2.3 From f27db2e65e1134c4173ced2db4aebade47485dc5 Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Thu, 25 Jul 2019 02:53:55 -0700 Subject: ice: Sanitize ice_ena_vsi and ice_dis_vsi 1. ndo_open and ndo_stop are implemented by ice_open and ice_stop respectively. When enabling/disabling VSIs, just call ice_open/ice_stop instead of ndo_open/ndo_stop. 2. Rework logic around rtnl_lock/rtnl_unlock 3. In ice_ena_vsi, remove an unnecessary stack variable and return 0 instead of err when __ICE_NEEDS_RESTART is not set. 
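Items 1 and 2 above boil down to the pattern sketched below. This is a stand-alone illustration only: a pthread mutex stands in for the rtnl lock and do_open() stands in for ice_open(); none of these names are the driver's.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t fake_rtnl = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for rtnl */

static int do_open(void) { return 0; }				/* stand-in for ice_open() */

/* Take the lock only when the caller does not already hold it, and call the
 * driver's own open routine directly instead of going through netdev_ops. */
static int sketch_ena_vsi(bool locked)
{
	int err;

	if (!locked)
		pthread_mutex_lock(&fake_rtnl);

	err = do_open();

	if (!locked)
		pthread_mutex_unlock(&fake_rtnl);

	return err;
}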
Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 76a647cc2dbd..2f6125bfd991 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -436,13 +436,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) if (vsi->type == ICE_VSI_PF && vsi->netdev) { if (netif_running(vsi->netdev)) { - if (!locked) { + if (!locked) rtnl_lock(); - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + + ice_stop(vsi->netdev); + + if (!locked) rtnl_unlock(); - } else { - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - } } else { ice_vsi_close(vsi); } @@ -3654,21 +3654,19 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked) int err = 0; if (!test_bit(__ICE_NEEDS_RESTART, vsi->state)) - return err; + return 0; clear_bit(__ICE_NEEDS_RESTART, vsi->state); if (vsi->netdev && vsi->type == ICE_VSI_PF) { - struct net_device *netd = vsi->netdev; - if (netif_running(vsi->netdev)) { - if (locked) { - err = netd->netdev_ops->ndo_open(netd); - } else { + if (!locked) rtnl_lock(); - err = netd->netdev_ops->ndo_open(netd); + + err = ice_open(vsi->netdev); + + if (!locked) rtnl_unlock(); - } } } -- cgit v1.2.3 From dc67039b3d1159f8feba34e6fdb798603505d5d6 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 25 Jul 2019 02:53:56 -0700 Subject: ice: shorten local and add debug prints Add some verbose debugging for dyndbg to help us when we are having issues with link and/or PHY. While there, shorten some strings used by locals that were causing long line wrapping. 
Signed-off-by: Jesse Brandeburg Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 63 ++++++++++++++++++++--------- 1 file changed, 44 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 4b43e6de847b..302ad981129c 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -263,21 +263,23 @@ enum ice_status ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd) { - struct ice_link_status *hw_link_info_old, *hw_link_info; struct ice_aqc_get_link_status_data link_data = { 0 }; struct ice_aqc_get_link_status *resp; + struct ice_link_status *li_old, *li; enum ice_media_type *hw_media_type; struct ice_fc_info *hw_fc_info; bool tx_pause, rx_pause; struct ice_aq_desc desc; enum ice_status status; + struct ice_hw *hw; u16 cmd_flags; if (!pi) return ICE_ERR_PARAM; - hw_link_info_old = &pi->phy.link_info_old; + hw = pi->hw; + li_old = &pi->phy.link_info_old; hw_media_type = &pi->phy.media_type; - hw_link_info = &pi->phy.link_info; + li = &pi->phy.link_info; hw_fc_info = &pi->fc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); @@ -286,27 +288,27 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, resp->cmd_flags = cpu_to_le16(cmd_flags); resp->lport_num = pi->lport; - status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data), - cd); + status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd); if (status) return status; /* save off old link status information */ - *hw_link_info_old = *hw_link_info; + *li_old = *li; /* update current link status information */ - hw_link_info->link_speed = le16_to_cpu(link_data.link_speed); - hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low); - hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high); + li->link_speed = le16_to_cpu(link_data.link_speed); + li->phy_type_low = le64_to_cpu(link_data.phy_type_low); + li->phy_type_high = le64_to_cpu(link_data.phy_type_high); *hw_media_type = ice_get_media_type(pi); - hw_link_info->link_info = link_data.link_info; - hw_link_info->an_info = link_data.an_info; - hw_link_info->ext_info = link_data.ext_info; - hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size); - hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; - hw_link_info->topo_media_conflict = link_data.topo_media_conflict; - hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M; + li->link_info = link_data.link_info; + li->an_info = link_data.an_info; + li->ext_info = link_data.ext_info; + li->max_frame_size = le16_to_cpu(link_data.max_frame_size); + li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; + li->topo_media_conflict = link_data.topo_media_conflict; + li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M | + ICE_AQ_CFG_PACING_TYPE_M); /* update fc info */ tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX); @@ -320,12 +322,24 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, else hw_fc_info->current_mode = ICE_FC_NONE; - hw_link_info->lse_ena = - !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); + li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); + + ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed); + ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", + (unsigned long 
long)li->phy_type_low); + ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", + (unsigned long long)li->phy_type_high); + ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type); + ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info); + ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info); + ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info); + ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena); + ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size); + ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing); /* save link status information */ if (link) - *link = *hw_link_info; + *link = *li; /* flag cleared so calling functions don't call AQ again */ pi->phy.get_link_info = false; @@ -2000,6 +2014,17 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, desc.params.set_phy.lport_num = lport; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", + (unsigned long long)le64_to_cpu(cfg->phy_type_low)); + ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", + (unsigned long long)le64_to_cpu(cfg->phy_type_high)); + ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps); + ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n", + cfg->low_power_ctrl); + ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap); + ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value); + ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt); + return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); } -- cgit v1.2.3 From 4425e0531c433e3a9414d09c48b82a0f372978b2 Mon Sep 17 00:00:00 2001 From: Krzysztof Kazimierczak Date: Thu, 25 Jul 2019 02:53:57 -0700 Subject: ice: Introduce a local variable for a VSI in the rebuild path When a VSI is accessed inside the ice_for_each_vsi macro in the rebuild path (ice_vsi_rebuild_all() and ice_vsi_replay_all()), it is referred to as pf->vsi[i]. Introduce local variables to improve readability. Signed-off-by: Krzysztof Kazimierczak Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2f6125bfd991..2c6b2bc4e30e 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3704,22 +3704,23 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf) /* loop through pf->vsi array and reinit the VSI if found */ ice_for_each_vsi(pf, i) { + struct ice_vsi *vsi = pf->vsi[i]; int err; - if (!pf->vsi[i]) + if (!vsi) continue; - err = ice_vsi_rebuild(pf->vsi[i]); + err = ice_vsi_rebuild(vsi); if (err) { dev_err(&pf->pdev->dev, "VSI at index %d rebuild failed\n", - pf->vsi[i]->idx); + vsi->idx); return err; } dev_info(&pf->pdev->dev, "VSI at index %d rebuilt. 
vsi_num = 0x%x\n", - pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + vsi->idx, vsi->vsi_num); } return 0; @@ -3737,25 +3738,27 @@ static int ice_vsi_replay_all(struct ice_pf *pf) /* loop through pf->vsi array and replay the VSI if found */ ice_for_each_vsi(pf, i) { - if (!pf->vsi[i]) + struct ice_vsi *vsi = pf->vsi[i]; + + if (!vsi) continue; - ret = ice_replay_vsi(hw, pf->vsi[i]->idx); + ret = ice_replay_vsi(hw, vsi->idx); if (ret) { dev_err(&pf->pdev->dev, "VSI at index %d replay failed %d\n", - pf->vsi[i]->idx, ret); + vsi->idx, ret); return -EIO; } /* Re-map HW VSI number, using VSI handle that has been * previously validated in ice_replay_vsi() call above */ - pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx); + vsi->vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); dev_info(&pf->pdev->dev, "VSI at index %d filter replayed successfully - vsi_num %i\n", - pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + vsi->idx, vsi->vsi_num); } /* Clean up replay filter after successful re-configuration */ -- cgit v1.2.3 From e63a1dbdc7df4b6b48af920a86630c20a44340cf Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Thu, 25 Jul 2019 02:53:58 -0700 Subject: ice: Don't clog kernel debug log with VF MDD events errors In case of MDD events on VF, don't clog kernel log with unlimited VF MDD events message "VF 0 has had 1018 MDD events since last boot" - limit events log message to 30, based on the observation in some experimentation with sending malicious packet once, and number of events reported before device stopped observing MDD events. Also removed defunct macro "ICE_DFLT_NUM_MDD_EVENTS_ALLOWED" for tracking number of MDD events allowed before disabling the interface... Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 6 ++++-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2c6b2bc4e30e..67cbebe1ff3f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1315,8 +1315,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (vf_mdd_detected) { vf->num_mdd_events++; - if (vf->num_mdd_events > 1) - dev_info(&pf->pdev->dev, "VF %d has had %llu MDD events since last boot\n", + if (vf->num_mdd_events && + vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD) + dev_info(&pf->pdev->dev, + "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n", i, vf->num_mdd_events); } } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 4d94853f119a..13f45f37d75e 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -15,8 +15,8 @@ #define ICE_MAX_MACADDR_PER_VF 12 /* Malicious Driver Detection */ -#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3 #define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 +#define ICE_MDD_EVENTS_THRESHOLD 30 /* Static VF transaction/status register def */ #define VF_DEVICE_STATUS 0xAA -- cgit v1.2.3 From 9c7dd7566d189426dd013edc3ddd00c1d981a8a0 Mon Sep 17 00:00:00 2001 From: Michal Swiatkowski Date: Thu, 25 Jul 2019 02:53:59 -0700 Subject: ice: add validation in OP_CONFIG_VSI_QUEUES VF message Check num_queue_pairs to avoid access to unallocated field of vsi->tx_rings/vsi->rx_rings. 
Without this validation we can set vsi->alloc_txq/vsi->alloc_rxq to value smaller than ICE_MAX_BASE_QS_PER_VF and send this command with num_queue_pairs greater than vsi->alloc_txq/vsi->alloc_rxq. This lead to access to unallocated memory. In VF vsi alloc_txq and alloc_rxq should be the same. Get minimum because looks more readable. Also add validation for ring_len param. It should be greater than 32 and be multiple of 32. Incorrect value leads to hang traffic on PF. Signed-off-by: Michal Swiatkowski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 31 ++++++++++++++++++------ 1 file changed, 24 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 86637d99ee77..e6578d2f0876 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -1712,6 +1712,19 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) return (vsi && (qid < vsi->alloc_txq)); } +/** + * ice_vc_isvalid_ring_len + * @ring_len: length of ring + * + * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE + */ +static bool ice_vc_isvalid_ring_len(u16 ring_len) +{ + return (ring_len >= ICE_MIN_NUM_DESC && + ring_len <= ICE_MAX_NUM_DESC && + !(ring_len % ICE_REQ_DESC_MULTIPLE)); +} + /** * ice_vc_config_rss_key * @vf: pointer to the VF info @@ -2107,16 +2120,17 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) { - dev_err(&pf->pdev->dev, - "VF-%d requesting more than supported number of queues: %d\n", - vf->vf_id, qci->num_queue_pairs); + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { + if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF || + qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { + dev_err(&pf->pdev->dev, + "VF-%d requesting more than supported number of queues: %d\n", + vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -2127,6 +2141,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) qpi->rxq.vsi_id != qci->vsi_id || qpi->rxq.queue_id != qpi->txq.queue_id || qpi->txq.headwb_enabled || + !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || + !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2137,7 +2153,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) /* copy Rx queue info from VF into VSI */ vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; vsi->rx_rings[i]->count = qpi->rxq.ring_len; - if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) { + if (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || + qpi->rxq.databuffer_size < 1024) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } -- cgit v1.2.3 From 35b4f4372f91d79632e8881b8c4a8f7802ff12f4 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 25 Jul 2019 02:54:01 -0700 Subject: ice: fix ice_is_tc_ena ice_is_tc_ena is used to check whether a given traffic class is enabled. Because there are only 8 traffic classes, the function took a u8 bitmap. This causes problems because it is cast to an unsigned long causing a static analysis warning regarding Out-of-bounds read. 
Fix this by simply updating ice_is_tc_ena to take an unsigned long. Passing a u8 to this function should implicitly convert the value. Signed-off-by: Jacob Keller Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_type.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index d76e0cb7ef46..b538d0b9eb80 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -13,9 +13,9 @@ #define ICE_BYTES_PER_WORD 2 #define ICE_BYTES_PER_DWORD 4 -static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) +static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { - return test_bit(tc, (unsigned long *)&bitmap); + return test_bit(tc, &bitmap); } /* Driver always calls main vsi_handle first */ -- cgit v1.2.3 From a1199d679af4103acdc0fdc17ae9c1274733d673 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 2 Aug 2019 16:52:17 +0100 Subject: ice: fix potential infinite loop The loop counter of a for-loop is a u8 however this is being compared to an int upper bound and this can lead to an infinite loop if the upper bound is greater than 255 since the loop counter will wrap back to zero. Fix this potential issue by making the loop counter an int. Addresses-Coverity: ("Infinite loop") Fixes: c7aeb4d1b9bf ("ice: Disable VFs until reset is completed") Signed-off-by: Colin Ian King Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 67cbebe1ff3f..0398c86226f0 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -477,7 +477,7 @@ static void ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - u8 i; + int i; /* already prepared for reset */ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) -- cgit v1.2.3 From d02f734cb713aee558f61883ea694c056b01beb0 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Fri, 2 Aug 2019 01:25:19 -0700 Subject: ice: add support for enabling/disabling single queues Refactor the queue handling functions that are going through queue arrays in a way that the logic done for a single queue is pulled out and it will be called for each ring when traversing ring array. This implies that when disabling Tx rings we won't fill up q_ids, q_teids and q_handles arrays. Drop also 'offset' parameter; the value from vsi's txq_map is stored in ring->reg_idx and that drops the need for mentioned parameter. Introduce the ice_vsi_cfg_txq, ice_vsi_stop_tx_ring and ice_vsi_ctrl_rx_ring that are the functions with pulled out logic. There's several Tx queue meta data (q_id, q_handle, q_teid and other) that need to be set up during Tx queue disablement, so let's as well add a helper structure that wraps it up and a function that will be filling it up. 
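A rough stand-alone sketch of the shape of that refactor (every name below is invented for the sketch, not taken from the driver): the per-queue work takes a small metadata struct, and the array-level function only fills that struct and loops.

/* Illustrative only; fields and functions are made up for this sketch. */
struct sketch_txq_meta {
	unsigned int q_id;	/* entry in the queue map */
	unsigned int q_teid;	/* scheduler element id */
	unsigned int q_handle;	/* relative index within the TC */
	unsigned char tc;
};

struct sketch_ring {
	unsigned int reg_idx;
	unsigned int txq_teid;
	unsigned int q_handle;
	unsigned char dcb_tc;
};

static void
sketch_fill_txq_meta(const struct sketch_ring *ring, struct sketch_txq_meta *m)
{
	m->q_id = ring->reg_idx;
	m->q_teid = ring->txq_teid;
	m->q_handle = ring->q_handle;
	m->tc = ring->dcb_tc;
}

static int sketch_stop_tx_ring(const struct sketch_txq_meta *m)
{
	(void)m;		/* per-queue teardown would go here */
	return 0;
}

static int sketch_stop_tx_rings(struct sketch_ring **rings, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct sketch_txq_meta m;
		int err;

		sketch_fill_txq_meta(rings[i], &m);
		err = sketch_stop_tx_ring(&m);
		if (err)
			return err;
	}
	return 0;
}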
Signed-off-by: Maciej Fijalkowski Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 342 ++++++++++++++++++------------- drivers/net/ethernet/intel/ice/ice_lib.h | 18 +- 2 files changed, 214 insertions(+), 146 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 8d5d6635a123..cfd6723de857 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -191,41 +191,55 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) } /** - * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings + * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring * @vsi: the VSI being configured * @ena: start or stop the Rx rings + * @rxq_idx: Rx queue index */ -static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) +static int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) { + int pf_q = vsi->rxq_map[rxq_idx]; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - int i, ret = 0; + int ret = 0; + u32 rx_reg; - for (i = 0; i < vsi->num_rxq; i++) { - int pf_q = vsi->rxq_map[i]; - u32 rx_reg; + rx_reg = rd32(hw, QRX_CTRL(pf_q)); - rx_reg = rd32(hw, QRX_CTRL(pf_q)); + /* Skip if the queue is already in the requested state */ + if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) + return 0; - /* Skip if the queue is already in the requested state */ - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - continue; + /* turn on/off the queue */ + if (ena) + rx_reg |= QRX_CTRL_QENA_REQ_M; + else + rx_reg &= ~QRX_CTRL_QENA_REQ_M; + wr32(hw, QRX_CTRL(pf_q), rx_reg); - /* turn on/off the queue */ - if (ena) - rx_reg |= QRX_CTRL_QENA_REQ_M; - else - rx_reg &= ~QRX_CTRL_QENA_REQ_M; - wr32(hw, QRX_CTRL(pf_q), rx_reg); - - /* wait for the change to finish */ - ret = ice_pf_rxq_wait(pf, pf_q, ena); - if (ret) { - dev_err(&pf->pdev->dev, - "VSI idx %d Rx ring %d %sable timeout\n", - vsi->idx, pf_q, (ena ? "en" : "dis")); + /* wait for the change to finish */ + ret = ice_pf_rxq_wait(pf, pf_q, ena); + if (ret) + dev_err(&pf->pdev->dev, + "VSI idx %d Rx ring %d %sable timeout\n", + vsi->idx, pf_q, (ena ? "en" : "dis")); + + return ret; +} + +/** + * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings + * @vsi: the VSI being configured + * @ena: start or stop the Rx rings + */ +static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) +{ + int i, ret = 0; + + for (i = 0; i < vsi->num_rxq; i++) { + ret = ice_vsi_ctrl_rx_ring(vsi, ena, i); + if (ret) break; - } } return ret; @@ -1648,6 +1662,62 @@ setup_rings: return 0; } +/** + * ice_vsi_cfg_txq - Configure single Tx queue + * @vsi: the VSI that queue belongs to + * @ring: Tx ring to be configured + * @tc_q_idx: queue index within given TC + * @qg_buf: queue group buffer + * @tc: TC that Tx ring belongs to + */ +static int +ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx, + struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc) +{ + struct ice_tlan_ctx tlan_ctx = { 0 }; + struct ice_aqc_add_txqs_perq *txq; + struct ice_pf *pf = vsi->back; + u8 buf_len = sizeof(*qg_buf); + enum ice_status status; + u16 pf_q; + + pf_q = ring->reg_idx; + ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); + /* copy context contents into the qg_buf */ + qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); + ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, + ice_tlan_ctx_info); + + /* init queue specific tail reg. 
It is referred as + * transmit comm scheduler queue doorbell. + */ + ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); + + /* Add unique software queue handle of the Tx queue per + * TC into the VSI Tx ring + */ + ring->q_handle = tc_q_idx; + + status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, + 1, qg_buf, buf_len, NULL); + if (status) { + dev_err(&pf->pdev->dev, + "Failed to set LAN Tx queue context, error: %d\n", + status); + return -ENODEV; + } + + /* Add Tx Queue TEID into the VSI Tx ring from the + * response. This will complete configuring and + * enabling the queue. + */ + txq = &qg_buf->txqs[0]; + if (pf_q == le16_to_cpu(txq->txq_id)) + ring->txq_teid = le32_to_cpu(txq->q_teid); + + return 0; +} + /** * ice_vsi_cfg_txqs - Configure the VSI for Tx * @vsi: the VSI being configured @@ -1661,20 +1731,16 @@ static int ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) { struct ice_aqc_add_tx_qgrp *qg_buf; - struct ice_aqc_add_txqs_perq *txq; struct ice_pf *pf = vsi->back; - u8 num_q_grps, q_idx = 0; - enum ice_status status; - u16 buf_len, i, pf_q; - int err = 0, tc; + u16 q_idx = 0, i; + int err = 0; + u8 tc; - buf_len = sizeof(*qg_buf); - qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); + qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL); if (!qg_buf) return -ENOMEM; qg_buf->num_txqs = 1; - num_q_grps = 1; /* set up and configure the Tx queues for each enabled TC */ ice_for_each_traffic_class(tc) { @@ -1682,39 +1748,10 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) break; for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { - struct ice_tlan_ctx tlan_ctx = { 0 }; - - pf_q = vsi->txq_map[q_idx + offset]; - ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q); - /* copy context contents into the qg_buf */ - qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); - - /* init queue specific tail reg. It is referred as - * transmit comm scheduler queue doorbell. - */ - rings[q_idx]->tail = - pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); - status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, - i, num_q_grps, qg_buf, - buf_len, NULL); - if (status) { - dev_err(&pf->pdev->dev, - "Failed to set LAN Tx queue context, error: %d\n", - status); - err = -ENODEV; + err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset, + qg_buf, tc); + if (err) goto err_cfg_txqs; - } - - /* Add Tx Queue TEID into the VSI Tx ring from the - * response. This will complete configuring and - * enabling the queue. 
- */ - txq = &qg_buf->txqs[0]; - if (pf_q == le16_to_cpu(txq->txq_id)) - rings[q_idx]->txq_teid = - le32_to_cpu(txq->q_teid); q_idx++; } @@ -2061,45 +2098,106 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) } /** - * ice_vsi_stop_tx_rings - Disable Tx rings + * ice_vsi_stop_tx_ring - Disable single Tx ring * @vsi: the VSI being configured * @rst_src: reset source * @rel_vmvf_num: Relative ID of VF/VM - * @rings: Tx ring array to be stopped - * @offset: offset within vsi->txq_map + * @ring: Tx ring to be stopped + * @txq_meta: Meta data of Tx ring to be stopped */ static int -ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num, struct ice_ring **rings, int offset) +ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring *ring, + struct ice_txq_meta *txq_meta) { struct ice_pf *pf = vsi->back; + struct ice_q_vector *q_vector; struct ice_hw *hw = &pf->hw; - int tc, q_idx = 0, err = 0; - u16 *q_ids, *q_handles, i; enum ice_status status; - u32 *q_teids, val; + u32 val; - if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) - return -EINVAL; + /* clear cause_ena bit for disabled queues */ + val = rd32(hw, QINT_TQCTL(ring->reg_idx)); + val &= ~QINT_TQCTL_CAUSE_ENA_M; + wr32(hw, QINT_TQCTL(ring->reg_idx), val); - q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), - GFP_KERNEL); - if (!q_teids) - return -ENOMEM; + /* software is expected to wait for 100 ns */ + ndelay(100); - q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), - GFP_KERNEL); - if (!q_ids) { - err = -ENOMEM; - goto err_alloc_q_ids; + /* trigger a software interrupt for the vector + * associated to the queue to schedule NAPI handler + */ + q_vector = ring->q_vector; + if (q_vector) + ice_trigger_sw_intr(hw, q_vector); + + status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx, + txq_meta->tc, 1, &txq_meta->q_handle, + &txq_meta->q_id, &txq_meta->q_teid, rst_src, + rel_vmvf_num, NULL); + + /* if the disable queue command was exercised during an + * active reset flow, ICE_ERR_RESET_ONGOING is returned. + * This is not an error as the reset operation disables + * queues at the hardware level anyway. + */ + if (status == ICE_ERR_RESET_ONGOING) { + dev_dbg(&vsi->back->pdev->dev, + "Reset in progress. 
LAN Tx queues already disabled\n"); + } else if (status == ICE_ERR_DOES_NOT_EXIST) { + dev_dbg(&vsi->back->pdev->dev, + "LAN Tx queues do not exist, nothing to disable\n"); + } else if (status) { + dev_err(&vsi->back->pdev->dev, + "Failed to disable LAN Tx queues, error: %d\n", status); + return -ENODEV; } - q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, - sizeof(*q_handles), GFP_KERNEL); - if (!q_handles) { - err = -ENOMEM; - goto err_alloc_q_handles; - } + return 0; +} + +/** + * ice_fill_txq_meta - Prepare the Tx queue's meta data + * @vsi: VSI that ring belongs to + * @ring: ring that txq_meta will be based on + * @txq_meta: a helper struct that wraps Tx queue's information + * + * Set up a helper struct that will contain all the necessary fields that + * are needed for stopping Tx queue + */ +static void +ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, + struct ice_txq_meta *txq_meta) +{ + u8 tc = 0; + +#ifdef CONFIG_DCB + tc = ring->dcb_tc; +#endif /* CONFIG_DCB */ + txq_meta->q_id = ring->reg_idx; + txq_meta->q_teid = ring->txq_teid; + txq_meta->q_handle = ring->q_handle; + txq_meta->vsi_idx = vsi->idx; + txq_meta->tc = tc; +} + +/** + * ice_vsi_stop_tx_rings - Disable Tx rings + * @vsi: the VSI being configured + * @rst_src: reset source + * @rel_vmvf_num: Relative ID of VF/VM + * @rings: Tx ring array to be stopped + */ +static int +ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring **rings) +{ + u16 i, q_idx = 0; + int status; + u8 tc; + + if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) + return -EINVAL; /* set up the Tx queue list to be disabled for each enabled TC */ ice_for_each_traffic_class(tc) { @@ -2107,67 +2205,24 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, break; for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { - struct ice_q_vector *q_vector; + struct ice_txq_meta txq_meta = { }; - if (!rings || !rings[q_idx]) { - err = -EINVAL; - goto err_out; - } + if (!rings || !rings[q_idx]) + return -EINVAL; - q_ids[i] = vsi->txq_map[q_idx + offset]; - q_teids[i] = rings[q_idx]->txq_teid; - q_handles[i] = i; + ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); + status = ice_vsi_stop_tx_ring(vsi, rst_src, + rel_vmvf_num, + rings[q_idx], &txq_meta); - /* clear cause_ena bit for disabled queues */ - val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); - - /* software is expected to wait for 100 ns */ - ndelay(100); - - /* trigger a software interrupt for the vector - * associated to the queue to schedule NAPI handler - */ - q_vector = rings[i]->q_vector; - if (q_vector) - ice_trigger_sw_intr(hw, q_vector); + if (status) + return status; q_idx++; } - status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc, - vsi->num_txq, q_handles, q_ids, - q_teids, rst_src, rel_vmvf_num, NULL); - - /* if the disable queue command was exercised during an active - * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not - * an error as the reset operation disables queues at the - * hardware level anyway. - */ - if (status == ICE_ERR_RESET_ONGOING) { - dev_dbg(&pf->pdev->dev, - "Reset in progress. 
LAN Tx queues already disabled\n"); - } else if (status == ICE_ERR_DOES_NOT_EXIST) { - dev_dbg(&pf->pdev->dev, - "LAN Tx queues does not exist, nothing to disabled\n"); - } else if (status) { - dev_err(&pf->pdev->dev, - "Failed to disable LAN Tx queues, error: %d\n", - status); - err = -ENODEV; - } } -err_out: - devm_kfree(&pf->pdev->dev, q_handles); - -err_alloc_q_handles: - devm_kfree(&pf->pdev->dev, q_ids); - -err_alloc_q_ids: - devm_kfree(&pf->pdev->dev, q_teids); - - return err; + return 0; } /** @@ -2180,8 +2235,7 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num) { - return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, - 0); + return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 969ba27cba95..33074b8b7557 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -6,8 +6,22 @@ #include "ice.h" -int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, - const u8 *macaddr); +struct ice_txq_meta { + /* Tx-scheduler element identifier */ + u32 q_teid; + /* Entry in VSI's txq_map bitmap */ + u16 q_id; + /* Relative index of Tx queue within TC */ + u16 q_handle; + /* VSI index that Tx queue belongs to */ + u16 vsi_idx; + /* TC number that Tx queue belongs to */ + u8 tc; +}; + +int +ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, + const u8 *macaddr); void ice_free_fltr_list(struct device *dev, struct list_head *h); -- cgit v1.2.3 From 77ca27c417050376b703cec52810aaa10e17d9ef Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Fri, 2 Aug 2019 01:25:20 -0700 Subject: ice: add support for virtchnl_queue_select.[tx|rx]_queues bitmap The VF driver can call VIRTCHNL_OP_[ENABLE|DISABLE]_QUEUES separately for each queue. Add support for virtchnl_queue_select.[tx|rx]_queues bitmap which is used to indicate which queues to enable and disable. Add tracing of VF Tx/Rx per queue enable state to avoid enabling enabled queues and disabling disabled queues. Add total queues enabled count and clear ICE_VF_STATE_QS_ENA when count is zero. 
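A tiny stand-alone sketch of that bookkeeping (all names invented for the sketch): a per-queue flag avoids enabling an already-enabled queue or disabling an already-disabled one, a counter tracks the total, and the "queues enabled" state is cleared only when the last queue goes down.

#include <stdbool.h>

#define SKETCH_MAX_QS 16

struct sketch_vf {
	bool rxq_ena[SKETCH_MAX_QS];
	unsigned int num_qs_ena;
	bool qs_state_ena;
};

static void sketch_ena_rxq(struct sketch_vf *vf, unsigned int q)
{
	if (vf->rxq_ena[q])
		return;				/* skip queues that are already enabled */
	vf->rxq_ena[q] = true;
	vf->num_qs_ena++;
	vf->qs_state_ena = true;
}

static void sketch_dis_rxq(struct sketch_vf *vf, unsigned int q)
{
	if (!vf->rxq_ena[q])
		return;				/* skip queues that are already disabled */
	vf->rxq_ena[q] = false;
	if (--vf->num_qs_ena == 0)
		vf->qs_state_ena = false;	/* last enabled queue went down */
}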
Signed-off-by: Paul Greenwalt Signed-off-by: Peng Huang Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 15 +- drivers/net/ethernet/intel/ice/ice_lib.h | 10 + drivers/net/ethernet/intel/ice/ice_main.c | 2 +- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 243 ++++++++++++++++------- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 12 +- 5 files changed, 207 insertions(+), 75 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index cfd6723de857..fb866be84088 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -196,7 +196,10 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) * @ena: start or stop the Rx rings * @rxq_idx: Rx queue index */ -static int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) +#ifndef CONFIG_PCI_IOV +static +#endif /* !CONFIG_PCI_IOV */ +int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) { int pf_q = vsi->rxq_map[rxq_idx]; struct ice_pf *pf = vsi->back; @@ -2105,7 +2108,10 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) * @ring: Tx ring to be stopped * @txq_meta: Meta data of Tx ring to be stopped */ -static int +#ifndef CONFIG_PCI_IOV +static +#endif /* !CONFIG_PCI_IOV */ +int ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num, struct ice_ring *ring, struct ice_txq_meta *txq_meta) @@ -2165,7 +2171,10 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, * Set up a helper struct that will contain all the necessary fields that * are needed for stopping Tx queue */ -static void +#ifndef CONFIG_PCI_IOV +static +#endif /* !CONFIG_PCI_IOV */ +void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, struct ice_txq_meta *txq_meta) { diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 33074b8b7557..7faf8db844f6 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -39,6 +39,16 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx); void ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx); + +int +ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring *ring, + struct ice_txq_meta *txq_meta); + +void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, + struct ice_txq_meta *txq_meta); + +int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx); #endif /* CONFIG_PCI_IOV */ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 0398c86226f0..e47aab6d998d 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -489,7 +489,7 @@ ice_prepare_for_reset(struct ice_pf *pf) /* Disable VFs until reset is completed */ for (i = 0; i < pf->num_alloc_vfs; i++) - clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); + ice_set_vf_state_qs_dis(&pf->vf[i]); /* disable the VSIs and their queues that are not already DOWN */ ice_pf_dis_all_vsi(pf, false); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index e6578d2f0876..78fd3fa8ac8b 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -251,6 +251,35 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf) return 0; } +/** + * ice_set_vf_state_qs_dis - Set VF queues state to disabled + * @vf: pointer to the VF structure + */ +void ice_set_vf_state_qs_dis(struct ice_vf *vf) +{ + /* Clear Rx/Tx enabled queues flag */ + bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF); + bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF); + vf->num_qs_ena = 0; + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); +} + +/** + * ice_dis_vf_qs - Disable the VF queues + * @vf: pointer to the VF structure + */ +static void ice_dis_vf_qs(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = pf->vsi[vf->lan_vsi_idx]; + + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_rx_rings(vsi); + ice_set_vf_state_qs_dis(vf); +} + /** * ice_free_vfs - Free all VFs * @pf: pointer to the PF structure @@ -267,19 +296,9 @@ void ice_free_vfs(struct ice_pf *pf) usleep_range(1000, 2000); /* Avoid wait time by stopping all VFs at the same time */ - for (i = 0; i < pf->num_alloc_vfs; i++) { - struct ice_vsi *vsi; - - if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states)) - continue; - - vsi = pf->vsi[pf->vf[i].lan_vsi_idx]; - /* stop rings without wait time */ - ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i); - ice_vsi_stop_rx_rings(vsi); - - clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); - } + for (i = 0; i < pf->num_alloc_vfs; i++) + if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states)) + ice_dis_vf_qs(&pf->vf[i]); /* Disable IOV before freeing resources. This lets any VF drivers * running in the host get themselves cleaned up before we yank @@ -1055,17 +1074,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) for (v = 0; v < pf->num_alloc_vfs; v++) ice_trigger_vf_reset(&pf->vf[v], is_vflr); - for (v = 0; v < pf->num_alloc_vfs; v++) { - struct ice_vsi *vsi; - - vf = &pf->vf[v]; - vsi = pf->vsi[vf->lan_vsi_idx]; - if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { - ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id); - ice_vsi_stop_rx_rings(vsi); - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); - } - } + for (v = 0; v < pf->num_alloc_vfs; v++) + if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[v].vf_states)) + ice_dis_vf_qs(&pf->vf[v]); /* HW requires some time to make sure it can flush the FIFO for a VF * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in @@ -1144,24 +1155,21 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) /* If the VFs have been disabled, this means something else is * resetting the VF, so we shouldn't continue. */ - if (test_and_set_bit(__ICE_VF_DIS, pf->state)) + if (test_bit(__ICE_VF_DIS, pf->state)) return false; ice_trigger_vf_reset(vf, is_vflr); vsi = pf->vsi[vf->lan_vsi_idx]; - if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { - ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id); - ice_vsi_stop_rx_rings(vsi); - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); - } else { + if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) + ice_dis_vf_qs(vf); + else /* Call Disable LAN Tx queue AQ call even when queues are not - * enabled. This is needed for successful completiom of VFR + * enabled. 
This is needed for successful completion of VFR */ ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL); - } hw = &pf->hw; /* poll VPGEN_VFRSTAT reg to make sure @@ -1210,7 +1218,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) ice_cleanup_and_realloc_vf(vf); ice_flush(hw); - clear_bit(__ICE_VF_DIS, pf->state); return true; } @@ -1717,10 +1724,12 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) * @ring_len: length of ring * * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE + * or zero */ static bool ice_vc_isvalid_ring_len(u16 ring_len) { - return (ring_len >= ICE_MIN_NUM_DESC && + return ring_len == 0 || + (ring_len >= ICE_MIN_NUM_DESC && ring_len <= ICE_MAX_NUM_DESC && !(ring_len % ICE_REQ_DESC_MULTIPLE)); } @@ -1877,6 +1886,8 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) (struct virtchnl_queue_select *)msg; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + unsigned long q_map; + u16 vf_q_id; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1909,12 +1920,48 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) * Tx queue group list was configured and the context bits were * programmed using ice_vsi_cfg_txqs */ - if (ice_vsi_start_rx_rings(vsi)) - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + q_map = vqs->rx_queues; + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if enabled */ + if (test_bit(vf_q_id, vf->rxq_ena)) + continue; + + if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) { + dev_err(&vsi->back->pdev->dev, + "Failed to enable Rx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + set_bit(vf_q_id, vf->rxq_ena); + vf->num_qs_ena++; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + q_map = vqs->tx_queues; + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if enabled */ + if (test_bit(vf_q_id, vf->txq_ena)) + continue; + + set_bit(vf_q_id, vf->txq_ena); + vf->num_qs_ena++; + } /* Set flag to indicate that queues are enabled */ if (v_ret == VIRTCHNL_STATUS_SUCCESS) - set_bit(ICE_VF_STATE_ENA, vf->vf_states); + set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); error_param: /* send the response to the VF */ @@ -1937,9 +1984,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) (struct virtchnl_queue_select *)msg; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + unsigned long q_map; + u16 vf_q_id; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) && - !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1966,23 +2015,69 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) { - dev_err(&vsi->back->pdev->dev, - "Failed to stop tx rings on VSI %d\n", - vsi->vsi_num); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (vqs->tx_queues) { + q_map = vqs->tx_queues; + + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + struct ice_ring *ring = vsi->tx_rings[vf_q_id]; + struct ice_txq_meta txq_meta = { 0 }; + + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + 
goto error_param; + } + + /* Skip queue if not enabled */ + if (!test_bit(vf_q_id, vf->txq_ena)) + continue; + + ice_fill_txq_meta(vsi, ring, &txq_meta); + + if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, + ring, &txq_meta)) { + dev_err(&vsi->back->pdev->dev, + "Failed to stop Tx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Clear enabled queues flag */ + clear_bit(vf_q_id, vf->txq_ena); + vf->num_qs_ena--; + } } - if (ice_vsi_stop_rx_rings(vsi)) { - dev_err(&vsi->back->pdev->dev, - "Failed to stop rx rings on VSI %d\n", - vsi->vsi_num); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (vqs->rx_queues) { + q_map = vqs->rx_queues; + + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if not enabled */ + if (!test_bit(vf_q_id, vf->rxq_ena)) + continue; + + if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) { + dev_err(&vsi->back->pdev->dev, + "Failed to stop Rx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Clear enabled queues flag */ + clear_bit(vf_q_id, vf->rxq_ena); + vf->num_qs_ena--; + } } /* Clear enabled queues flag */ - if (v_ret == VIRTCHNL_STATUS_SUCCESS) - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena) + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); error_param: /* send the response to the VF */ @@ -2106,6 +2201,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; + u16 num_rxq = 0, num_txq = 0; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int i; @@ -2148,33 +2244,44 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } /* copy Tx queue info from VF into VSI */ - vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; - vsi->tx_rings[i]->count = qpi->txq.ring_len; - /* copy Rx queue info from VF into VSI */ - vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; - vsi->rx_rings[i]->count = qpi->rxq.ring_len; - if (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || - qpi->rxq.databuffer_size < 1024) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; + if (qpi->txq.ring_len > 0) { + num_txq++; + vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; + vsi->tx_rings[i]->count = qpi->txq.ring_len; } - vsi->rx_buf_len = qpi->rxq.databuffer_size; - if (qpi->rxq.max_pkt_size >= (16 * 1024) || - qpi->rxq.max_pkt_size < 64) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; + + /* copy Rx queue info from VF into VSI */ + if (qpi->rxq.ring_len > 0) { + num_rxq++; + vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; + vsi->rx_rings[i]->count = qpi->rxq.ring_len; + + if (qpi->rxq.databuffer_size != 0 && + (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || + qpi->rxq.databuffer_size < 1024)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi->rx_buf_len = qpi->rxq.databuffer_size; + vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; + if (qpi->rxq.max_pkt_size >= (16 * 1024) || + qpi->rxq.max_pkt_size < 64) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } } + vsi->max_frame = qpi->rxq.max_pkt_size; } /* VF can request to configure less than allocated queues * or default allocated queues. 
So update the VSI with new number */ - vsi->num_txq = qci->num_queue_pairs; - vsi->num_rxq = qci->num_queue_pairs; + vsi->num_txq = num_txq; + vsi->num_rxq = num_rxq; /* All queues of VF VSI are in TC 0 */ - vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs; - vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs; + vsi->tc_cfg.tc_info[0].qcount_tx = num_txq; + vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq; if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi)) v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 13f45f37d75e..0d9880c8bba3 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -41,9 +41,9 @@ /* Specific VF states */ enum ice_vf_states { - ICE_VF_STATE_INIT = 0, - ICE_VF_STATE_ACTIVE, - ICE_VF_STATE_ENA, + ICE_VF_STATE_INIT = 0, /* PF is initializing VF */ + ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */ + ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */ ICE_VF_STATE_DIS, ICE_VF_STATE_MC_PROMISC, ICE_VF_STATE_UC_PROMISC, @@ -68,6 +68,8 @@ struct ice_vf { struct virtchnl_version_info vf_ver; u32 driver_caps; /* reported by VF driver */ struct virtchnl_ether_addr dflt_lan_addr; + DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF); + DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF); u16 port_vlan_id; u8 pf_set_mac:1; /* VF MAC address set by VMM admin */ u8 trusted:1; @@ -90,6 +92,7 @@ struct ice_vf { u16 num_mac; u16 num_vlan; u16 num_vf_qs; /* num of queue configured per VF */ + u16 num_qs_ena; /* total num of Tx/Rx queue enabled */ }; #ifdef CONFIG_PCI_IOV @@ -116,12 +119,15 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); + +void ice_set_vf_state_qs_dis(struct ice_vf *vf); #else /* CONFIG_PCI_IOV */ #define ice_process_vflr_event(pf) do {} while (0) #define ice_free_vfs(pf) do {} while (0) #define ice_vc_process_vf_msg(pf, event) do {} while (0) #define ice_vc_notify_link_state(pf) do {} while (0) #define ice_vc_notify_reset(pf) do {} while (0) +#define ice_set_vf_state_qs_dis(vf) do {} while (0) static inline bool ice_reset_all_vfs(struct ice_pf __always_unused *pf, -- cgit v1.2.3 From 78b5713ac12417d796bbdcabf67dd94584f6102b Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Fri, 2 Aug 2019 01:25:21 -0700 Subject: ice: Alloc queue management bitmaps and arrays dynamically The total number of queues available on the device is divided between multiple physical functions (PF) in the firmware and provided to the driver when it gets function capabilities from the firmware. Thus each PF knows how many Tx/Rx queues it has. These queues are then doled out to different VSIs (for LAN traffic, SR-IOV VF traffic, etc.) To track usage of these queues at the PF level, the driver uses two bitmaps avail_txqs and avail_rxqs. At the VSI level (i.e. struct ice_vsi instances) the driver uses two arrays txq_map and rxq_map, to track ownership of VSIs' queues in avail_txqs and avail_rxqs respectively. The aforementioned bitmaps and arrays should be allocated dynamically, because the number of queues supported by a PF is only available once function capabilities have been queried. The current static allocation consumes way more memory than required. 
This patch removes the DECLARE_BITMAP for avail_txqs and avail_rxqs and instead uses bitmap_zalloc to allocate the bitmaps during init. Similarly txq_map and rxq_map are now allocated in ice_vsi_alloc_arrays. As a result ICE_MAX_TXQS and ICE_MAX_RXQS defines are no longer needed. Also as txq_map and rxq_map are now allocated and freed, some code reordering was required in ice_vsi_rebuild for correct functioning. Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 12 ++++----- drivers/net/ethernet/intel/ice/ice_lib.c | 45 ++++++++++++++++++++++++------- drivers/net/ethernet/intel/ice/ice_main.c | 40 +++++++++++++++++++++------ 3 files changed, 74 insertions(+), 23 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 97d0f61cf52b..fb2bc836b20a 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -73,8 +73,6 @@ extern const char ice_drv_ver[]; #define ICE_MBXRQ_LEN 512 #define ICE_MIN_MSIX 2 #define ICE_NO_VSI 0xffff -#define ICE_MAX_TXQS 2048 -#define ICE_MAX_RXQS 2048 #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 #define ICE_MAX_SCATTER_TXQS 16 @@ -284,8 +282,8 @@ struct ice_vsi { /* queue information */ u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ - u16 txq_map[ICE_MAX_TXQS]; /* index in pf->avail_txqs */ - u16 rxq_map[ICE_MAX_RXQS]; /* index in pf->avail_rxqs */ + u16 *txq_map; /* index in pf->avail_txqs */ + u16 *rxq_map; /* index in pf->avail_rxqs */ u16 alloc_txq; /* Allocated Tx queues */ u16 num_txq; /* Used Tx queues */ u16 alloc_rxq; /* Allocated Rx queues */ @@ -355,9 +353,9 @@ struct ice_pf { u16 num_vf_qps; /* num queue pairs per VF */ u16 num_vf_msix; /* num vectors per VF */ DECLARE_BITMAP(state, __ICE_STATE_NBITS); - DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS); - DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); + unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ + unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */ unsigned long serv_tmr_period; unsigned long serv_tmr_prev; struct timer_list serv_tmr; @@ -368,6 +366,8 @@ struct ice_pf { u32 hw_csum_rx_error; u32 oicr_idx; /* Other interrupt cause MSIX vector index */ u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ + u16 max_pf_txqs; /* Total Tx queues PF wide */ + u16 max_pf_rxqs; /* Total Rx queues PF wide */ u32 num_lan_msix; /* Total MSIX vectors for base driver */ u16 num_lan_tx; /* num LAN Tx queues setup */ u16 num_lan_rx; /* num LAN Rx queues setup */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index fb866be84088..a39767e8c2a2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -263,12 +263,24 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, sizeof(*vsi->tx_rings), GFP_KERNEL); if (!vsi->tx_rings) - goto err_txrings; + return -ENOMEM; vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, sizeof(*vsi->rx_rings), GFP_KERNEL); if (!vsi->rx_rings) - goto err_rxrings; + goto err_rings; + + vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + sizeof(*vsi->txq_map), GFP_KERNEL); + + if (!vsi->txq_map) + goto err_txq_map; + + vsi->rxq_map = 
devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, + sizeof(*vsi->rxq_map), GFP_KERNEL); + if (!vsi->rxq_map) + goto err_rxq_map; + /* There is no need to allocate q_vectors for a loopback VSI. */ if (vsi->type == ICE_VSI_LB) @@ -283,10 +295,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) return 0; err_vectors: + devm_kfree(&pf->pdev->dev, vsi->rxq_map); +err_rxq_map: + devm_kfree(&pf->pdev->dev, vsi->txq_map); +err_txq_map: devm_kfree(&pf->pdev->dev, vsi->rx_rings); -err_rxrings: +err_rings: devm_kfree(&pf->pdev->dev, vsi->tx_rings); -err_txrings: return -ENOMEM; } @@ -433,6 +448,14 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) devm_kfree(&pf->pdev->dev, vsi->rx_rings); vsi->rx_rings = NULL; } + if (vsi->txq_map) { + devm_kfree(&pf->pdev->dev, vsi->txq_map); + vsi->txq_map = NULL; + } + if (vsi->rxq_map) { + devm_kfree(&pf->pdev->dev, vsi->rxq_map); + vsi->rxq_map = NULL; + } } /** @@ -664,7 +687,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) struct ice_qs_cfg tx_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, .pf_map = pf->avail_txqs, - .pf_map_size = ICE_MAX_TXQS, + .pf_map_size = pf->max_pf_txqs, .q_count = vsi->alloc_txq, .scatter_count = ICE_MAX_SCATTER_TXQS, .vsi_map = vsi->txq_map, @@ -674,7 +697,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) struct ice_qs_cfg rx_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, .pf_map = pf->avail_rxqs, - .pf_map_size = ICE_MAX_RXQS, + .pf_map_size = pf->max_pf_rxqs, .q_count = vsi->alloc_rxq, .scatter_count = ICE_MAX_SCATTER_RXQS, .vsi_map = vsi->rxq_map, @@ -3018,6 +3041,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) vsi->base_vector = 0; } + ice_vsi_put_qs(vsi); ice_vsi_clear_rings(vsi); ice_vsi_free_arrays(vsi); ice_dev_onetime_setup(&pf->hw); @@ -3025,6 +3049,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ice_vsi_set_num_qs(vsi, vf->vf_id); else ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); + + ret = ice_vsi_alloc_arrays(vsi); + if (ret < 0) + goto err_vsi; + + ice_vsi_get_qs(vsi); ice_vsi_set_tc_cfg(vsi); /* Initialize VSI struct elements and create VSI in FW */ @@ -3032,9 +3062,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret < 0) goto err_vsi; - ret = ice_vsi_alloc_arrays(vsi); - if (ret < 0) - goto err_vsi; switch (vsi->type) { case ICE_VSI_PF: diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index e47aab6d998d..2499c7ee5038 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2207,13 +2207,23 @@ static void ice_deinit_pf(struct ice_pf *pf) ice_service_task_stop(pf); mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->avail_q_mutex); + + if (pf->avail_txqs) { + bitmap_free(pf->avail_txqs); + pf->avail_txqs = NULL; + } + + if (pf->avail_rxqs) { + bitmap_free(pf->avail_rxqs); + pf->avail_rxqs = NULL; + } } /** * ice_init_pf - Initialize general software structures (struct ice_pf) * @pf: board private structure to initialize */ -static void ice_init_pf(struct ice_pf *pf) +static int ice_init_pf(struct ice_pf *pf) { bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); #ifdef CONFIG_PCI_IOV @@ -2229,12 +2239,6 @@ static void ice_init_pf(struct ice_pf *pf) mutex_init(&pf->sw_mutex); mutex_init(&pf->avail_q_mutex); - /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */ - mutex_lock(&pf->avail_q_mutex); - bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS); - bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS); - mutex_unlock(&pf->avail_q_mutex); - if (pf->hw.func_caps.common_cap.rss_table_size) set_bit(ICE_FLAG_RSS_ENA, pf->flags); @@ -2243,6 +2247,22 @@ static 
void ice_init_pf(struct ice_pf *pf) pf->serv_tmr_period = HZ; INIT_WORK(&pf->serv_task, ice_service_task); clear_bit(__ICE_SERVICE_SCHED, pf->state); + + pf->max_pf_txqs = pf->hw.func_caps.common_cap.num_txq; + pf->max_pf_rxqs = pf->hw.func_caps.common_cap.num_rxq; + + pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); + if (!pf->avail_txqs) + return -ENOMEM; + + pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); + if (!pf->avail_rxqs) { + devm_kfree(&pf->pdev->dev, pf->avail_txqs); + pf->avail_txqs = NULL; + return -ENOMEM; + } + + return 0; } /** @@ -2467,7 +2487,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, hw->api_maj_ver, hw->api_min_ver); - ice_init_pf(pf); + err = ice_init_pf(pf); + if (err) { + dev_err(dev, "ice_init_pf failed: %d\n", err); + goto err_init_pf_unroll; + } err = ice_init_pf_dcb(pf, false); if (err) { -- cgit v1.2.3 From cb6a8dc07827086a7ce4151c62367d959c6ad5f1 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Fri, 2 Aug 2019 01:25:22 -0700 Subject: ice: Fix VF configuration issues due to reset This patch fixes a critical reset issue that resulted in a server reboot when an admin changed a VF's configuration on the host, for example switching a VF between Trusted and non-Trusted mode. In that case the PF driver sends a reset notification to the AVF driver while also continuing with its own reset flow. The AVF driver then schedules another reset in response to the notification, so two resets run concurrently and the FW locks up on the AQ call to delete the VSI. Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 78fd3fa8ac8b..b93324e9f4bc 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -1152,12 +1152,19 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) u32 reg; int i; - /* If the VFs have been disabled, this means something else is - * resetting the VF, so we shouldn't continue. + /* If the PF has been disabled, there is no need resetting VF until + * PF is active again. */ if (test_bit(__ICE_VF_DIS, pf->state)) return false; + /* If the VF has been disabled, this means something else is + * resetting the VF, so we shouldn't continue. Otherwise, set + * disable VF state bit for actual reset, and continue. + */ + if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states)) + return false; + ice_trigger_vf_reset(vf, is_vflr); vsi = pf->vsi[vf->lan_vsi_idx]; -- cgit v1.2.3 From 152b978a1f904f252ae3df548bc2e183e7368c6d Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Fri, 2 Aug 2019 01:25:23 -0700 Subject: ice: Rework ice_ena_msix_range The current implementation of ice_ena_msix_range is difficult to read and has subtle issues. This patch reworks the said function for clarity and correctness. More specifically, 1. Add more checks to bail out if 'needed' is greater than 'v_left'. 2. Simplify fallback logic. 3. Do not set pf->num_avail_sw_msix in ice_ena_msix_range as it gets overwritten by ice_init_interrupt_scheme.
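To make points 1 and 2 concrete, the reworked reservation logic has roughly the following shape. This is only a sketch using plain ints in place of the driver's pf fields; the actual change is in the diff below.

/* Sketch of the reservation pattern: reserve a block of vectors only while
 * enough remain, otherwise bail out (the driver returns -ERANGE here).
 */
static int msix_reserve_sketch(int v_left, int num_cpus)
{
	int v_budget = 0, needed;

	/* one vector for the miscellaneous/OICR handler */
	needed = 1;
	if (v_left < needed)
		return -1;	/* bail out: not enough device vectors */
	v_budget += needed;
	v_left -= needed;

	/* LAN traffic vectors: ideally one per CPU, capped by what is left */
	needed = num_cpus < v_left ? num_cpus : v_left;
	v_budget += needed;
	v_left -= needed;

	/* v_budget is what gets requested from the OS; if the OS grants
	 * fewer, the driver falls back to a 2-vector minimum or fails.
	 */
	return v_budget;
}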
Signed-off-by: Anirudh Venkataramanan Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 32 ++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2499c7ee5038..9371148dc489 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2281,13 +2281,18 @@ static int ice_ena_msix_range(struct ice_pf *pf) /* reserve one vector for miscellaneous handler */ needed = 1; + if (v_left < needed) + goto no_hw_vecs_left_err; v_budget += needed; v_left -= needed; /* reserve vectors for LAN traffic */ - pf->num_lan_msix = min_t(int, num_online_cpus(), v_left); - v_budget += pf->num_lan_msix; - v_left -= pf->num_lan_msix; + needed = min_t(int, num_online_cpus(), v_left); + if (v_left < needed) + goto no_hw_vecs_left_err; + pf->num_lan_msix = needed; + v_budget += needed; + v_left -= needed; pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, sizeof(*pf->msix_entries), GFP_KERNEL); @@ -2312,18 +2317,18 @@ static int ice_ena_msix_range(struct ice_pf *pf) if (v_actual < v_budget) { dev_warn(&pf->pdev->dev, - "not enough vectors. requested = %d, obtained = %d\n", + "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", v_budget, v_actual); - if (v_actual >= (pf->num_lan_msix + 1)) { - pf->num_avail_sw_msix = v_actual - - (pf->num_lan_msix + 1); - } else if (v_actual >= 2) { - pf->num_lan_msix = 1; - pf->num_avail_sw_msix = v_actual - 2; - } else { +/* 2 vectors for LAN (traffic + OICR) */ +#define ICE_MIN_LAN_VECS 2 + + if (v_actual < ICE_MIN_LAN_VECS) { + /* error if we can't get minimum vectors */ pci_disable_msix(pf->pdev); err = -ERANGE; goto msix_err; + } else { + pf->num_lan_msix = ICE_MIN_LAN_VECS; } } @@ -2333,6 +2338,11 @@ msix_err: devm_kfree(&pf->pdev->dev, pf->msix_entries); goto exit_err; +no_hw_vecs_left_err: + dev_err(&pf->pdev->dev, + "not enough device MSI-X vectors. requested = %d, available = %d\n", + needed, v_left); + err = -ERANGE; exit_err: pf->num_lan_msix = 0; return err; -- cgit v1.2.3 From ae2bdbb45d38ee2ec67a958bf221138b58817c82 Mon Sep 17 00:00:00 2001 From: Henry Tieman Date: Fri, 2 Aug 2019 01:25:24 -0700 Subject: ice: fix adminq calls during remove The order of operations was incorrect in ice_remove(). The code would try to use adminq operations after the adminq was disabled. This caused all adminq calls to fail and possibly timeout waiting. 
Signed-off-by: Henry Tieman Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 9371148dc489..f029aee32913 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2633,9 +2633,9 @@ static void ice_remove(struct pci_dev *pdev) continue; ice_vsi_free_q_vectors(pf->vsi[i]); } - ice_clear_interrupt_scheme(pf); ice_deinit_pf(pf); ice_deinit_hw(&pf->hw); + ice_clear_interrupt_scheme(pf); pci_disable_pcie_error_reporting(pdev); } -- cgit v1.2.3 From 03af840650bb4a1cfa95ea93493d04b679ab5f5c Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Fri, 2 Aug 2019 01:25:25 -0700 Subject: ice: Fix EMP reset handling ice_reset_subtask needs to handle EMP resets as well, as EMP resets can be triggered by the firmware. This patch adds the logic to do this. Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f029aee32913..b62c01ca9c28 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -567,6 +567,8 @@ static void ice_reset_subtask(struct ice_pf *pf) reset_type = ICE_RESET_CORER; if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state)) reset_type = ICE_RESET_GLOBR; + if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state)) + reset_type = ICE_RESET_EMPR; /* return if no valid reset type requested */ if (reset_type == ICE_RESET_INVAL) return; -- cgit v1.2.3 From 8132e17dfb1683f1ef7ebf6fa8b61c1f48152660 Mon Sep 17 00:00:00 2001 From: Jeb Cramer Date: Fri, 2 Aug 2019 01:25:26 -0700 Subject: ice: Fix resource leak in ice_remove_rule_internal() We don't free s_rule if ice_aq_sw_rules() returns a non-zero status. If it returned a zero status, s_rule would be freed right after, so this implies it should be freed within the scope of the function regardless. Signed-off-by: Jeb Cramer Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_switch.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 99cf527d2b1a..1acdd43a2edd 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -1623,12 +1623,13 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, ice_aqc_opc_remove_sw_rules, NULL); - if (status) - goto exit; /* Remove a book keeping from the list */ devm_kfree(ice_hw_to_dev(hw), s_rule); + if (status) + goto exit; + list_del(&list_elem->list_entry); devm_kfree(ice_hw_to_dev(hw), list_elem); } -- cgit v1.2.3 From 567af267fa1d95d13b0fdfa1fdc18ec474220b88 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Fri, 2 Aug 2019 01:25:27 -0700 Subject: ice: Report what the user set for coalesce [tx|rx]-usecs Currently if the user sets an odd value for [tx|rx]-usecs we align the value because the hardware only understands ITR values in multiples of 2. 
This seems misleading because we are essentially telling the user that the ITR value is odd, when in fact we have changed it internally. Fix this by reporting that setting odd ITR values is not allowed. Also, while making changes to ice_set_rc_coalesce() I noticed a bit of code/error duplication. Make the necessary changes to remove the duplication. Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_ethtool.c | 88 ++++++++++++++-------------- 1 file changed, 44 insertions(+), 44 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index f7dd0bd03d39..edba5bd79097 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3253,25 +3253,25 @@ static int ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, struct ice_ring_container *rc, struct ice_vsi *vsi) { + const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx"; + u32 use_adaptive_coalesce, coalesce_usecs; struct ice_pf *pf = vsi->back; u16 itr_setting; if (!rc->ring) return -EINVAL; - itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC; - switch (c_type) { case ICE_RX_CONTAINER: if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL || (ec->rx_coalesce_usecs_high && ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) { netdev_info(vsi->netdev, - "Invalid value, rx-usecs-high valid values are 0 (disabled), %d-%d\n", - pf->hw.intrl_gran, ICE_MAX_INTRL); + "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n", + c_type_str, pf->hw.intrl_gran, + ICE_MAX_INTRL); return -EINVAL; } - if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) { rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high; wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx), @@ -3279,60 +3279,60 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, pf->hw.intrl_gran)); } - if (ec->rx_coalesce_usecs != itr_setting && - ec->use_adaptive_rx_coalesce) { - netdev_info(vsi->netdev, - "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); - return -EINVAL; - } + use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; + coalesce_usecs = ec->rx_coalesce_usecs; - if (ec->rx_coalesce_usecs > ICE_ITR_MAX) { - netdev_info(vsi->netdev, - "Invalid value, rx-usecs range is 0-%d\n", - ICE_ITR_MAX); - return -EINVAL; - } - - if (ec->use_adaptive_rx_coalesce) { - rc->itr_setting |= ICE_ITR_DYNAMIC; - } else { - rc->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); - rc->target_itr = ITR_TO_REG(rc->itr_setting); - } break; case ICE_TX_CONTAINER: if (ec->tx_coalesce_usecs_high) { netdev_info(vsi->netdev, - "setting tx-usecs-high is not supported\n"); - return -EINVAL; - } - - if (ec->tx_coalesce_usecs != itr_setting && - ec->use_adaptive_tx_coalesce) { - netdev_info(vsi->netdev, - "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); + "setting %s-usecs-high is not supported\n", + c_type_str); return -EINVAL; } - if (ec->tx_coalesce_usecs > ICE_ITR_MAX) { - netdev_info(vsi->netdev, - "Invalid value, tx-usecs range is 0-%d\n", - ICE_ITR_MAX); - return -EINVAL; - } + use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; + coalesce_usecs = ec->tx_coalesce_usecs; - if (ec->use_adaptive_tx_coalesce) { - rc->itr_setting |= ICE_ITR_DYNAMIC; - } else { - rc->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); - rc->target_itr = 
ITR_TO_REG(rc->itr_setting); - } break; default: dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type); return -EINVAL; } + itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC; + if (coalesce_usecs != itr_setting && use_adaptive_coalesce) { + netdev_info(vsi->netdev, + "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n", + c_type_str, c_type_str); + return -EINVAL; + } + + if (coalesce_usecs > ICE_ITR_MAX) { + netdev_info(vsi->netdev, + "Invalid value, %s-usecs range is 0-%d\n", + c_type_str, ICE_ITR_MAX); + return -EINVAL; + } + + /* hardware only supports an ITR granularity of 2us */ + if (coalesce_usecs % 2 != 0) { + netdev_info(vsi->netdev, + "Invalid value, %s-usecs must be even\n", + c_type_str); + return -EINVAL; + } + + if (use_adaptive_coalesce) { + rc->itr_setting |= ICE_ITR_DYNAMIC; + } else { + /* store user facing value how it was set */ + rc->itr_setting = coalesce_usecs; + /* set to static and convert to value HW understands */ + rc->target_itr = + ITR_TO_REG(ITR_REG_ALIGN(rc->itr_setting)); + } + return 0; } -- cgit v1.2.3 From d24ef08a9d9405c09505f41dd675dea0039dd694 Mon Sep 17 00:00:00 2001 From: Chinh T Cao Date: Fri, 2 Aug 2019 01:25:28 -0700 Subject: ice: Deduce TSA value from the priority value in the CEE mode In CEE mode, the TSA information can be derived from the reported priority value. Signed-off-by: Chinh T Cao Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_dcb.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index d60c942249e8..c5ee8d930611 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -444,9 +444,15 @@ ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv, * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| * --------------------------------- */ - ice_for_each_traffic_class(i) + ice_for_each_traffic_class(i) { etscfg->tcbwtable[i] = buf[offset++]; + if (etscfg->prio_table[i] == ICE_CEE_PGID_STRICT) + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT; + else + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS; + } + /* Number of TCs supported (1 octet) */ etscfg->maxtcs = buf[offset]; } -- cgit v1.2.3 From 18057cb3578ae950c73b20588ad66a1f45fba68c Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 2 Aug 2019 01:25:29 -0700 Subject: ice: add needed PFR during driver unload According to the specification, a PF Reset must be done as part of the driver unload flow. Signed-off-by: Bruce Allan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index b62c01ca9c28..8217b81eb9d8 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2638,6 +2638,11 @@ static void ice_remove(struct pci_dev *pdev) ice_deinit_pf(pf); ice_deinit_hw(&pf->hw); ice_clear_interrupt_scheme(pf); + /* Issue a PFR as part of the prescribed driver unload flow. Do not + * do it via ice_schedule_reset() since there is no need to rebuild + * and the service task is already stopped. 
+ */ + ice_reset(&pf->hw, ICE_RESET_PFR); pci_disable_pcie_error_reporting(pdev); } -- cgit v1.2.3 From 7404e84a2332572da70aabb081f9a933309d57a4 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 2 Aug 2019 01:25:30 -0700 Subject: ice: update driver unloading field for Queue Shutdown AQ command According to recent specification versions, the field in the Queue Shutdown AdminQ command consisting of the "driver unloading" indication is not a 4 byte field (it is byte.bit 16.0). Change it to a byte and remove the unnecessary endian conversion. Signed-off-by: Bruce Allan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 4 ++-- drivers/net/ethernet/intel/ice/ice_common.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index bf9aa533a7c6..8ebc695171b6 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -35,9 +35,9 @@ struct ice_aqc_get_ver { /* Queue Shutdown (direct 0x0003) */ struct ice_aqc_q_shutdown { - __le32 driver_unloading; + u8 driver_unloading; #define ICE_AQC_DRIVER_UNLOADING BIT(0) - u8 reserved[12]; + u8 reserved[15]; }; /* Request resource ownership (direct 0x0008) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 302ad981129c..6c0abb284c10 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1275,7 +1275,7 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); if (unloading) - cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING); + cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } -- cgit v1.2.3 From 432609887aa9b6d5ef237f779db6939ac5e8b7a2 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Fri, 2 Aug 2019 01:25:31 -0700 Subject: ice: add print of autoneg state to link message Print the state of auto-negotiation when printing the Link up message. 
Adds new text to the "NIC Link is up" line like Autoneg: Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 8217b81eb9d8..905aab017e6f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -626,6 +626,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) const char *speed; const char *fec; const char *fc; + const char *an; if (!vsi) return; @@ -709,6 +710,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) break; } + /* check if autoneg completed, might be false due to not supported */ + if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) + an = "True"; + else + an = "False"; + /* Get FEC mode requested based on PHY caps last SW configuration */ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); if (!caps) { @@ -733,8 +740,8 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) devm_kfree(&vsi->back->pdev->dev, caps); done: - netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Flow Control: %s\n", - speed, fec_req, fec, fc); + netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n", + speed, fec_req, fec, an, fc); } /** -- cgit v1.2.3 From 2e0ab37c04c2f18d91d55fd417dc8d4a45843a9f Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Fri, 2 Aug 2019 01:25:32 -0700 Subject: ice: print extra message if topology issue The driver needs to inform the user if there is an issue with the topology / configuration of the link. 
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 905aab017e6f..732397a2e8fa 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -613,6 +613,22 @@ static void ice_reset_subtask(struct ice_pf *pf) } } +/** + * ice_print_topo_conflict - print topology conflict message + * @vsi: the VSI whose topology status is being checked + */ +static void ice_print_topo_conflict(struct ice_vsi *vsi) +{ + switch (vsi->port_info->phy.link_info.topo_media_conflict) { + case ICE_AQ_LINK_TOPO_CONFLICT: + case ICE_AQ_LINK_MEDIA_CONFLICT: + netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n"); + break; + default: + break; + } +} + /** * ice_print_link_msg - print link up or down message * @vsi: the VSI whose link status is being queried @@ -742,6 +758,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) done: netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n", speed, fec_req, fec, an, fc); + ice_print_topo_conflict(vsi); } /** -- cgit v1.2.3 From 6a025730e0cdf47f8cdc323708b9cbe23fdff7f7 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Fri, 2 Aug 2019 01:25:33 -0700 Subject: ice: Cleanup defines in ice_type.h Conventionally, if the #defines/other are not needed by other header files being included, #includes are done first followed by #defines and other stuff. Move the #defines before the #includes to follow this convention. Suggested by: Bruce Allan Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_type.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index b538d0b9eb80..40b028e73234 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -4,15 +4,15 @@ #ifndef _ICE_TYPE_H_ #define _ICE_TYPE_H_ +#define ICE_BYTES_PER_WORD 2 +#define ICE_BYTES_PER_DWORD 4 + #include "ice_status.h" #include "ice_hw_autogen.h" #include "ice_osdep.h" #include "ice_controlq.h" #include "ice_lan_tx_rx.h" -#define ICE_BYTES_PER_WORD 2 -#define ICE_BYTES_PER_DWORD 4 - static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { return test_bit(tc, &bitmap); -- cgit v1.2.3 From a257f188b72bf0f8b5a08efba174373f5708ff0c Mon Sep 17 00:00:00 2001 From: Usha Ketineni Date: Thu, 8 Aug 2019 07:39:24 -0700 Subject: ice: Limit Max TCs on devices with more than 4 ports This patch limits the max TCs set by the driver to the value provided by the firmware as per the capabilities of the device. Otherwise, hard coding to 8 TC max would fail the device configurations with more than 4 ports. 
Signed-off-by: Usha Ketineni Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 1 + drivers/net/ethernet/intel/ice/ice_common.c | 12 ++++++++++++ drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 10 ++++++++-- drivers/net/ethernet/intel/ice/ice_type.h | 3 +++ 4 files changed, 24 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 8ebc695171b6..4da0cde9695b 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -91,6 +91,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_SRIOV 0x0012 #define ICE_AQC_CAPS_VF 0x0013 #define ICE_AQC_CAPS_VSI 0x0017 +#define ICE_AQC_CAPS_DCB 0x0018 #define ICE_AQC_CAPS_RSS 0x0040 #define ICE_AQC_CAPS_RXQS 0x0041 #define ICE_AQC_CAPS_TXQS 0x0042 diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 6c0abb284c10..9492cd34b09d 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1594,6 +1594,18 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, prefix, func_p->guar_num_vsi); } break; + case ICE_AQC_CAPS_DCB: + caps->dcb = (number == 1); + caps->active_tc_bitmap = logical_id; + caps->maxtc = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: DCB = %d\n", prefix, caps->dcb); + ice_debug(hw, ICE_DBG_INIT, + "%s: active TC bitmap = %d\n", prefix, + caps->active_tc_bitmap); + ice_debug(hw, ICE_DBG_INIT, + "%s: TC max = %d\n", prefix, caps->maxtc); + break; case ICE_AQC_CAPS_RSS: caps->rss_table_size = number; caps->rss_table_entry_width = logical_id; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index d9578919aad8..4614ec95529b 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -413,7 +413,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg)); dcbcfg->etscfg.willing = 1; - dcbcfg->etscfg.maxtcs = 8; + dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc; dcbcfg->etscfg.tcbwtable[0] = 100; dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS; @@ -422,7 +422,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) dcbcfg->etsrec.willing = 0; dcbcfg->pfc.willing = 1; - dcbcfg->pfc.pfccap = IEEE_8021QAZ_MAX_TCS; + dcbcfg->pfc.pfccap = hw->func_caps.common_cap.maxtc; dcbcfg->numapps = 1; dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE; @@ -454,6 +454,9 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) err = ice_init_dcb(hw); if (err) { /* FW LLDP is disabled, activate SW DCBX/LLDP mode */ + dev_info(&pf->pdev->dev, + "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n", + pf->hw.func_caps.common_cap.maxtc); dev_info(&pf->pdev->dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); port_info->is_sw_lldp = true; @@ -484,6 +487,9 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) if (err) goto dcb_init_err; + dev_info(&pf->pdev->dev, + "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n", + pf->hw.func_caps.common_cap.maxtc); dev_info(&pf->pdev->dev, "DCBX offload supported\n"); return err; diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 40b028e73234..4501d50a7dcc 
100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -139,6 +139,9 @@ struct ice_phy_info { /* Common HW capabilities for SW use */ struct ice_hw_common_caps { u32 valid_functions; + /* DCB capabilities */ + u32 active_tc_bitmap; + u32 maxtc; /* Tx/Rx queues */ u16 num_rxq; /* Number/Total Rx queues */ -- cgit v1.2.3 From 473ca574884bdb674cf5bb2c5d27c21017f6b55f Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Thu, 8 Aug 2019 07:39:25 -0700 Subject: ice: Correctly handle return values for init DCB In the init path for DCB, the call to ice_init_dcb() can return a non-zero value for either an actual error, or due to the FW lldp engine being stopped. We are currently treating all non-zero values only as an indication that the FW LLDP engine is stopped. Check for an actual error in the DCB init flow. Signed-off-by: Dave Ertman Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 4614ec95529b..021e2e81d731 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -452,14 +452,18 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) port_info = hw->port_info; err = ice_init_dcb(hw); + if (err && !port_info->is_sw_lldp) { + dev_err(&pf->pdev->dev, "Error initializing DCB %d\n", err); + goto dcb_init_err; + } + + dev_info(&pf->pdev->dev, + "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n", + pf->hw.func_caps.common_cap.maxtc); if (err) { /* FW LLDP is disabled, activate SW DCBX/LLDP mode */ - dev_info(&pf->pdev->dev, - "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n", - pf->hw.func_caps.common_cap.maxtc); dev_info(&pf->pdev->dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); - port_info->is_sw_lldp = true; clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); err = ice_dcb_sw_dflt_cfg(pf, locked); if (err) { @@ -475,7 +479,6 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) return 0; } - port_info->is_sw_lldp = false; set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); /* DCBX in FW and LLDP enabled in FW */ @@ -487,10 +490,6 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) if (err) goto dcb_init_err; - dev_info(&pf->pdev->dev, - "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n", - pf->hw.func_caps.common_cap.maxtc); - dev_info(&pf->pdev->dev, "DCBX offload supported\n"); return err; dcb_init_err: -- cgit v1.2.3 From 06914ac20abba21bb810fd96eb85388a402add9b Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Thu, 8 Aug 2019 07:39:26 -0700 Subject: ice: Always notify FW of VF reset The call to ice_dis_vsi_txq() acts as the notification to the firmware that the VF is being reset. Because of this, we need to make this call every time we reset, regardless of whatever else we do to stop the Tx queues. Without this change, VF resets would fail to complete on interfaces that were up and running. 
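In outline, the reset paths now look like the following, condensed from the diff that follows (the surrounding loop and error handling are omitted):

/* Stop the VF's queues if they are currently enabled, but issue the
 * disable LAN Tx queue AQ call unconditionally -- that call is what
 * notifies the firmware that the VF is being reset.
 */
if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
	ice_dis_vf_qs(vf);

ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
		NULL, ICE_VF_RESET, vf->vf_id, NULL);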
Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 25 +++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index b93324e9f4bc..c58e3e3212df 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -1074,9 +1074,16 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) for (v = 0; v < pf->num_alloc_vfs; v++) ice_trigger_vf_reset(&pf->vf[v], is_vflr); - for (v = 0; v < pf->num_alloc_vfs; v++) - if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[v].vf_states)) - ice_dis_vf_qs(&pf->vf[v]); + for (v = 0; v < pf->num_alloc_vfs; v++) { + struct ice_vsi *vsi; + + vf = &pf->vf[v]; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) + ice_dis_vf_qs(vf); + ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, + NULL, ICE_VF_RESET, vf->vf_id, NULL); + } /* HW requires some time to make sure it can flush the FIFO for a VF * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in @@ -1171,12 +1178,12 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) ice_dis_vf_qs(vf); - else - /* Call Disable LAN Tx queue AQ call even when queues are not - * enabled. This is needed for successful completion of VFR - */ - ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, - NULL, ICE_VF_RESET, vf->vf_id, NULL); + + /* Call Disable LAN Tx queue AQ whether or not queues are + * enabled. This is needed for successful completion of VFR. + */ + ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, + NULL, ICE_VF_RESET, vf->vf_id, NULL); hw = &pf->hw; /* poll VPGEN_VFRSTAT reg to make sure -- cgit v1.2.3 From 3d57fd10f2c9100b32d4dd3ab060f12f8c5583a2 Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Thu, 8 Aug 2019 07:39:28 -0700 Subject: ice: Report stats when VSI is down There is currently a check in get_ndo_stats that returns before updating stats if the VSI is down or there are no Tx or Rx queues. This causes the netdev to report zero stats with the netdev is down. Remove the check so that the behavior of reporting stats is the same as it was in IXGBE. Signed-off-by: Dave Ertman Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 732397a2e8fa..50a17a0337be 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3477,12 +3477,16 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) vsi_stats = &vsi->net_stats; - if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq) + if (!vsi->num_txq || !vsi->num_rxq) return; + /* netdev packet/byte stats come from ring counter. These are obtained * by summing up ring counters (done by ice_update_vsi_ring_stats). + * But, only call the update routine and read the registers if VSI is + * not down. 
*/ - ice_update_vsi_ring_stats(vsi); + if (!test_bit(__ICE_DOWN, vsi->state)) + ice_update_vsi_ring_stats(vsi); stats->tx_packets = vsi_stats->tx_packets; stats->tx_bytes = vsi_stats->tx_bytes; stats->rx_packets = vsi_stats->rx_packets; -- cgit v1.2.3 From 03bba02016f9b493cd0ebef1e0311b3bef730ee2 Mon Sep 17 00:00:00 2001 From: Michal Swiatkowski Date: Thu, 8 Aug 2019 07:39:29 -0700 Subject: ice: Remove enable DCB when SW LLDP is activated Remove code that enables DCB in initialization when SW LLDP is activated. DCB flag is set or reset before in ice_init_pf_dcb based on number of TCs. So there is not need to overwrite it. Setting DCB without checking number of TCs can cause communication problems with other cards. Host card sends packet with VLAN priority tag, but client card doesn't strip this tag and ping doesn't work. Signed-off-by: Michal Swiatkowski Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 021e2e81d731..e922adf1fa15 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -475,7 +475,6 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - set_bit(ICE_FLAG_DCB_ENA, pf->flags); return 0; } -- cgit v1.2.3 From cd186e51513c6b9cfdfa7280acf37db7e3dd114b Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 8 Aug 2019 07:39:30 -0700 Subject: ice: Only disable VLAN pruning for the VF when all VLANs are removed Currently if the VF adds a VLAN, VLAN pruning will be enabled for that VSI. Also, when a VLAN gets deleted it will disable VLAN pruning even if other VLAN(s) exists for the VF. Fix this by only disabling VLAN pruning on the VF VSI when removing the last VF (i.e. vf->num_vlan == 0). Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index c58e3e3212df..c38939b1d496 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -2767,8 +2767,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) } vf->num_vlan--; - /* Disable VLAN pruning when removing VLAN */ - ice_cfg_vlan_pruning(vsi, false, false); + /* Disable VLAN pruning when the last VLAN is removed */ + if (!vf->num_vlan) + ice_cfg_vlan_pruning(vsi, false, false); /* Disable Unicast/Multicast VLAN promiscuous mode */ if (vlan_promisc) { -- cgit v1.2.3 From 34cdcb165b05acfce9b0b6611306ec24740ea91b Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 8 Aug 2019 07:39:31 -0700 Subject: ice: Update fields in ice_vsi_set_num_qs when reconfiguring Currently when vsi->req_txqs or vsi->req_rxqs are set we don't correctly set the number of vsi->num_q_vectors. Fix this by setting the number of queue vectors based on the max between the vsi->alloc_txqs and vsi->alloc_rxqs. 
Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index a39767e8c2a2..6cc01ebc0b01 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -345,7 +345,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) case ICE_VSI_PF: vsi->alloc_txq = pf->num_lan_tx; vsi->alloc_rxq = pf->num_lan_rx; - vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); + vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq); break; case ICE_VSI_VF: vf = &pf->vf[vsi->vf_id]; -- cgit v1.2.3 From 208ff75135cd68a42638221626d2af2f8f17a3cb Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Thu, 8 Aug 2019 07:39:33 -0700 Subject: ice: Add ice_get_main_vsi to get PF/main VSI There are multiple places where we currently use ice_find_vsi_by_type to get the PF (a.k.a. main) VSI. The PF VSI by definition is always the first element in the pf->vsi array (i.e. pf->vsi[0]). So instead add and use a new helper function ice_get_main_vsi, which just returns pf->vsi[0]. Signed-off-by: Anirudh Venkataramanan Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 20 +++++++------------- drivers/net/ethernet/intel/ice/ice_main.c | 6 +++--- 2 files changed, 10 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index fb2bc836b20a..bbb3c290a0bf 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -425,21 +425,15 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, } /** - * ice_find_vsi_by_type - Find and return VSI of a given type - * @pf: PF to search for VSI - * @type: Value indicating type of VSI we are looking for + * ice_get_main_vsi - Get the PF VSI + * @pf: PF instance + * + * returns pf->vsi[0], which by definition is the PF VSI */ -static inline struct ice_vsi * -ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type) +static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) { - int i; - - for (i = 0; i < pf->num_alloc_vsi; i++) { - struct ice_vsi *vsi = pf->vsi[i]; - - if (vsi && vsi->type == type) - return vsi; - } + if (pf->vsi) + return pf->vsi[0]; return NULL; } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 50a17a0337be..703fc7bf2b31 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -120,7 +120,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf) u8 broadcast[ETH_ALEN]; struct ice_vsi *vsi; - vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); + vsi = ice_get_main_vsi(pf); if (!vsi) return -EINVAL; @@ -826,7 +826,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (link_up == old_link && link_speed == old_link_speed) return result; - vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); + vsi = ice_get_main_vsi(pf); if (!vsi || !vsi->port_info) return -EINVAL; @@ -1439,7 +1439,7 @@ static void ice_check_media_subtask(struct ice_pf *pf) struct ice_vsi *vsi; int err; - vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); + vsi = ice_get_main_vsi(pf); if (!vsi) return; -- cgit v1.2.3 From 
ade78c2ec1def21bdaa597d4d01a7d6b9343fa60 Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Thu, 8 Aug 2019 07:39:34 -0700 Subject: ice: Check root pointer for validity ice_sched_get_tc_node uses pi->root without checking for NULL. Add a check to prevent NULL pointer dereference. Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 79d64f9ed609..fc624b73d05d 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -284,7 +284,7 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc) { u8 i; - if (!pi) + if (!pi || !pi->root) return NULL; for (i = 0; i < pi->root->num_children; i++) if (pi->root->children[i]->tc_num == tc) -- cgit v1.2.3 From 2fb0821fd54cbc6c87f58f1ab5449af191bf8aac Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 8 Aug 2019 07:39:35 -0700 Subject: ice: clean up arguments There are a couple of functions that don't need two arguments passed in when the second argument already had access to the pointer pointed to by the first. Remove the unnecessary arguments. Signed-off-by: Jesse Brandeburg Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_txrx.c | 43 +++++++++++++++---------------- 1 file changed, 21 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 5bf5c179a738..4fe1b332e67e 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -95,17 +95,16 @@ void ice_free_tx_ring(struct ice_ring *tx_ring) /** * ice_clean_tx_irq - Reclaim resources after transmit completes - * @vsi: the VSI we care about * @tx_ring: Tx ring to clean * @napi_budget: Used to determine if we are in netpoll * * Returns true if there's any budget left (e.g. 
the clean is finished) */ -static bool -ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) +static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) { unsigned int total_bytes = 0, total_pkts = 0; - unsigned int budget = vsi->work_lmt; + unsigned int budget = ICE_DFLT_IRQ_WORK; + struct ice_vsi *vsi = tx_ring->vsi; s16 i = tx_ring->next_to_clean; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; @@ -114,6 +113,8 @@ ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) tx_desc = ICE_TX_DESC(tx_ring, i); i -= tx_ring->count; + prefetch(&vsi->state); + do { struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; @@ -206,7 +207,7 @@ ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->q_index) && - !test_bit(__ICE_DOWN, vsi->state)) { + !test_bit(__ICE_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->q_index); ++tx_ring->tx_stats.restart_q; @@ -879,7 +880,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, /** * ice_rx_csum - Indicate in skb if checksum is good - * @vsi: the VSI we care about + * @ring: the ring we care about * @skb: skb currently being received and modified * @rx_desc: the receive descriptor * @ptype: the packet type decoded by hardware @@ -887,7 +888,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, * skb->protocol must be set before this function is called */ static void -ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, +ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, union ice_32b_rx_flex_desc *rx_desc, u8 ptype) { struct ice_rx_ptype_decoded decoded; @@ -904,7 +905,7 @@ ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, skb_checksum_none_assert(skb); /* check if Rx checksum is enabled */ - if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + if (!(ring->netdev->features & NETIF_F_RXCSUM)) return; /* check if HW has decoded the packet and checksum */ @@ -944,7 +945,7 @@ ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, return; checksum_fail: - vsi->back->hw_csum_rx_error++; + ring->vsi->back->hw_csum_rx_error++; } /** @@ -968,7 +969,7 @@ ice_process_skb_fields(struct ice_ring *rx_ring, /* modifies the skb - consumes the enet header */ skb->protocol = eth_type_trans(skb, rx_ring->netdev); - ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype); + ice_rx_csum(rx_ring, skb, rx_desc, ptype); } /** @@ -1354,14 +1355,13 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) /** * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt - * @vsi: the VSI associated with the q_vector * @q_vector: q_vector for which ITR is being updated and interrupt enabled */ -static void -ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) +static void ice_update_ena_itr(struct ice_q_vector *q_vector) { struct ice_ring_container *tx = &q_vector->tx; struct ice_ring_container *rx = &q_vector->rx; + struct ice_vsi *vsi = q_vector->vsi; u32 itr_val; /* when exiting WB_ON_ITR lets set a low ITR value and trigger @@ -1419,15 +1419,14 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) q_vector->itr_countdown--; } - if (!test_bit(__ICE_DOWN, vsi->state)) - wr32(&vsi->back->hw, + if (!test_bit(__ICE_DOWN, q_vector->vsi->state)) + wr32(&q_vector->vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); } /** * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector - * @vsi: pointer to the VSI structure * 
@q_vector: q_vector to set WB_ON_ITR on * * We need to tell hardware to write-back completed descriptors even when @@ -1440,9 +1439,10 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to * make sure hardware knows we aren't meddling with the INTENA_M bit. */ -static void -ice_set_wb_on_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) +static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) { + struct ice_vsi *vsi = q_vector->vsi; + /* already in WB_ON_ITR mode no need to change it */ if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) return; @@ -1473,7 +1473,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget) { struct ice_q_vector *q_vector = container_of(napi, struct ice_q_vector, napi); - struct ice_vsi *vsi = q_vector->vsi; bool clean_complete = true; struct ice_ring *ring; int budget_per_ring; @@ -1483,7 +1482,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ ice_for_each_ring(ring, q_vector->tx) - if (!ice_clean_tx_irq(vsi, ring, budget)) + if (!ice_clean_tx_irq(ring, budget)) clean_complete = false; /* Handle case where we are called by netpoll with a budget of 0 */ @@ -1519,9 +1518,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget) * poll us due to busy-polling */ if (likely(napi_complete_done(napi, work_done))) - ice_update_ena_itr(vsi, q_vector); + ice_update_ena_itr(q_vector); else - ice_set_wb_on_itr(vsi, q_vector); + ice_set_wb_on_itr(q_vector); return min_t(int, work_done, budget - 1); } -- cgit v1.2.3 From 6503b659302893af700d9e9b82d3210d09a3aefb Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 8 Aug 2019 07:39:36 -0700 Subject: ice: move code closer together This is a simple patch to move the assignment to a local variable closer to the site where the local variable is used. This can help readability and also maybe performance, although the performance enhancement is really dependent upon the compiler. No functional change. 
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_txrx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 4fe1b332e67e..ec581b1f0fcb 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1068,9 +1068,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) continue; } - rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & - ICE_RX_FLEX_DESC_PTYPE_M; - stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); if (ice_test_staterr(rx_desc, stat_err_bits)) vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); @@ -1087,6 +1084,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) total_rx_bytes += skb->len; /* populate checksum, VLAN, and protocol */ + rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & + ICE_RX_FLEX_DESC_PTYPE_M; + ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); /* send completed skb up the stack */ -- cgit v1.2.3 From d27525ec1fdd01740edb7d4f3dc801256d543393 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 8 Aug 2019 07:39:37 -0700 Subject: ice: small efficiency fixes Add a small bit of efficiency to the code by adding a prefetch of the port_info structure in order to help avoid a cache miss a little later on in execution. Also add an unlikely statement to a branch which generally will never happen in normal operation. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_txrx.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index ec581b1f0fcb..33dd103035dc 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1226,6 +1226,8 @@ ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) if (time_after(next_update, rc->next_update)) goto clear_counts; + prefetch(q_vector->vsi->port_info); + packets = rc->total_pkts; bytes = rc->total_bytes; @@ -1486,7 +1488,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) clean_complete = false; /* Handle case where we are called by netpoll with a budget of 0 */ - if (budget <= 0) + if (unlikely(budget <= 0)) return budget; /* normally we have 1 Rx ring per q_vector */ -- cgit v1.2.3 From 9d56b7fd6a1a2293c61749fa3b6b8a133ade9d44 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 8 Aug 2019 07:39:38 -0700 Subject: ice: change work limit to a constant The driver has supported a transmit work limit that was configurable from ethtool for a long time, but there are no good use cases for having it be a variable that can be changed at run time. In addition, this variable was noted to be causing performance overhead due to cache misses. Just remove the variable and let the code use a constant so that the functionality is maintained (a limit on the number of transmits that will be cleaned in any one call to the clean routines) without the cache miss. Removes code, removes a variable, removes testing surface. Yay. 
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 3 --- drivers/net/ethernet/intel/ice/ice_ethtool.c | 14 ++------------ drivers/net/ethernet/intel/ice/ice_lib.c | 2 +- 3 files changed, 3 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index bbb3c290a0bf..c7f234688499 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -247,9 +247,6 @@ struct ice_vsi { u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ - /* Interrupt thresholds */ - u16 work_lmt; - s16 vf_id; /* VF ID for SR-IOV VSIs */ u16 ethtype; /* Ethernet protocol for pause frame */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index edba5bd79097..ae9921b7de7b 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3214,12 +3214,6 @@ __ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, if (ice_get_q_coalesce(vsi, ec, q_num)) return -EINVAL; - if (q_num < vsi->num_txq) - ec->tx_max_coalesced_frames_irq = vsi->work_lmt; - - if (q_num < vsi->num_rxq) - ec->rx_max_coalesced_frames_irq = vsi->work_lmt; - return 0; } @@ -3399,17 +3393,13 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, if (ice_set_q_coalesce(vsi, ec, i)) return -EINVAL; } - goto set_work_lmt; + goto set_complete; } if (ice_set_q_coalesce(vsi, ec, q_num)) return -EINVAL; -set_work_lmt: - - if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) - vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq, - ec->rx_max_coalesced_frames_irq); +set_complete: return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 6cc01ebc0b01..5f7c75c3b24b 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -548,8 +548,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) vsi->type = type; vsi->back = pf; set_bit(__ICE_DOWN, vsi->state); + vsi->idx = pf->next_vsi; - vsi->work_lmt = ICE_DFLT_IRQ_WORK; if (type == ICE_VSI_VF) ice_vsi_set_num_qs(vsi, vf_id); -- cgit v1.2.3 From 29d42f1f3ae5624b5344aefccb5081eb9c49d44f Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 3 Sep 2019 01:31:00 -0700 Subject: ice: Reliably reset VFs When a PFR (or bigger reset) occurs, the device clears the VF_MBX_ARQLEN register for all VFs. But if a VFR is triggered by a VF, the device does NOT clear this register, and the VF driver will never see the reset. When this happens, the VF driver will eventually timeout and attempt recovery, and usually it will be successful. But this makes resets take a long time and there are occasional failures. We cannot just blithely clear this register on every reset; this has been shown to cause synchronization problems when a PFR is triggered with a large number of VFs. Fix this by clearing VF_MBX_ARQLEN when the reset source is not PFR. GlobR will trigger PFR, so this test catches that occurrence as well. 
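A condensed, self-contained model of the decision this patch introduces; printf stands in for the register writes and the names are simplified, but the branch structure mirrors the hunk below:

    #include <stdbool.h>
    #include <stdio.h>

    /* only clear the VF's "reset detect" register when the reset is NOT a PFR:
     * a PFR (or GlobR, which triggers PFR) already cleared it in hardware, and
     * clearing it again can race with a VF driver that has started cleanup
     */
    static void trigger_vf_reset(bool is_vflr, bool is_pfr)
    {
            if (!is_pfr)
                    printf("clear VF_MBX_ARQLEN so the VF can detect the reset\n");
            else
                    printf("skip the clear; hardware already zeroed it for PFR\n");

            if (!is_vflr)
                    printf("hit VFRTRIG to start the reset\n");
            else
                    printf("VFLR: hardware already reset the VF, only clean up\n");
    }

    int main(void)
    {
            trigger_vf_reset(false, true);   /* ice_reset_all_vfs() path */
            trigger_vf_reset(false, false);  /* single-VF reset path */
            return 0;
    }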
Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index c38939b1d496..3ba6613048ef 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -353,12 +353,13 @@ void ice_free_vfs(struct ice_pf *pf) * ice_trigger_vf_reset - Reset a VF on HW * @vf: pointer to the VF structure * @is_vflr: true if VFLR was issued, false if not + * @is_pfr: true if the reset was triggered due to a previous PFR * * Trigger hardware to start a reset for a particular VF. Expects the caller * to wait the proper amount of time to allow hardware to reset the VF before * it cleans up and restores VF functionality. */ -static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) +static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) { struct ice_pf *pf = vf->pf; u32 reg, reg_idx, bit_idx; @@ -379,10 +380,13 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) */ clear_bit(ICE_VF_STATE_INIT, vf->vf_states); - /* Clear the VF's ARQLEN register. This is how the VF detects reset, - * since the VFGEN_RSTAT register doesn't stick at 0 after reset. + /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it + * in the case of VFR. If this is done for PFR, it can mess up VF + * resets because the VF driver may already have started cleanup + * by the time we get here. */ - wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0); + if (!is_pfr) + wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0); /* In the case of a VFLR, the HW has already reset the VF and we * just need to clean up, so don't hit the VFRTRIG register. @@ -1072,7 +1076,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) /* Begin reset on all VFs at once */ for (v = 0; v < pf->num_alloc_vfs; v++) - ice_trigger_vf_reset(&pf->vf[v], is_vflr); + ice_trigger_vf_reset(&pf->vf[v], is_vflr, true); for (v = 0; v < pf->num_alloc_vfs; v++) { struct ice_vsi *vsi; @@ -1172,7 +1176,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states)) return false; - ice_trigger_vf_reset(vf, is_vflr); + ice_trigger_vf_reset(vf, is_vflr, false); vsi = pf->vsi[vf->lan_vsi_idx]; -- cgit v1.2.3 From c61d2342349fe01687db70bcbec91766596daef4 Mon Sep 17 00:00:00 2001 From: Lukasz Czapnik Date: Tue, 3 Sep 2019 01:31:01 -0700 Subject: ice: report link down for VF when PF's queues are not enabled This is port of a fix from i40e commit 2ad1274fa35a ("i40e: don't report link up for a VF who hasn't enabled queues") Older VF drivers do not respond well to receiving a link up notification before queues are enabled. This can cause their state machine to think that it is safe to send traffic. This results in a Tx hang on the VF. Record whether the PF has actually enabled queues for the VF. When reporting link status, always report link down if the queues aren't enabled. In this way, the VF driver will never receive a link up notification until after its queues are enabled. 
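The reporting rule, reduced to a standalone sketch (helper names are invented; only the ordering of the checks follows the hunk below):

    #include <stdbool.h>
    #include <stdio.h>

    /* what link state should the PF report to a VF? */
    static bool vf_reported_link_up(bool queues_enabled, bool link_forced,
                                    bool forced_up, bool phys_link_up)
    {
            if (!queues_enabled)
                    return false;           /* never "up" before queues are enabled */
            if (link_forced)
                    return forced_up;       /* administrator override */
            return phys_link_up;            /* otherwise mirror the physical link */
    }

    int main(void)
    {
            /* previously this case could report "up" and hang an old VF's Tx */
            printf("queues off, link up -> %s\n",
                   vf_reported_link_up(false, false, false, true) ? "up" : "down");
            printf("queues on,  link up -> %s\n",
                   vf_reported_link_up(true, false, false, true) ? "up" : "down");
            return 0;
    }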
Signed-off-by: Lukasz Czapnik Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 3ba6613048ef..1ec2a037a369 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -129,7 +129,10 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf) pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; - if (vf->link_forced) + /* Always report link is down if the VF queues aren't enabled */ + if (!vf->num_qs_ena) + ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false); + else if (vf->link_forced) ice_set_pfe_link_forced(vf, &pfe, vf->link_up); else ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info & -- cgit v1.2.3 From 80739b57b1604e8abfa4d733af0817fb537f0946 Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Tue, 3 Sep 2019 01:31:02 -0700 Subject: ice: Check for DCB capability before initializing DCB Check the ICE_FLAG_DCB_CAPABLE before calling ice_init_pf_dcb. Signed-off-by: Anirudh Venkataramanan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 3 --- drivers/net/ethernet/intel/ice/ice_main.c | 15 ++++++++------- 2 files changed, 8 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index e922adf1fa15..20f440a64650 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -474,7 +474,6 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) } pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; - set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); return 0; } @@ -483,8 +482,6 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) /* DCBX in FW and LLDP enabled in FW */ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; - set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - err = ice_dcb_init_cfg(pf, locked); if (err) goto dcb_init_err; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 703fc7bf2b31..8bb3b81876a9 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2252,6 +2252,8 @@ static void ice_deinit_pf(struct ice_pf *pf) static int ice_init_pf(struct ice_pf *pf) { bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); + if (pf->hw.func_caps.common_cap.dcb) + set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.common_cap.sr_iov_1_1) { struct ice_hw *hw = &pf->hw; @@ -2529,13 +2531,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_init_pf_unroll; } - err = ice_init_pf_dcb(pf, false); - if (err) { - clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - clear_bit(ICE_FLAG_DCB_ENA, pf->flags); - - /* do not fail overall init if DCB init fails */ - err = 0; + if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) { + /* Note: DCB init failure is non-fatal to load */ + if (ice_init_pf_dcb(pf, false)) { + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); + } } ice_determine_q_usage(pf); -- cgit v1.2.3 From dfc62400125f407ec6dbe6e6461843e654cd7fa9 Mon Sep 17 
00:00:00 2001 From: Akeem G Abodunrin Date: Tue, 3 Sep 2019 01:31:03 -0700 Subject: ice: Report VF link status with opcode to get resources This patch changes how and when the driver report link status, instead of waiting till the call to enable queues for VF, we should report link status earlier with opcode to get VF resources - So as to avoid reporting erroneous information, especially when queues have not been configured. In addition, we can also make a call to get and report link status change after when queue is enabled, at least to report netdev or PHY link status. This is in accordance to how link speed is being reported for PF... Signed-off-by: Akeem G Abodunrin Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 1ec2a037a369..30e8e6166a59 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -2934,6 +2934,7 @@ error_handler: break; case VIRTCHNL_OP_GET_VF_RESOURCES: err = ice_vc_get_vf_res_msg(vf, msg); + ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: ice_vc_reset_vf_msg(vf); -- cgit v1.2.3 From 201beeb71595dbc4c989ab5f4247334a32652642 Mon Sep 17 00:00:00 2001 From: Ashish Shah Date: Tue, 3 Sep 2019 01:31:04 -0700 Subject: ice: update Tx context struct Add internal usage flag, bit 91 as described in spec. Update width of internal queue state to 122 also as described in spec. Signed-off-by: Ashish Shah Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 3 ++- drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 9492cd34b09d..e8397e5b6267 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1132,6 +1132,7 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78), ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80), ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90), + ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91), ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92), ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93), ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101), @@ -1150,7 +1151,7 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), - ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 110, 171), + ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171), { 0 } }; diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 57ea6811fe2c..2aac8f13daeb 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -428,6 +428,7 @@ struct ice_tlan_ctx { #define ICE_TLAN_CTX_VMVF_TYPE_PF 2 u16 src_vsi; u8 tsyn_ena; + u8 internal_usage_flag; u8 alt_vlan; u16 cpuid; /* bigger than needed, see above for reason */ u8 wb_mode; -- cgit v1.2.3 From ea300f41bb49605ccbc64f354e6708bb385ac1c6 Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Tue, 3 Sep 2019 01:31:05 -0700 Subject: ice: Allow 
for delayed LLDP MIB change registration Add an additional boolean parameter to the ice_init_dcb function. This boolean controls if the LLDP MIB change events are registered for. Also, add a new function defined ice_cfg_lldp_mib_change. The additional function is necessary to be able to register for LLDP MIB change events after calling ice_init_dcb. The net effect of these two changes is to allow a delayed registration for MIB change events so that the driver is not accepting events before it is ready for them. Signed-off-by: Dave Ertman Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_dcb.c | 39 +++++++++++++++++++++++++--- drivers/net/ethernet/intel/ice/ice_dcb.h | 11 +++----- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 4 +-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 10 +++++-- drivers/net/ethernet/intel/ice/ice_main.c | 2 ++ 5 files changed, 51 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index c5ee8d930611..dd7efff121bd 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -60,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes (0x0A01) */ -enum ice_status +static enum ice_status ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, struct ice_sq_cd *cd) { @@ -943,10 +943,11 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) /** * ice_init_dcb * @hw: pointer to the HW struct + * @enable_mib_change: enable MIB change event * * Update DCB configuration from the Firmware */ -enum ice_status ice_init_dcb(struct ice_hw *hw) +enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) { struct ice_port_info *pi = hw->port_info; enum ice_status ret = 0; @@ -972,9 +973,39 @@ enum ice_status ice_init_dcb(struct ice_hw *hw) } /* Configure the LLDP MIB change event */ - ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL); + if (enable_mib_change) { + ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL); + if (!ret) + pi->is_sw_lldp = false; + } + + return ret; +} + +/** + * ice_cfg_lldp_mib_change + * @hw: pointer to the HW struct + * @ena_mib: enable/disable MIB change event + * + * Configure (disable/enable) MIB + */ +enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) +{ + struct ice_port_info *pi = hw->port_info; + enum ice_status ret; + + if (!hw->func_caps.common_cap.dcb) + return ICE_ERR_NOT_SUPPORTED; + + /* Get DCBX status */ + pi->dcbx_status = ice_get_dcbx_status(hw); + + if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) + return ICE_ERR_NOT_READY; + + ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL); if (!ret) - pi->is_sw_lldp = false; + pi->is_sw_lldp = !ena_mib; return ret; } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index 522e1452abe2..ee138f9bdc7c 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -125,7 +125,7 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg); enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); -enum ice_status ice_init_dcb(struct ice_hw *hw); +enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); 
enum ice_status ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, @@ -139,9 +139,7 @@ ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); enum ice_status ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd); -enum ice_status -ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, - struct ice_sq_cd *cd); +enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); #else /* CONFIG_DCB */ static inline enum ice_status ice_aq_stop_lldp(struct ice_hw __always_unused *hw, @@ -172,9 +170,8 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw, } static inline enum ice_status -ice_aq_cfg_lldp_mib_change(struct ice_hw __always_unused *hw, - bool __always_unused ena_update, - struct ice_sq_cd __always_unused *cd) +ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw, + bool __always_unused ena_mib) { return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 20f440a64650..97c22d4aae1d 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -318,7 +318,7 @@ void ice_dcb_rebuild(struct ice_pf *pf) goto dcb_error; } - ice_init_dcb(&pf->hw); + ice_init_dcb(&pf->hw, true); if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS) pf->hw.port_info->is_sw_lldp = true; else @@ -451,7 +451,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) port_info = hw->port_info; - err = ice_init_dcb(hw); + err = ice_init_dcb(hw, false); if (err && !port_info->is_sw_lldp) { dev_err(&pf->pdev->dev, "Error initializing DCB %d\n", err); goto dcb_init_err; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index ae9921b7de7b..d5db1426d484 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1206,8 +1206,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) enum ice_status status; /* Disable FW LLDP engine */ - status = ice_aq_cfg_lldp_mib_change(&pf->hw, false, - NULL); + status = ice_cfg_lldp_mib_change(&pf->hw, false); + /* If unregistering for LLDP events fails, this is * not an error state, as there shouldn't be any * events to respond to. @@ -1273,6 +1273,12 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) * The FW LLDP engine will now be consuming them. 
*/ ice_cfg_sw_lldp(vsi, false, false); + + /* Register for MIB change events */ + status = ice_cfg_lldp_mib_change(&pf->hw, true); + if (status) + dev_dbg(&pf->pdev->dev, + "Fail to enable MIB change events\n"); } } clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 8bb3b81876a9..2d92d8591a8a 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2536,6 +2536,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (ice_init_pf_dcb(pf, false)) { clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); clear_bit(ICE_FLAG_DCB_ENA, pf->flags); + } else { + ice_cfg_lldp_mib_change(&pf->hw, true); } } -- cgit v1.2.3 From 8c243700ab10a56171858cc9dd17c5d6494a0ed6 Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Tue, 3 Sep 2019 01:31:06 -0700 Subject: ice: Minor refactor in queue management Remove q_left_tx and q_left_rx from the PF struct as these can be obtained by calling ice_get_avail_txq_count and ice_get_avail_rxq_count respectively. The function ice_determine_q_usage is only setting num_lan_tx and num_lan_rx in the PF structure, and these are later assigned to vsi->alloc_txq and vsi->alloc_rxq respectively. This is an unnecessary indirection, so remove ice_determine_q_usage and just assign values for vsi->alloc_txq and vsi->alloc_rxq in ice_vsi_set_num_qs and use these to set num_lan_tx and num_lan_rx respectively. Signed-off-by: Anirudh Venkataramanan Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 4 +- drivers/net/ethernet/intel/ice/ice_lib.c | 25 ++++++------ drivers/net/ethernet/intel/ice/ice_main.c | 50 ++++++++++++++---------- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 14 ++++--- 4 files changed, 54 insertions(+), 39 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index c7f234688499..6c4faf7551f6 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -368,8 +368,6 @@ struct ice_pf { u32 num_lan_msix; /* Total MSIX vectors for base driver */ u16 num_lan_tx; /* num LAN Tx queues setup */ u16 num_lan_rx; /* num LAN Rx queues setup */ - u16 q_left_tx; /* remaining num Tx queues left unclaimed */ - u16 q_left_rx; /* remaining num Rx queues left unclaimed */ u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! 
*/ u16 num_alloc_vsi; u16 corer_count; /* Core reset count */ @@ -438,6 +436,8 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); +u16 ice_get_avail_txq_count(struct ice_pf *pf); +u16 ice_get_avail_rxq_count(struct ice_pf *pf); void ice_update_vsi_stats(struct ice_vsi *vsi); void ice_update_pf_stats(struct ice_pf *pf); int ice_up(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 5f7c75c3b24b..7cd8c5d13bcc 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -343,8 +343,20 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) switch (vsi->type) { case ICE_VSI_PF: - vsi->alloc_txq = pf->num_lan_tx; - vsi->alloc_rxq = pf->num_lan_rx; + vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf), + num_online_cpus()); + + pf->num_lan_tx = vsi->alloc_txq; + + /* only 1 Rx queue unless RSS is enabled */ + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + vsi->alloc_rxq = 1; + else + vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf), + num_online_cpus()); + + pf->num_lan_rx = vsi->alloc_rxq; + vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq); break; case ICE_VSI_VF: @@ -2577,9 +2589,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (ret) goto unroll_vector_base; - pf->q_left_tx -= vsi->alloc_txq; - pf->q_left_rx -= vsi->alloc_rxq; - /* Do not exit if configuring RSS had an issue, at least * receive traffic on first queue. Hence no need to capture * return value @@ -2643,8 +2652,6 @@ unroll_vsi_init: ice_vsi_delete(vsi); unroll_get_qs: ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; ice_vsi_clear(vsi); return NULL; @@ -2992,8 +2999,6 @@ int ice_vsi_release(struct ice_vsi *vsi) ice_vsi_clear_rings(vsi); ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; /* retain SW VSI data structure since it is needed to unregister and * free VSI netdev when PF is not in reset recovery pending state,\ @@ -3102,8 +3107,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_vectors; - pf->q_left_tx -= vsi->alloc_txq; - pf->q_left_rx -= vsi->alloc_rxq; break; default: break; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2d92d8591a8a..f8be9ada2447 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2192,36 +2192,48 @@ unroll_vsi_setup: ice_vsi_free_q_vectors(vsi); ice_vsi_delete(vsi); ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; ice_vsi_clear(vsi); } return status; } /** - * ice_determine_q_usage - Calculate queue distribution - * @pf: board private structure - * - * Return -ENOMEM if we don't get enough queues for all ports + * ice_get_avail_q_count - Get count of queues in use + * @pf_qmap: bitmap to get queue use count from + * @lock: pointer to a mutex that protects access to pf_qmap + * @size: size of the bitmap */ -static void ice_determine_q_usage(struct ice_pf *pf) +static u16 +ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) { - u16 q_left_tx, q_left_rx; + u16 count = 0, bit; - q_left_tx = pf->hw.func_caps.common_cap.num_txq; - q_left_rx = pf->hw.func_caps.common_cap.num_rxq; + mutex_lock(lock); + 
for_each_clear_bit(bit, pf_qmap, size) + count++; + mutex_unlock(lock); - pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus()); + return count; +} - /* only 1 Rx queue unless RSS is enabled */ - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - pf->num_lan_rx = 1; - else - pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus()); +/** + * ice_get_avail_txq_count - Get count of Tx queues in use + * @pf: pointer to an ice_pf instance + */ +u16 ice_get_avail_txq_count(struct ice_pf *pf) +{ + return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, + pf->max_pf_txqs); +} - pf->q_left_tx = q_left_tx - pf->num_lan_tx; - pf->q_left_rx = q_left_rx - pf->num_lan_rx; +/** + * ice_get_avail_rxq_count - Get count of Rx queues in use + * @pf: pointer to an ice_pf instance + */ +u16 ice_get_avail_rxq_count(struct ice_pf *pf) +{ + return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, + pf->max_pf_rxqs); } /** @@ -2541,8 +2553,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) } } - ice_determine_q_usage(pf); - pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; if (!pf->num_alloc_vsi) { err = -EIO; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 30e8e6166a59..64de05ccbc47 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -595,7 +595,8 @@ static int ice_alloc_vf_res(struct ice_vf *vf) /* Update number of VF queues, in case VF had requested for queue * changes */ - tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf), + ice_get_avail_rxq_count(pf)); tx_rx_queue_left += ICE_DFLT_QS_PER_VF; if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left && vf->num_req_qs != vf->num_vf_qs) @@ -898,11 +899,11 @@ static int ice_check_avail_res(struct ice_pf *pf) * at runtime through Virtchnl, that is the reason we start by reserving * few queues. */ - num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF, - ICE_MIN_QS_PER_VF); + num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf), + ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); - num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF, - ICE_MIN_QS_PER_VF); + num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf), + ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); if (!num_txq || !num_rxq) return -EIO; @@ -2511,7 +2512,8 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) } cur_queues = vf->num_vf_qs; - tx_rx_queue_left = min_t(u16, pf->q_left_tx, pf->q_left_rx); + tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf), + ice_get_avail_rxq_count(pf)); max_allowed_vf_queues = tx_rx_queue_left + cur_queues; if (!req_queues) { dev_err(&pf->pdev->dev, -- cgit v1.2.3 From dd47e1fd8650417af7956c63f46b000ad0e314f5 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 3 Sep 2019 01:31:07 -0700 Subject: ice: change default number of receive descriptors The driver should start out with a reasonable number of descriptors that can prevent drops due to a CPU being in a power management state. Change the default number of descriptors to 2048. The user can always change the value at runtime. Transmit descriptor counts are not modified because they don't need to change due to the speed of the interface, or for power managed CPUs, but the code is simplified to a fixed value for the transmit default. 
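For a rough sense of the headroom the larger Rx default buys while a CPU wakes from a power-management state, a back-of-envelope sketch; the 25 Gb/s link speed and wire-overhead figure are assumptions for the example, not values from this patch:

    #include <stdio.h>

    int main(void)
    {
            const double rx_desc = 2048;     /* new default Rx ring size */
            const double wire_bytes = 1538;  /* 1500B payload + headers, FCS, preamble, IFG */
            const double link_bps = 25e9;    /* example 25 Gb/s port */
            double pps = link_bps / (wire_bytes * 8);

            /* how long a full ring of descriptors can absorb line-rate traffic,
             * ignoring multi-buffer frames
             */
            printf("~%.2f Mpps, ~%.2f ms of buffering\n",
                   pps / 1e6, rx_desc / pps * 1e3);
            return 0;
    }

Smaller frames raise the packet rate and shrink that window accordingly.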
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 6c4faf7551f6..b36e1cf0e461 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -47,23 +47,8 @@ extern const char ice_drv_ver[]; #define ICE_MIN_NUM_DESC 64 #define ICE_MAX_NUM_DESC 8160 #define ICE_DFLT_MIN_RX_DESC 512 -/* if the default number of Rx descriptors between ICE_MAX_NUM_DESC and the - * number of descriptors to fill up an entire page is greater than or equal to - * ICE_DFLT_MIN_RX_DESC set it based on page size, otherwise set it to - * ICE_DFLT_MIN_RX_DESC - */ -#define ICE_DFLT_NUM_RX_DESC \ - min_t(u16, ICE_MAX_NUM_DESC, \ - max_t(u16, ALIGN(PAGE_SIZE / sizeof(union ice_32byte_rx_desc), \ - ICE_REQ_DESC_MULTIPLE), \ - ICE_DFLT_MIN_RX_DESC)) -/* set default number of Tx descriptors to the minimum between ICE_MAX_NUM_DESC - * and the number of descriptors to fill up an entire page - */ -#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \ - ALIGN(PAGE_SIZE / \ - sizeof(struct ice_tx_desc), \ - ICE_REQ_DESC_MULTIPLE)) +#define ICE_DFLT_NUM_TX_DESC 256 +#define ICE_DFLT_NUM_RX_DESC 2048 #define ICE_DFLT_TRAFFIC_CLASS BIT(0) #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) -- cgit v1.2.3 From 5c875c1af8dc69e64e0e457956b4271eac792733 Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Tue, 3 Sep 2019 01:31:08 -0700 Subject: ice: Rework around device/function capabilities ice_parse_caps is printing capabilities in a different way when compared to the variable names. This makes it difficult to search for the right strings in the debug logs. So this patch updates the print strings to be exactly the same as the fields' name in the structure. 
Signed-off-by: Anirudh Venkataramanan Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 40 ++++++++++++++--------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index e8397e5b6267..8b2c46615834 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1551,29 +1551,29 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, case ICE_AQC_CAPS_VALID_FUNCTIONS: caps->valid_functions = number; ice_debug(hw, ICE_DBG_INIT, - "%s: valid functions = %d\n", prefix, + "%s: valid_functions (bitmap) = %d\n", prefix, caps->valid_functions); break; case ICE_AQC_CAPS_SRIOV: caps->sr_iov_1_1 = (number == 1); ice_debug(hw, ICE_DBG_INIT, - "%s: SR-IOV = %d\n", prefix, + "%s: sr_iov_1_1 = %d\n", prefix, caps->sr_iov_1_1); break; case ICE_AQC_CAPS_VF: if (dev_p) { dev_p->num_vfs_exposed = number; ice_debug(hw, ICE_DBG_INIT, - "%s: VFs exposed = %d\n", prefix, + "%s: num_vfs_exposed = %d\n", prefix, dev_p->num_vfs_exposed); } else if (func_p) { func_p->num_allocd_vfs = number; func_p->vf_base_id = logical_id; ice_debug(hw, ICE_DBG_INIT, - "%s: VFs allocated = %d\n", prefix, + "%s: num_allocd_vfs = %d\n", prefix, func_p->num_allocd_vfs); ice_debug(hw, ICE_DBG_INIT, - "%s: VF base_id = %d\n", prefix, + "%s: vf_base_id = %d\n", prefix, func_p->vf_base_id); } break; @@ -1581,17 +1581,17 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, if (dev_p) { dev_p->num_vsi_allocd_to_host = number; ice_debug(hw, ICE_DBG_INIT, - "%s: num VSI alloc to host = %d\n", + "%s: num_vsi_allocd_to_host = %d\n", prefix, dev_p->num_vsi_allocd_to_host); } else if (func_p) { func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); ice_debug(hw, ICE_DBG_INIT, - "%s: num guaranteed VSI (fw) = %d\n", + "%s: guar_num_vsi (fw) = %d\n", prefix, number); ice_debug(hw, ICE_DBG_INIT, - "%s: num guaranteed VSI = %d\n", + "%s: guar_num_vsi = %d\n", prefix, func_p->guar_num_vsi); } break; @@ -1600,56 +1600,56 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, caps->active_tc_bitmap = logical_id; caps->maxtc = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: DCB = %d\n", prefix, caps->dcb); + "%s: dcb = %d\n", prefix, caps->dcb); ice_debug(hw, ICE_DBG_INIT, - "%s: active TC bitmap = %d\n", prefix, + "%s: active_tc_bitmap = %d\n", prefix, caps->active_tc_bitmap); ice_debug(hw, ICE_DBG_INIT, - "%s: TC max = %d\n", prefix, caps->maxtc); + "%s: maxtc = %d\n", prefix, caps->maxtc); break; case ICE_AQC_CAPS_RSS: caps->rss_table_size = number; caps->rss_table_entry_width = logical_id; ice_debug(hw, ICE_DBG_INIT, - "%s: RSS table size = %d\n", prefix, + "%s: rss_table_size = %d\n", prefix, caps->rss_table_size); ice_debug(hw, ICE_DBG_INIT, - "%s: RSS table width = %d\n", prefix, + "%s: rss_table_entry_width = %d\n", prefix, caps->rss_table_entry_width); break; case ICE_AQC_CAPS_RXQS: caps->num_rxq = number; caps->rxq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: num Rx queues = %d\n", prefix, + "%s: num_rxq = %d\n", prefix, caps->num_rxq); ice_debug(hw, ICE_DBG_INIT, - "%s: Rx first queue ID = %d\n", prefix, + "%s: rxq_first_id = %d\n", prefix, caps->rxq_first_id); break; case ICE_AQC_CAPS_TXQS: caps->num_txq = number; caps->txq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: num Tx queues = %d\n", prefix, + "%s: num_txq = 
%d\n", prefix, caps->num_txq); ice_debug(hw, ICE_DBG_INIT, - "%s: Tx first queue ID = %d\n", prefix, + "%s: txq_first_id = %d\n", prefix, caps->txq_first_id); break; case ICE_AQC_CAPS_MSIX: caps->num_msix_vectors = number; caps->msix_vector_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: MSIX vector count = %d\n", prefix, + "%s: num_msix_vectors = %d\n", prefix, caps->num_msix_vectors); ice_debug(hw, ICE_DBG_INIT, - "%s: MSIX first vector index = %d\n", prefix, + "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; - ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n", + ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", prefix, caps->max_mtu); break; default: -- cgit v1.2.3 From e3710a01a869917271718acdc53134ced24d4c82 Mon Sep 17 00:00:00 2001 From: Paul M Stillwell Jr Date: Mon, 9 Sep 2019 06:47:42 -0700 Subject: ice: send driver version to firmware The driver is required to send a version to the firmware to indicate that the driver is up. If the driver doesn't do this the firmware doesn't behave properly. Signed-off-by: Paul M Stillwell Jr Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 1 + drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 13 +++++++++ drivers/net/ethernet/intel/ice/ice_common.c | 37 +++++++++++++++++++++++++ drivers/net/ethernet/intel/ice/ice_common.h | 3 ++ drivers/net/ethernet/intel/ice/ice_main.c | 36 +++++++++++++++++++++++- drivers/net/ethernet/intel/ice/ice_type.h | 8 ++++++ 6 files changed, 97 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index b36e1cf0e461..4cdedcebb163 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include "ice_devids.h" diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 4da0cde9695b..9c9791788610 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -33,6 +33,17 @@ struct ice_aqc_get_ver { u8 api_patch; }; +/* Send driver version (indirect 0x0002) */ +struct ice_aqc_driver_ver { + u8 major_ver; + u8 minor_ver; + u8 build_ver; + u8 subbuild_ver; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + /* Queue Shutdown (direct 0x0003) */ struct ice_aqc_q_shutdown { u8 driver_unloading; @@ -1547,6 +1558,7 @@ struct ice_aq_desc { u8 raw[16]; struct ice_aqc_generic generic; struct ice_aqc_get_ver get_ver; + struct ice_aqc_driver_ver driver_ver; struct ice_aqc_q_shutdown q_shutdown; struct ice_aqc_req_res res_owner; struct ice_aqc_manage_mac_read mac_read; @@ -1618,6 +1630,7 @@ enum ice_aq_err { enum ice_adminq_opc { /* AQ commands */ ice_aqc_opc_get_ver = 0x0001, + ice_aqc_opc_driver_ver = 0x0002, ice_aqc_opc_q_shutdown = 0x0003, /* resource ownership */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 8b2c46615834..db62cc748544 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1258,6 +1258,43 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) return status; } +/** + * ice_aq_send_driver_ver + * @hw: pointer to the HW struct + * @dv: driver's major, minor version + * @cd: pointer to 
command details structure or NULL + * + * Send the driver version (0x0002) to the firmware + */ +enum ice_status +ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, + struct ice_sq_cd *cd) +{ + struct ice_aqc_driver_ver *cmd; + struct ice_aq_desc desc; + u16 len; + + cmd = &desc.params.driver_ver; + + if (!dv) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + cmd->major_ver = dv->major_ver; + cmd->minor_ver = dv->minor_ver; + cmd->build_ver = dv->build_ver; + cmd->subbuild_ver = dv->subbuild_ver; + + len = 0; + while (len < sizeof(dv->driver_string) && + isascii(dv->driver_string[len]) && dv->driver_string[len]) + len++; + + return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd); +} + /** * ice_aq_q_shutdown * @hw: pointer to the HW struct diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index e376d1eadba4..e9d77370a17c 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -71,6 +71,9 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); +enum ice_status +ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, + struct ice_sq_cd *cd); enum ice_status ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f8be9ada2447..c0988b74f007 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -9,7 +9,13 @@ #include "ice_lib.h" #include "ice_dcb_lib.h" -#define DRV_VERSION "0.7.5-k" +#define DRV_VERSION_MAJOR 0 +#define DRV_VERSION_MINOR 7 +#define DRV_VERSION_BUILD 5 + +#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." \ + __stringify(DRV_VERSION_BUILD) "-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; @@ -2459,6 +2465,25 @@ static void ice_verify_cacheline_size(struct ice_pf *pf) ICE_CACHE_LINE_BYTES); } +/** + * ice_send_version - update firmware with driver version + * @pf: PF struct + * + * Returns ICE_SUCCESS on success, else error code + */ +static enum ice_status ice_send_version(struct ice_pf *pf) +{ + struct ice_driver_ver dv; + + dv.major_ver = DRV_VERSION_MAJOR; + dv.minor_ver = DRV_VERSION_MINOR; + dv.build_ver = DRV_VERSION_BUILD; + dv.subbuild_ver = 0; + strscpy((char *)dv.driver_string, DRV_VERSION, + sizeof(dv.driver_string)); + return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); +} + /** * ice_probe - Device initialization routine * @pdev: PCI device information struct @@ -2612,6 +2637,15 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) clear_bit(__ICE_SERVICE_DIS, pf->state); + /* tell the firmware we are up */ + err = ice_send_version(pf); + if (err) { + dev_err(dev, + "probe failed sending driver version %s. 
error: %d\n", + ice_drv_ver, err); + goto err_alloc_sw_unroll; + } + /* since everything is good, start the service timer */ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 4501d50a7dcc..a2676003275a 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -53,6 +53,14 @@ enum ice_aq_res_access_type { ICE_RES_WRITE }; +struct ice_driver_ver { + u8 major_ver; + u8 minor_ver; + u8 build_ver; + u8 subbuild_ver; + u8 driver_string[32]; +}; + enum ice_fc_mode { ICE_FC_NONE = 0, ICE_FC_RX_PAUSE, -- cgit v1.2.3 From 870f805e97d9af3ffa752cd5b9cc6e81bc7d96ad Mon Sep 17 00:00:00 2001 From: Lukasz Czapnik Date: Mon, 9 Sep 2019 06:47:43 -0700 Subject: ice: Fix FW version formatting in dmesg The FW build id is currently being displayed as an int which doesn't make sense. Instead display FW build id as a hex value. Also add other useful information to the output such as NVM version, API patch info, and FW build hash. Signed-off-by: Lukasz Czapnik Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 1 - drivers/net/ethernet/intel/ice/ice_common.c | 23 +++++++++++++++++++++++ drivers/net/ethernet/intel/ice/ice_common.h | 3 +++ drivers/net/ethernet/intel/ice/ice_ethtool.c | 25 ------------------------- drivers/net/ethernet/intel/ice/ice_lib.c | 19 +++++++++++++++++++ drivers/net/ethernet/intel/ice/ice_lib.h | 2 ++ drivers/net/ethernet/intel/ice/ice_main.c | 7 ++++--- drivers/net/ethernet/intel/ice/ice_type.h | 2 ++ 8 files changed, 53 insertions(+), 29 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 4cdedcebb163..3a3e69a2bc5a 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -53,7 +53,6 @@ extern const char ice_drv_ver[]; #define ICE_DFLT_TRAFFIC_CLASS BIT(0) #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) -#define ICE_ETHTOOL_FWVER_LEN 32 #define ICE_AQ_LEN 64 #define ICE_MBXSQ_LEN 64 #define ICE_MBXRQ_LEN 512 diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index db62cc748544..22d2a11ef41f 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -728,6 +728,29 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw) } } +/** + * ice_get_nvm_version - get cached NVM version data + * @hw: pointer to the hardware structure + * @oem_ver: 8 bit NVM version + * @oem_build: 16 bit NVM build number + * @oem_patch: 8 NVM patch number + * @ver_hi: high 16 bits of the NVM version + * @ver_lo: low 16 bits of the NVM version + */ +void +ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build, + u8 *oem_patch, u8 *ver_hi, u8 *ver_lo) +{ + struct ice_nvm_info *nvm = &hw->nvm; + + *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT); + *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK); + *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >> + ICE_OEM_VER_BUILD_SHIFT); + *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; + *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; +} + /** * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/ice/ice_common.h 
b/drivers/net/ethernet/intel/ice/ice_common.h index e9d77370a17c..0525a051e05b 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -133,6 +133,9 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +void +ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build, + u8 *oem_patch, u8 *ver_hi, u8 *ver_lo); enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_get_elem *buf); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index d5db1426d484..a16b461b46bb 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -160,31 +160,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = { #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) -/** - * ice_nvm_version_str - format the NVM version strings - * @hw: ptr to the hardware info - */ -static char *ice_nvm_version_str(struct ice_hw *hw) -{ - static char buf[ICE_ETHTOOL_FWVER_LEN]; - u8 ver, patch; - u32 full_ver; - u16 build; - - full_ver = hw->nvm.oem_ver; - ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT); - build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >> - ICE_OEM_VER_BUILD_SHIFT); - patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK); - - snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", - (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT, - (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT, - hw->nvm.eetrack, ver, build, patch); - - return buf; -} - static void ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 7cd8c5d13bcc..9680692bf27c 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -3275,6 +3275,25 @@ out: } #endif /* CONFIG_DCB */ +/** + * ice_nvm_version_str - format the NVM version strings + * @hw: ptr to the hardware info + */ +char *ice_nvm_version_str(struct ice_hw *hw) +{ + u8 oem_ver, oem_patch, ver_hi, ver_lo; + static char buf[ICE_NVM_VER_LEN]; + u16 oem_build; + + ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi, + &ver_lo); + + snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo, + hw->nvm.eetrack, oem_ver, oem_build, oem_patch); + + return buf; +} + /** * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI * @vsi: the VSI being configured MAC filter diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 7faf8db844f6..87f7f5422b46 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -120,6 +120,8 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran); +char *ice_nvm_version_str(struct ice_hw *hw); + enum ice_status ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index c0988b74f007..ff295cb54cfd 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2558,9 +2558,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_exit_unroll; } - 
dev_info(dev, "firmware %d.%d.%05d api %d.%d\n", - hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, - hw->api_maj_ver, hw->api_min_ver); + dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n", + hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch, + hw->api_maj_ver, hw->api_min_ver, hw->api_patch, + ice_nvm_version_str(hw), hw->fw_build); err = ice_init_pf(pf); if (err) { diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index a2676003275a..7ec8a529b5cf 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -230,6 +230,8 @@ struct ice_nvm_info { u8 blank_nvm_mode; /* is NVM empty (no FW present) */ }; +#define ICE_NVM_VER_LEN 32 + /* Max number of port to queue branches w.r.t topology */ #define ICE_MAX_TRAFFIC_CLASS 8 #define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS -- cgit v1.2.3 From c7648810961682b9388be2dd041df06915647445 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Mon, 9 Sep 2019 06:47:44 -0700 Subject: ice: Implement Dynamic Device Personalization (DDP) download Add the required defines, structures, and functions to enable downloading a DDP package. Before download, checks are performed to ensure the package is valid and compatible. Note that package download is not yet requested by the driver as further initialization is required to utilize the package. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/Makefile | 1 + drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 60 +++ drivers/net/ethernet/intel/ice/ice_common.c | 40 +- drivers/net/ethernet/intel/ice/ice_common.h | 4 + drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 608 ++++++++++++++++++++++++ drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 25 + drivers/net/ethernet/intel/ice/ice_flex_type.h | 372 +++++++++++++++ drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 2 + drivers/net/ethernet/intel/ice/ice_type.h | 26 + 9 files changed, 1137 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/intel/ice/ice_flex_pipe.c create mode 100644 drivers/net/ethernet/intel/ice/ice_flex_pipe.h create mode 100644 drivers/net/ethernet/intel/ice/ice_flex_type.h (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 2d140ba83781..9edde960b4f2 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -15,6 +15,7 @@ ice-y := ice_main.o \ ice_sched.o \ ice_lib.o \ ice_txrx.o \ + ice_flex_pipe.o \ ice_ethtool.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 9c9791788610..023e3d2fee5f 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1530,6 +1530,56 @@ struct ice_aqc_get_clear_fw_log { __le32 addr_low; }; +/* Download Package (indirect 0x0C40) */ +/* Also used for Update Package (indirect 0x0C42) */ +struct ice_aqc_download_pkg { + u8 flags; +#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01 + u8 reserved[3]; + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_download_pkg_resp { + __le32 error_offset; + __le32 error_info; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get Package Info List (indirect 0x0C43) */ +struct ice_aqc_get_pkg_info_list { + __le32 
reserved1; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Version format for packages */ +struct ice_pkg_ver { + u8 major; + u8 minor; + u8 update; + u8 draft; +}; + +#define ICE_PKG_NAME_SIZE 32 + +struct ice_aqc_get_pkg_info { + struct ice_pkg_ver ver; + char name[ICE_PKG_NAME_SIZE]; + u8 is_in_nvm; + u8 is_active; + u8 is_active_at_boot; + u8 is_modified; +}; + +/* Get Package Info List response buffer format (0x0C43) */ +struct ice_aqc_get_pkg_info_resp { + __le32 count; + struct ice_aqc_get_pkg_info pkg_info[1]; +}; /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -1592,6 +1642,7 @@ struct ice_aq_desc { struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; struct ice_aqc_fw_logging fw_logging; struct ice_aqc_get_clear_fw_log get_clear_fw_log; + struct ice_aqc_download_pkg download_pkg; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_set_event_mask set_event_mask; @@ -1624,6 +1675,11 @@ enum ice_aq_err { ICE_AQ_RC_EEXIST = 13, /* Object already exists */ ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */ ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */ + ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */ + ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */ + ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */ + ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */ + ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */ }; /* Admin Queue command opcodes */ @@ -1712,6 +1768,10 @@ enum ice_adminq_opc { ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, + /* package commands */ + ice_aqc_opc_download_pkg = 0x0C40, + ice_aqc_opc_get_pkg_info_list = 0x0C43, + /* debug commands */ ice_aqc_opc_fw_logging = 0xFF09, ice_aqc_opc_fw_logging_info = 0xFF10, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 22d2a11ef41f..20956643036c 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -910,6 +910,7 @@ void ice_deinit_hw(struct ice_hw *hw) ice_sched_cleanup_all(hw); ice_sched_clear_agg(hw); + ice_free_seg(hw); if (hw->port_info) { devm_kfree(ice_hw_to_dev(hw), hw->port_info); @@ -1230,6 +1231,12 @@ ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf, /* FW Admin Queue command wrappers */ +/* Software lock/mutex that is meant to be held while the Global Config Lock + * in firmware is acquired by the software to prevent most (but not all) types + * of AQ commands from being sent to FW + */ +DEFINE_MUTEX(ice_global_cfg_lock_sw); + /** * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue * @hw: pointer to the HW struct @@ -1244,7 +1251,38 @@ enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { - return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); + struct ice_aqc_req_res *cmd = &desc->params.res_owner; + bool lock_acquired = false; + enum ice_status status; + + /* When a package download is in process (i.e. when the firmware's + * Global Configuration Lock resource is held), only the Download + * Package, Get Version, Get Package Info List and Release Resource + * (with resource ID set to Global Config Lock) AdminQ commands are + * allowed; all others must block until the package download completes + * and the Global Config Lock is released. See also + * ice_acquire_global_cfg_lock(). 
+ */ + switch (le16_to_cpu(desc->opcode)) { + case ice_aqc_opc_download_pkg: + case ice_aqc_opc_get_pkg_info_list: + case ice_aqc_opc_get_ver: + break; + case ice_aqc_opc_release_res: + if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK) + break; + /* fall-through */ + default: + mutex_lock(&ice_global_cfg_lock_sw); + lock_acquired = true; + break; + } + + status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); + if (lock_acquired) + mutex_unlock(&ice_global_cfg_lock_sw); + + return status; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 0525a051e05b..ce55f8ae6b52 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -6,6 +6,7 @@ #include "ice.h" #include "ice_type.h" +#include "ice_flex_pipe.h" #include "ice_switch.h" #include @@ -66,6 +67,9 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); extern const struct ice_ctx_ele ice_tlan_ctx_info[]; enum ice_status ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); + +extern struct mutex ice_global_cfg_lock_sw; + enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c new file mode 100644 index 000000000000..4e965dc5eb93 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -0,0 +1,608 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019, Intel Corporation. */ + +#include "ice_common.h" +#include "ice_flex_pipe.h" + +/** + * ice_find_buf_table + * @ice_seg: pointer to the ice segment + * + * Returns the address of the buffer table within the ice segment. + */ +static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) +{ + struct ice_nvm_table *nvms; + + nvms = (struct ice_nvm_table *) + (ice_seg->device_table + + le32_to_cpu(ice_seg->device_table_count)); + + return (__force struct ice_buf_table *) + (nvms->vers + le32_to_cpu(nvms->table_count)); +} + +/** + * ice_acquire_global_cfg_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the global config lock for reading + * or writing of the package. When attempting to obtain write access, the + * caller must check for the following two return values: + * + * ICE_SUCCESS - Means the caller has acquired the global config lock + * and can perform writing of the package. + * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the + * package or has found that no update was necessary; in + * this case, the caller can just skip performing any + * update of the package. + */ +static enum ice_status +ice_acquire_global_cfg_lock(struct ice_hw *hw, + enum ice_aq_res_access_type access) +{ + enum ice_status status; + + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + + if (!status) + mutex_lock(&ice_global_cfg_lock_sw); + else if (status == ICE_ERR_AQ_NO_WORK) + ice_debug(hw, ICE_DBG_PKG, + "Global config lock: No work to do\n"); + + return status; +} + +/** + * ice_release_global_cfg_lock + * @hw: pointer to the HW structure + * + * This function will release the global config lock. 
+ */ +static void ice_release_global_cfg_lock(struct ice_hw *hw) +{ + mutex_unlock(&ice_global_cfg_lock_sw); + ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); +} + +/** + * ice_aq_download_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer to transfer + * @buf_size: the size of the package buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Download Package (0x0C40) + */ +static enum ice_status +ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == ICE_ERR_AQ_ERROR) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * ice_find_seg_in_pkg + * @hw: pointer to the hardware structure + * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + */ +static struct ice_generic_seg_hdr * +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr) +{ + u32 i; + + ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", + pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor, + pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft); + + /* Search all package segments for the requested segment type */ + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + struct ice_generic_seg_hdr *seg; + + seg = (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i])); + + if (le32_to_cpu(seg->seg_type) == seg_type) + return seg; + } + + return NULL; +} + +/** + * ice_dwnld_cfg_bufs + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. Metadata buffers are skipped, and the first metadata buffer + * found indicates that the rest of the buffers are all metadata buffers. + */ +static enum ice_status +ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status; + struct ice_buf_hdr *bh; + u32 offset, info, i; + + if (!bufs || !count) + return ICE_ERR_PARAM; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. 
+ */ + bh = (struct ice_buf_hdr *)bufs; + if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) + return 0; + + /* reset pkg_dwnld_status in case this function is called in the + * reset/rebuild flow + */ + hw->pkg_dwnld_status = ICE_AQ_RC_OK; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == ICE_ERR_AQ_NO_WORK) + hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; + else + hw->pkg_dwnld_status = hw->adminq.sq_last_status; + return status; + } + + for (i = 0; i < count; i++) { + bool last = ((i + 1) == count); + + if (!last) { + /* check next buffer for metadata flag */ + bh = (struct ice_buf_hdr *)(bufs + i + 1); + + /* A set metadata flag in the next buffer will signal + * that the current buffer will be the last buffer + * downloaded + */ + if (le16_to_cpu(bh->section_count)) + if (le32_to_cpu(bh->section_entry[0].type) & + ICE_METADATA_BUF) + last = true; + } + + bh = (struct ice_buf_hdr *)(bufs + i); + + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, + &offset, &info, NULL); + + /* Save AQ status from download package */ + hw->pkg_dwnld_status = hw->adminq.sq_last_status; + if (status) { + ice_debug(hw, ICE_DBG_PKG, + "Pkg download failed: err %d off %d inf %d\n", + status, offset, info); + + break; + } + + if (last) + break; + } + + ice_release_global_cfg_lock(hw); + + return status; +} + +/** + * ice_aq_get_pkg_info_list + * @hw: pointer to the hardware structure + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer + * @cd: pointer to command details structure or NULL + * + * Get Package Info List (0x0C43) + */ +static enum ice_status +ice_aq_get_pkg_info_list(struct ice_hw *hw, + struct ice_aqc_get_pkg_info_resp *pkg_info, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. + */ +static enum ice_status +ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_buf_table *ice_buf_tbl; + + ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n", + ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor, + ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft); + + ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", + le32_to_cpu(ice_seg->hdr.seg_type), + le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name); + + ice_buf_tbl = ice_find_buf_table(ice_seg); + + ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", + le32_to_cpu(ice_buf_tbl->buf_count)); + + return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + le32_to_cpu(ice_buf_tbl->buf_count)); +} + +/** + * ice_init_pkg_info + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + * + * Saves off the package details into the HW structure. 
+ */ +static enum ice_status +ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_global_metadata_seg *meta_seg; + struct ice_generic_seg_hdr *seg_hdr; + + if (!pkg_hdr) + return ICE_ERR_PARAM; + + meta_seg = (struct ice_global_metadata_seg *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr); + if (meta_seg) { + hw->pkg_ver = meta_seg->pkg_ver; + memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name)); + + ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", + meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor, + meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft, + meta_seg->pkg_name); + } else { + ice_debug(hw, ICE_DBG_INIT, + "Did not find metadata segment in driver package\n"); + return ICE_ERR_CFG; + } + + seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); + if (seg_hdr) { + hw->ice_pkg_ver = seg_hdr->seg_ver; + memcpy(hw->ice_pkg_name, seg_hdr->seg_name, + sizeof(hw->ice_pkg_name)); + + ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n", + seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor, + seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft, + seg_hdr->seg_name); + } else { + ice_debug(hw, ICE_DBG_INIT, + "Did not find ice segment in driver package\n"); + return ICE_ERR_CFG; + } + + return 0; +} + +/** + * ice_get_pkg_info + * @hw: pointer to the hardware structure + * + * Store details of the package currently loaded in HW into the HW structure. + */ +static enum ice_status ice_get_pkg_info(struct ice_hw *hw) +{ + struct ice_aqc_get_pkg_info_resp *pkg_info; + enum ice_status status; + u16 size; + u32 i; + + size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) * + (ICE_PKG_CNT - 1)); + pkg_info = kzalloc(size, GFP_KERNEL); + if (!pkg_info) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); + if (status) + goto init_pkg_free_alloc; + + for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { +#define ICE_PKG_FLAG_COUNT 4 + char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; + u8 place = 0; + + if (pkg_info->pkg_info[i].is_active) { + flags[place++] = 'A'; + hw->active_pkg_ver = pkg_info->pkg_info[i].ver; + memcpy(hw->active_pkg_name, + pkg_info->pkg_info[i].name, + sizeof(hw->active_pkg_name)); + hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; + } + if (pkg_info->pkg_info[i].is_active_at_boot) + flags[place++] = 'B'; + if (pkg_info->pkg_info[i].is_modified) + flags[place++] = 'M'; + if (pkg_info->pkg_info[i].is_in_nvm) + flags[place++] = 'N'; + + ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", + i, pkg_info->pkg_info[i].ver.major, + pkg_info->pkg_info[i].ver.minor, + pkg_info->pkg_info[i].ver.update, + pkg_info->pkg_info[i].ver.draft, + pkg_info->pkg_info[i].name, flags); + } + +init_pkg_free_alloc: + kfree(pkg_info); + + return status; +} + +/** + * ice_verify_pkg - verify package + * @pkg: pointer to the package buffer + * @len: size of the package buffer + * + * Verifies various attributes of the package file, including length, format + * version, and the requirement of at least one segment. 
+ */ +static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +{ + u32 seg_count; + u32 i; + + if (len < sizeof(*pkg)) + return ICE_ERR_BUF_TOO_SHORT; + + if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ || + pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR || + pkg->format_ver.update != ICE_PKG_FMT_VER_UPD || + pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT) + return ICE_ERR_CFG; + + /* pkg must have at least one segment */ + seg_count = le32_to_cpu(pkg->seg_count); + if (seg_count < 1) + return ICE_ERR_CFG; + + /* make sure segment array fits in package length */ + if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset))) + return ICE_ERR_BUF_TOO_SHORT; + + /* all segments must fit within length */ + for (i = 0; i < seg_count; i++) { + u32 off = le32_to_cpu(pkg->seg_offset[i]); + struct ice_generic_seg_hdr *seg; + + /* segment header must fit */ + if (len < off + sizeof(*seg)) + return ICE_ERR_BUF_TOO_SHORT; + + seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); + + /* segment body must fit */ + if (len < off + le32_to_cpu(seg->seg_size)) + return ICE_ERR_BUF_TOO_SHORT; + } + + return 0; +} + +/** + * ice_free_seg - free package segment pointer + * @hw: pointer to the hardware structure + * + * Frees the package segment pointer in the proper manner, depending on if the + * segment was allocated or just the passed in pointer was stored. + */ +void ice_free_seg(struct ice_hw *hw) +{ + if (hw->pkg_copy) { + devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); + hw->pkg_copy = NULL; + hw->pkg_size = 0; + } + hw->seg = NULL; +} + +/** + * ice_chk_pkg_version - check package version for compatibility with driver + * @pkg_ver: pointer to a version structure to check + * + * Check to make sure that the package about to be downloaded is compatible with + * the driver. To be compatible, the major and minor components of the package + * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR + * definitions. + */ +static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +{ + if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || + pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) + return ICE_ERR_NOT_SUPPORTED; + + return 0; +} + +/** + * ice_init_pkg - initialize/download package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function initializes a package. The package contains HW tables + * required to do packet processing. First, the function extracts package + * information such as version. Then it finds the ice configuration segment + * within the package; this function then saves a copy of the segment pointer + * within the supplied package buffer. Next, the function will cache any hints + * from the package, followed by downloading the package itself. Note, that if + * a previous PF driver has already downloaded the package successfully, then + * the current driver will not have to download the package again. + * + * The local package contents will be used to query default behavior and to + * update specific sections of the HW's version of the package (e.g. to update + * the parse graph to understand new protocols). + * + * This function stores a pointer to the package buffer memory, and it is + * expected that the supplied buffer will not be freed immediately. If the + * package buffer needs to be freed, such as when read from a file, use + * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this + * case. 
+ */ +enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +{ + struct ice_pkg_hdr *pkg; + enum ice_status status; + struct ice_seg *seg; + + if (!buf || !len) + return ICE_ERR_PARAM; + + pkg = (struct ice_pkg_hdr *)buf; + status = ice_verify_pkg(pkg, len); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", + status); + return status; + } + + /* initialize package info */ + status = ice_init_pkg_info(hw, pkg); + if (status) + return status; + + /* before downloading the package, check package version for + * compatibility with driver + */ + status = ice_chk_pkg_version(&hw->pkg_ver); + if (status) + return status; + + /* find segment in given package */ + seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg); + if (!seg) { + ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); + return ICE_ERR_CFG; + } + + /* download package */ + status = ice_download_pkg(hw, seg); + if (status == ICE_ERR_AQ_NO_WORK) { + ice_debug(hw, ICE_DBG_INIT, + "package previously loaded - no work.\n"); + status = 0; + } + + /* Get information on the package currently loaded in HW, then make sure + * the driver is compatible with this version. + */ + if (!status) { + status = ice_get_pkg_info(hw); + if (!status) + status = ice_chk_pkg_version(&hw->active_pkg_ver); + } + + if (status) + ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", + status); + + return status; +} + +/** + * ice_copy_and_init_pkg - initialize/download a copy of the package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function copies the package buffer, and then calls ice_init_pkg() to + * initialize the copied package contents. + * + * The copying is necessary if the package buffer supplied is constant, or if + * the memory may disappear shortly after calling this function. + * + * If the package buffer resides in the data segment and can be modified, the + * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). + * + * However, if the package buffer needs to be copied first, such as when being + * read from a file, the caller should use ice_copy_and_init_pkg(). + * + * This function will first copy the package buffer, before calling + * ice_init_pkg(). The caller is free to immediately destroy the original + * package buffer, as the new copy will be managed by this function and + * related routines. + */ +enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) +{ + enum ice_status status; + u8 *buf_copy; + + if (!buf || !len) + return ICE_ERR_PARAM; + + buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); + + status = ice_init_pkg(hw, buf_copy, len); + if (status) { + /* Free the copy, since we failed to initialize the package */ + devm_kfree(ice_hw_to_dev(hw), buf_copy); + } else { + /* Track the copied pkg so we can free it later */ + hw->pkg_copy = buf_copy; + hw->pkg_size = len; + } + + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h new file mode 100644 index 000000000000..3843c462bc42 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. 
*/ + +#ifndef _ICE_FLEX_PIPE_H_ +#define _ICE_FLEX_PIPE_H_ + +#include "ice_type.h" + +/* Package minimal version supported */ +#define ICE_PKG_SUPP_VER_MAJ 1 +#define ICE_PKG_SUPP_VER_MNR 3 + +/* Package format version */ +#define ICE_PKG_FMT_VER_MAJ 1 +#define ICE_PKG_FMT_VER_MNR 0 +#define ICE_PKG_FMT_VER_UPD 0 +#define ICE_PKG_FMT_VER_DFT 0 + +#define ICE_PKG_CNT 4 + +enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); +enum ice_status +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); +void ice_free_seg(struct ice_hw *hw); +#endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h new file mode 100644 index 000000000000..b7fb90594faf --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -0,0 +1,372 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. */ + +#ifndef _ICE_FLEX_TYPE_H_ +#define _ICE_FLEX_TYPE_H_ +/* Extraction Sequence (Field Vector) Table */ +struct ice_fv_word { + u8 prot_id; + u16 off; /* Offset within the protocol header */ + u8 resvrd; +} __packed; + +#define ICE_MAX_FV_WORDS 48 +struct ice_fv { + struct ice_fv_word ew[ICE_MAX_FV_WORDS]; +}; + +/* Package and segment headers and tables */ +struct ice_pkg_hdr { + struct ice_pkg_ver format_ver; + __le32 seg_count; + __le32 seg_offset[1]; +}; + +/* generic segment */ +struct ice_generic_seg_hdr { +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE 0x00000010 + __le32 seg_type; + struct ice_pkg_ver seg_ver; + __le32 seg_size; + char seg_name[ICE_PKG_NAME_SIZE]; +}; + +/* ice specific segment */ + +union ice_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct ice_device_id_entry { + union ice_device_id device; + union ice_device_id sub_device; +}; + +struct ice_seg { + struct ice_generic_seg_hdr hdr; + __le32 device_table_count; + struct ice_device_id_entry device_table[1]; +}; + +struct ice_nvm_table { + __le32 table_count; + __le32 vers[1]; +}; + +struct ice_buf { +#define ICE_PKG_BUF_SIZE 4096 + u8 buf[ICE_PKG_BUF_SIZE]; +}; + +struct ice_buf_table { + __le32 buf_count; + struct ice_buf buf_array[1]; +}; + +/* global metadata specific segment */ +struct ice_global_metadata_seg { + struct ice_generic_seg_hdr hdr; + struct ice_pkg_ver pkg_ver; + __le32 track_id; + char pkg_name[ICE_PKG_NAME_SIZE]; +}; + +#define ICE_MIN_S_OFF 12 +#define ICE_MAX_S_OFF 4095 +#define ICE_MIN_S_SZ 1 +#define ICE_MAX_S_SZ 4084 + +/* section information */ +struct ice_section_entry { + __le32 type; + __le16 offset; + __le16 size; +}; + +#define ICE_MIN_S_COUNT 1 +#define ICE_MAX_S_COUNT 511 +#define ICE_MIN_S_DATA_END 12 +#define ICE_MAX_S_DATA_END 4096 + +#define ICE_METADATA_BUF 0x80000000 + +struct ice_buf_hdr { + __le16 section_count; + __le16 data_end; + struct ice_section_entry section_entry[1]; +}; + +#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ + sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz)) + +/* ice package section IDs */ +#define ICE_SID_XLT1_SW 12 +#define ICE_SID_XLT2_SW 13 +#define ICE_SID_PROFID_TCAM_SW 14 +#define ICE_SID_PROFID_REDIR_SW 15 +#define ICE_SID_FLD_VEC_SW 16 + +#define ICE_SID_XLT1_ACL 22 +#define ICE_SID_XLT2_ACL 23 +#define ICE_SID_PROFID_TCAM_ACL 24 +#define ICE_SID_PROFID_REDIR_ACL 25 +#define ICE_SID_FLD_VEC_ACL 26 + +#define ICE_SID_XLT1_FD 32 +#define ICE_SID_XLT2_FD 33 +#define ICE_SID_PROFID_TCAM_FD 34 +#define ICE_SID_PROFID_REDIR_FD 35 +#define 
ICE_SID_FLD_VEC_FD 36 + +#define ICE_SID_XLT1_RSS 42 +#define ICE_SID_XLT2_RSS 43 +#define ICE_SID_PROFID_TCAM_RSS 44 +#define ICE_SID_PROFID_REDIR_RSS 45 +#define ICE_SID_FLD_VEC_RSS 46 + +#define ICE_SID_RXPARSER_BOOST_TCAM 56 + +#define ICE_SID_XLT1_PE 82 +#define ICE_SID_XLT2_PE 83 +#define ICE_SID_PROFID_TCAM_PE 84 +#define ICE_SID_PROFID_REDIR_PE 85 +#define ICE_SID_FLD_VEC_PE 86 + +/* Label Metadata section IDs */ +#define ICE_SID_LBL_FIRST 0x80000010 +#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 +/* The following define MUST be updated to reflect the last label section ID */ +#define ICE_SID_LBL_LAST 0x80000038 + +enum ice_block { + ICE_BLK_SW = 0, + ICE_BLK_ACL, + ICE_BLK_FD, + ICE_BLK_RSS, + ICE_BLK_PE, + ICE_BLK_COUNT +}; + +/* package labels */ +struct ice_label { + __le16 value; +#define ICE_PKG_LABEL_SIZE 64 + char name[ICE_PKG_LABEL_SIZE]; +}; + +struct ice_label_section { + __le16 count; + struct ice_label label[1]; +}; + +#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ + sizeof(struct ice_label_section) - sizeof(struct ice_label), \ + sizeof(struct ice_label)) + +struct ice_sw_fv_section { + __le16 count; + __le16 base_offset; + struct ice_fv fv[1]; +}; + +/* The BOOST TCAM stores the match packet header in reverse order, meaning + * the fields are reversed; in addition, this means that the normally big endian + * fields of the packet are now little endian. + */ +struct ice_boost_key_value { +#define ICE_BOOST_REMAINING_HV_KEY 15 + u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; + __le16 hv_dst_port_key; + __le16 hv_src_port_key; + u8 tcam_search_key; +} __packed; + +struct ice_boost_key { + struct ice_boost_key_value key; + struct ice_boost_key_value key2; +}; + +/* package Boost TCAM entry */ +struct ice_boost_tcam_entry { + __le16 addr; + __le16 reserved; + /* break up the 40 bytes of key into different fields */ + struct ice_boost_key key; + u8 boost_hit_index_group; + /* The following contains bitfields which are not on byte boundaries. + * These fields are currently unused by driver software. + */ +#define ICE_BOOST_BIT_FIELDS 43 + u8 bit_fields[ICE_BOOST_BIT_FIELDS]; +}; + +struct ice_boost_tcam_section { + __le16 count; + __le16 reserved; + struct ice_boost_tcam_entry tcam[1]; +}; + +#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ + sizeof(struct ice_boost_tcam_section) - \ + sizeof(struct ice_boost_tcam_entry), \ + sizeof(struct ice_boost_tcam_entry)) + +struct ice_xlt1_section { + __le16 count; + __le16 offset; + u8 value[1]; +} __packed; + +struct ice_xlt2_section { + __le16 count; + __le16 offset; + __le16 value[1]; +}; + +struct ice_prof_redir_section { + __le16 count; + __le16 offset; + u8 redir_value[1]; +}; + +struct ice_pkg_enum { + struct ice_buf_table *buf_table; + u32 buf_idx; + + u32 type; + struct ice_buf_hdr *buf; + u32 sect_idx; + void *sect; + u32 sect_type; + + u32 entry_idx; + void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); +}; + +struct ice_es { + u32 sid; + u16 count; + u16 fvw; + u16 *ref_count; + struct list_head prof_map; + struct ice_fv_word *t; + struct mutex prof_map_lock; /* protect access to profiles list */ + u8 *written; + u8 reverse; /* set to true to reverse FV order */ +}; + +/* PTYPE Group management */ + +/* Note: XLT1 table takes 13-bit as input, and results in an 8-bit packet type + * group (PTG) ID as output. + * + * Note: PTG 0 is the default packet type group and it is assumed that all PTYPE + * are a part of this group until moved to a new PTG. 
+ */ +#define ICE_DEFAULT_PTG 0 + +struct ice_ptg_entry { + struct ice_ptg_ptype *first_ptype; + u8 in_use; +}; + +struct ice_ptg_ptype { + struct ice_ptg_ptype *next_ptype; + u8 ptg; +}; + +struct ice_vsig_entry { + struct list_head prop_lst; + struct ice_vsig_vsi *first_vsi; + u8 in_use; +}; + +struct ice_vsig_vsi { + struct ice_vsig_vsi *next_vsi; + u32 prop_mask; + u16 changed; + u16 vsig; +}; + +#define ICE_XLT1_CNT 1024 + +/* XLT1 Table */ +struct ice_xlt1 { + struct ice_ptg_entry *ptg_tbl; + struct ice_ptg_ptype *ptypes; + u8 *t; + u32 sid; + u16 count; +}; + +#define ICE_MAX_VSIGS 768 + +/* VSIG bit layout: + * [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS + * [13:15]: PF number of device + */ +#define ICE_VSIG_IDX_M (0x1FFF) +#define ICE_PF_NUM_S 13 +#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S) +#define ICE_VSIG_VALUE(vsig, pf_id) \ + (u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \ + (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M)) +#define ICE_DEFAULT_VSIG 0 + +/* XLT2 Table */ +struct ice_xlt2 { + struct ice_vsig_entry *vsig_tbl; + struct ice_vsig_vsi *vsis; + u16 *t; + u32 sid; + u16 count; +}; + +/* Keys are made up of two values, each one-half the size of the key. + * For TCAM, the entire key is 80 bits wide (or 2, 40-bit wide values) + */ +#define ICE_TCAM_KEY_VAL_SZ 5 +#define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ) + +struct ice_prof_tcam_entry { + __le16 addr; + u8 key[ICE_TCAM_KEY_SZ]; + u8 prof_id; +} __packed; + +struct ice_prof_id_section { + __le16 count; + struct ice_prof_tcam_entry entry[1]; +} __packed; + +struct ice_prof_tcam { + u32 sid; + u16 count; + u16 max_prof_id; + struct ice_prof_tcam_entry *t; + u8 cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */ +}; + +struct ice_prof_redir { + u8 *t; + u32 sid; + u16 count; +}; + +/* Tables per block */ +struct ice_blk_info { + struct ice_xlt1 xlt1; + struct ice_xlt2 xlt2; + struct ice_prof_tcam prof; + struct ice_prof_redir prof_redir; + struct ice_es es; + u8 overwrite; /* set to true to allow overwrite of table entries */ + u8 is_list_init; +}; + +#endif /* _ICE_FLEX_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 6f78ff5534af..152fbd556e9b 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -55,6 +55,8 @@ #define PRTDCB_GENS 0x00083020 #define PRTDCB_GENS_DCBX_STATUS_S 0 #define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0) +#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4)) +#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4)) #define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0) diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 7ec8a529b5cf..6667d17a4206 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -12,6 +12,7 @@ #include "ice_osdep.h" #include "ice_controlq.h" #include "ice_lan_tx_rx.h" +#include "ice_flex_type.h" static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { @@ -31,6 +32,7 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) #define ICE_DBG_LAN BIT_ULL(8) #define ICE_DBG_SW BIT_ULL(13) #define ICE_DBG_SCHED BIT_ULL(14) +#define ICE_DBG_PKG BIT_ULL(16) #define ICE_DBG_RES BIT_ULL(17) #define ICE_DBG_AQ_MSG BIT_ULL(24) #define ICE_DBG_AQ_CMD BIT_ULL(27) @@ -469,6 +471,30 @@ struct ice_hw { u8 
ucast_shared; /* true if VSIs can share unicast addr */ + /* Active package version (currently active) */ + struct ice_pkg_ver active_pkg_ver; + u8 active_pkg_name[ICE_PKG_NAME_SIZE]; + u8 active_pkg_in_nvm; + + enum ice_aq_err pkg_dwnld_status; + + /* Driver's package ver - (from the Metadata seg) */ + struct ice_pkg_ver pkg_ver; + u8 pkg_name[ICE_PKG_NAME_SIZE]; + + /* Driver's Ice package version (from the Ice seg) */ + struct ice_pkg_ver ice_pkg_ver; + u8 ice_pkg_name[ICE_PKG_NAME_SIZE]; + + /* Pointer to the ice segment */ + struct ice_seg *seg; + + /* Pointer to allocated copy of pkg memory */ + u8 *pkg_copy; + u32 pkg_size; + + /* HW block tables */ + struct ice_blk_info blk[ICE_BLK_COUNT]; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ -- cgit v1.2.3 From 32d63fa1e9f33ba5d0a3ad33790c65e1ab49dd13 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Mon, 9 Sep 2019 06:47:45 -0700 Subject: ice: Initialize DDP package structures Add functions to initialize, parse, and clean structures representing the DDP package. Upon completion of package download, read and store the DDP package contents to these structures. This configuration is used to identify the default behavior and later used to update the HW table entries. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 5 +- drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 945 ++++++++++++++++++++++++- drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 3 + drivers/net/ethernet/intel/ice/ice_flex_type.h | 2 + 4 files changed, 953 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 20956643036c..91472e049231 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -882,7 +882,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC); ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2); - + status = ice_init_hw_tbls(hw); + if (status) + goto err_unroll_fltr_mgmt_struct; return 0; err_unroll_fltr_mgmt_struct: @@ -911,6 +913,7 @@ void ice_deinit_hw(struct ice_hw *hw) ice_sched_cleanup_all(hw); ice_sched_clear_agg(hw); ice_free_seg(hw); + ice_free_hw_tbls(hw); if (hw->port_info) { devm_kfree(ice_hw_to_dev(hw), hw->port_info); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 4e965dc5eb93..cc8922bd61b5 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -4,6 +4,33 @@ #include "ice_common.h" #include "ice_flex_pipe.h" +static void ice_fill_blk_tbls(struct ice_hw *hw); + +/** + * ice_pkg_val_buf + * @buf: pointer to the ice buffer + * + * This helper function validates a buffer's header. 
+ */ +static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +{ + struct ice_buf_hdr *hdr; + u16 section_count; + u16 data_end; + + hdr = (struct ice_buf_hdr *)buf->buf; + /* verify data */ + section_count = le16_to_cpu(hdr->section_count); + if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) + return NULL; + + data_end = le16_to_cpu(hdr->data_end); + if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) + return NULL; + + return hdr; +} + /** * ice_find_buf_table * @ice_seg: pointer to the ice segment @@ -22,6 +49,117 @@ static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) (nvms->vers + le32_to_cpu(nvms->table_count)); } +/** + * ice_pkg_enum_buf + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This function will enumerate all the buffers in the ice segment. The first + * call is made with the ice_seg parameter non-NULL; on subsequent calls, + * ice_seg is set to NULL which continues the enumeration. When the function + * returns a NULL pointer, then the end of the buffers has been reached, or an + * unexpected value has been detected (for example an invalid section count or + * an invalid buffer end value). + */ +static struct ice_buf_hdr * +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (ice_seg) { + state->buf_table = ice_find_buf_table(ice_seg); + if (!state->buf_table) + return NULL; + + state->buf_idx = 0; + return ice_pkg_val_buf(state->buf_table->buf_array); + } + + if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) + return ice_pkg_val_buf(state->buf_table->buf_array + + state->buf_idx); + else + return NULL; +} + +/** + * ice_pkg_advance_sect + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This helper function will advance the section within the ice segment, + * also advancing the buffer if needed. + */ +static bool +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (!ice_seg && !state->buf) + return false; + + if (!ice_seg && state->buf) + if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) + return true; + + state->buf = ice_pkg_enum_buf(ice_seg, state); + if (!state->buf) + return false; + + /* start of new buffer, reset section index */ + state->sect_idx = 0; + return true; +} + +/** + * ice_pkg_enum_section + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * + * This function will enumerate all the sections of a particular type in the + * ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the matching + * sections has been reached. 
+ */ +static void * +ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type) +{ + u16 offset, size; + + if (ice_seg) + state->type = sect_type; + + if (!ice_pkg_advance_sect(ice_seg, state)) + return NULL; + + /* scan for next matching section */ + while (state->buf->section_entry[state->sect_idx].type != + cpu_to_le32(state->type)) + if (!ice_pkg_advance_sect(NULL, state)) + return NULL; + + /* validate section */ + offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); + if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) + return NULL; + + size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) + return NULL; + + /* make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) + return NULL; + + state->sect_type = + le32_to_cpu(state->buf->section_entry[state->sect_idx].type); + + /* calc pointer to this section */ + state->sect = ((u8 *)state->buf) + + le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); + + return state->sect; +} + /** * ice_acquire_global_cfg_lock * @hw: pointer to the HW structure @@ -458,6 +596,21 @@ void ice_free_seg(struct ice_hw *hw) hw->seg = NULL; } +/** + * ice_init_pkg_regs - initialize additional package registers + * @hw: pointer to the hardware structure + */ +static void ice_init_pkg_regs(struct ice_hw *hw) +{ +#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF +#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF +#define ICE_SW_BLK_IDX 0 + + /* setup Switch block input mask, which is 48-bits in two parts */ + wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); + wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); +} + /** * ice_chk_pkg_version - check package version for compatibility with driver * @pkg_ver: pointer to a version structure to check @@ -554,9 +707,18 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) status = ice_chk_pkg_version(&hw->active_pkg_ver); } - if (status) + if (!status) { + hw->seg = seg; + /* on successful package download update other required + * registers to support the package and fill HW tables + * with package content. + */ + ice_init_pkg_regs(hw); + ice_fill_blk_tbls(hw); + } else { ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", status); + } return status; } @@ -606,3 +768,784 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) return status; } + +/* PTG Management */ + +/** + * ice_ptg_find_ptype - Search for packet type group using packet type (ptype) + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to search for + * @ptg: pointer to variable that receives the PTG + * + * This function will search the PTGs for a particular ptype, returning the + * PTG ID that contains it through the PTG parameter, with the value of + * ICE_DEFAULT_PTG (0) meaning it is part the default PTG. + */ +static enum ice_status +ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) +{ + if (ptype >= ICE_XLT1_CNT || !ptg) + return ICE_ERR_PARAM; + + *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; + return 0; +} + +/** + * ice_ptg_alloc_val - Allocates a new packet type group ID by value + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptg: the PTG to allocate + * + * This function allocates a given packet type group ID specified by the PTG + * parameter. 
+ */ +static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) +{ + hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true; +} + +/** + * ice_ptg_remove_ptype - Removes ptype from a particular packet type group + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to remove + * @ptg: the PTG to remove the ptype from + * + * This function will remove the ptype from the specific PTG, and move it to + * the default PTG (ICE_DEFAULT_PTG). + */ +static enum ice_status +ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) +{ + struct ice_ptg_ptype **ch; + struct ice_ptg_ptype *p; + + if (ptype > ICE_XLT1_CNT - 1) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) + return ICE_ERR_DOES_NOT_EXIST; + + /* Should not happen if .in_use is set, bad config */ + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) + return ICE_ERR_CFG; + + /* find the ptype within this PTG, and bypass the link over it */ + p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + while (p) { + if (ptype == (p - hw->blk[blk].xlt1.ptypes)) { + *ch = p->next_ptype; + break; + } + + ch = &p->next_ptype; + p = p->next_ptype; + } + + hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG; + hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL; + + return 0; +} + +/** + * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to add or move + * @ptg: the PTG to add or move the ptype to + * + * This function will either add or move a ptype to a particular PTG depending + * on if the ptype is already part of another group. Note that using a + * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the + * default PTG. + */ +static enum ice_status +ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) +{ + enum ice_status status; + u8 original_ptg; + + if (ptype > ICE_XLT1_CNT - 1) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) + return ICE_ERR_DOES_NOT_EXIST; + + status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); + if (status) + return status; + + /* Is ptype already in the correct PTG? */ + if (original_ptg == ptg) + return 0; + + /* Remove from original PTG and move back to the default PTG */ + if (original_ptg != ICE_DEFAULT_PTG) + ice_ptg_remove_ptype(hw, blk, ptype, original_ptg); + + /* Moving to default PTG? 
Then we're done with this request */ + if (ptg == ICE_DEFAULT_PTG) + return 0; + + /* Add ptype to PTG at beginning of list */ + hw->blk[blk].xlt1.ptypes[ptype].next_ptype = + hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = + &hw->blk[blk].xlt1.ptypes[ptype]; + + hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg; + hw->blk[blk].xlt1.t[ptype] = ptg; + + return 0; +} + +/* Block / table size info */ +struct ice_blk_size_details { + u16 xlt1; /* # XLT1 entries */ + u16 xlt2; /* # XLT2 entries */ + u16 prof_tcam; /* # profile ID TCAM entries */ + u16 prof_id; /* # profile IDs */ + u8 prof_cdid_bits; /* # CDID one-hot bits used in key */ + u16 prof_redir; /* # profile redirection entries */ + u16 es; /* # extraction sequence entries */ + u16 fvw; /* # field vector words */ + u8 overwrite; /* overwrite existing entries allowed */ + u8 reverse; /* reverse FV order */ +}; + +static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = { + /** + * Table Definitions + * XLT1 - Number of entries in XLT1 table + * XLT2 - Number of entries in XLT2 table + * TCAM - Number of entries Profile ID TCAM table + * CDID - Control Domain ID of the hardware block + * PRED - Number of entries in the Profile Redirection Table + * FV - Number of entries in the Field Vector + * FVW - Width (in WORDs) of the Field Vector + * OVR - Overwrite existing table entries + * REV - Reverse FV + */ + /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */ + /* Overwrite , Reverse FV */ + /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48, + false, false }, + /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32, + false, false }, + /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, + false, true }, + /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, + true, true }, + /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24, + false, false }, +}; + +enum ice_sid_all { + ICE_SID_XLT1_OFF = 0, + ICE_SID_XLT2_OFF, + ICE_SID_PR_OFF, + ICE_SID_PR_REDIR_OFF, + ICE_SID_ES_OFF, + ICE_SID_OFF_COUNT, +}; + +/* VSIG Management */ + +/** + * ice_vsig_find_vsi - find a VSIG that contains a specified VSI + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI of interest + * @vsig: pointer to receive the VSI group + * + * This function will lookup the VSI entry in the XLT2 list and return + * the VSI group its associated with. + */ +static enum ice_status +ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) +{ + if (!vsig || vsi >= ICE_MAX_VSI) + return ICE_ERR_PARAM; + + /* As long as there's a default or valid VSIG associated with the input + * VSI, the functions returns a success. Any handling of VSIG will be + * done by the following add, update or remove functions. + */ + *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; + + return 0; +} + +/** + * ice_vsig_alloc_val - allocate a new VSIG by value + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsig: the VSIG to allocate + * + * This function will allocate a given VSIG specified by the VSIG parameter. 
+ */ +static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig) +{ + u16 idx = vsig & ICE_VSIG_IDX_M; + + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) { + INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); + hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true; + } + + return ICE_VSIG_VALUE(idx, hw->pf_id); +} + +/** + * ice_vsig_remove_vsi - remove VSI from VSIG + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI to remove + * @vsig: VSI group to remove from + * + * The function will remove the input VSI from its VSI group and move it + * to the DEFAULT_VSIG. + */ +static enum ice_status +ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) +{ + struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; + u16 idx; + + idx = vsig & ICE_VSIG_IDX_M; + + if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) + return ICE_ERR_DOES_NOT_EXIST; + + /* entry already in default VSIG, don't have to remove */ + if (idx == ICE_DEFAULT_VSIG) + return 0; + + vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; + if (!(*vsi_head)) + return ICE_ERR_CFG; + + vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; + vsi_cur = (*vsi_head); + + /* iterate the VSI list, skip over the entry to be removed */ + while (vsi_cur) { + if (vsi_tgt == vsi_cur) { + (*vsi_head) = vsi_cur->next_vsi; + break; + } + vsi_head = &vsi_cur->next_vsi; + vsi_cur = vsi_cur->next_vsi; + } + + /* verify if VSI was removed from group list */ + if (!vsi_cur) + return ICE_ERR_DOES_NOT_EXIST; + + vsi_cur->vsig = ICE_DEFAULT_VSIG; + vsi_cur->changed = 1; + vsi_cur->next_vsi = NULL; + + return 0; +} + +/** + * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI to move + * @vsig: destination VSI group + * + * This function will move or add the input VSI to the target VSIG. + * The function will find the original VSIG the VSI belongs to and + * move the entry to the DEFAULT_VSIG, update the original VSIG and + * then move entry to the new VSIG. + */ +static enum ice_status +ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) +{ + struct ice_vsig_vsi *tmp; + enum ice_status status; + u16 orig_vsig, idx; + + idx = vsig & ICE_VSIG_IDX_M; + + if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) + return ICE_ERR_PARAM; + + /* if VSIG not in use and VSIG is not default type this VSIG + * doesn't exist. 
+ */ + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && + vsig != ICE_DEFAULT_VSIG) + return ICE_ERR_DOES_NOT_EXIST; + + status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); + if (status) + return status; + + /* no update required if vsigs match */ + if (orig_vsig == vsig) + return 0; + + if (orig_vsig != ICE_DEFAULT_VSIG) { + /* remove entry from orig_vsig and add to default VSIG */ + status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig); + if (status) + return status; + } + + if (idx == ICE_DEFAULT_VSIG) + return 0; + + /* Create VSI entry and add VSIG and prop_mask values */ + hw->blk[blk].xlt2.vsis[vsi].vsig = vsig; + hw->blk[blk].xlt2.vsis[vsi].changed = 1; + + /* Add new entry to the head of the VSIG list */ + tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; + hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = + &hw->blk[blk].xlt2.vsis[vsi]; + hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp; + hw->blk[blk].xlt2.t[vsi] = vsig; + + return 0; +} + +/* Block / table section IDs */ +static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = { + /* SWITCH */ + { ICE_SID_XLT1_SW, + ICE_SID_XLT2_SW, + ICE_SID_PROFID_TCAM_SW, + ICE_SID_PROFID_REDIR_SW, + ICE_SID_FLD_VEC_SW + }, + + /* ACL */ + { ICE_SID_XLT1_ACL, + ICE_SID_XLT2_ACL, + ICE_SID_PROFID_TCAM_ACL, + ICE_SID_PROFID_REDIR_ACL, + ICE_SID_FLD_VEC_ACL + }, + + /* FD */ + { ICE_SID_XLT1_FD, + ICE_SID_XLT2_FD, + ICE_SID_PROFID_TCAM_FD, + ICE_SID_PROFID_REDIR_FD, + ICE_SID_FLD_VEC_FD + }, + + /* RSS */ + { ICE_SID_XLT1_RSS, + ICE_SID_XLT2_RSS, + ICE_SID_PROFID_TCAM_RSS, + ICE_SID_PROFID_REDIR_RSS, + ICE_SID_FLD_VEC_RSS + }, + + /* PE */ + { ICE_SID_XLT1_PE, + ICE_SID_XLT2_PE, + ICE_SID_PROFID_TCAM_PE, + ICE_SID_PROFID_REDIR_PE, + ICE_SID_FLD_VEC_PE + } +}; + +/** + * ice_init_sw_xlt1_db - init software XLT1 database from HW tables + * @hw: pointer to the hardware structure + * @blk: the HW block to initialize + */ +static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk) +{ + u16 pt; + + for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) { + u8 ptg; + + ptg = hw->blk[blk].xlt1.t[pt]; + if (ptg != ICE_DEFAULT_PTG) { + ice_ptg_alloc_val(hw, blk, ptg); + ice_ptg_add_mv_ptype(hw, blk, pt, ptg); + } + } +} + +/** + * ice_init_sw_xlt2_db - init software XLT2 database from HW tables + * @hw: pointer to the hardware structure + * @blk: the HW block to initialize + */ +static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk) +{ + u16 vsi; + + for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) { + u16 vsig; + + vsig = hw->blk[blk].xlt2.t[vsi]; + if (vsig) { + ice_vsig_alloc_val(hw, blk, vsig); + ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); + /* no changes at this time, since this has been + * initialized from the original package + */ + hw->blk[blk].xlt2.vsis[vsi].changed = 0; + } + } +} + +/** + * ice_init_sw_db - init software database from HW tables + * @hw: pointer to the hardware structure + */ +static void ice_init_sw_db(struct ice_hw *hw) +{ + u16 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + ice_init_sw_xlt1_db(hw, (enum ice_block)i); + ice_init_sw_xlt2_db(hw, (enum ice_block)i); + } +} + +/** + * ice_fill_tbl - Reads content of a single table type into database + * @hw: pointer to the hardware structure + * @block_id: Block ID of the table to copy + * @sid: Section ID of the table to copy + * + * Will attempt to read the entire content of a given table of a single block + * into the driver database. We assume that the buffer will always + * be as large or larger than the data contained in the package. 
If + * this condition is not met, there is most likely an error in the package + * contents. + */ +static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) +{ + u32 dst_len, sect_len, offset = 0; + struct ice_prof_redir_section *pr; + struct ice_prof_id_section *pid; + struct ice_xlt1_section *xlt1; + struct ice_xlt2_section *xlt2; + struct ice_sw_fv_section *es; + struct ice_pkg_enum state; + u8 *src, *dst; + void *sect; + + /* if the HW segment pointer is null then the first iteration of + * ice_pkg_enum_section() will fail. In this case the HW tables will + * not be filled and return success. + */ + if (!hw->seg) { + ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n"); + return; + } + + memset(&state, 0, sizeof(state)); + + sect = ice_pkg_enum_section(hw->seg, &state, sid); + + while (sect) { + switch (sid) { + case ICE_SID_XLT1_SW: + case ICE_SID_XLT1_FD: + case ICE_SID_XLT1_RSS: + case ICE_SID_XLT1_ACL: + case ICE_SID_XLT1_PE: + xlt1 = (struct ice_xlt1_section *)sect; + src = xlt1->value; + sect_len = le16_to_cpu(xlt1->count) * + sizeof(*hw->blk[block_id].xlt1.t); + dst = hw->blk[block_id].xlt1.t; + dst_len = hw->blk[block_id].xlt1.count * + sizeof(*hw->blk[block_id].xlt1.t); + break; + case ICE_SID_XLT2_SW: + case ICE_SID_XLT2_FD: + case ICE_SID_XLT2_RSS: + case ICE_SID_XLT2_ACL: + case ICE_SID_XLT2_PE: + xlt2 = (struct ice_xlt2_section *)sect; + src = (__force u8 *)xlt2->value; + sect_len = le16_to_cpu(xlt2->count) * + sizeof(*hw->blk[block_id].xlt2.t); + dst = (u8 *)hw->blk[block_id].xlt2.t; + dst_len = hw->blk[block_id].xlt2.count * + sizeof(*hw->blk[block_id].xlt2.t); + break; + case ICE_SID_PROFID_TCAM_SW: + case ICE_SID_PROFID_TCAM_FD: + case ICE_SID_PROFID_TCAM_RSS: + case ICE_SID_PROFID_TCAM_ACL: + case ICE_SID_PROFID_TCAM_PE: + pid = (struct ice_prof_id_section *)sect; + src = (u8 *)pid->entry; + sect_len = le16_to_cpu(pid->count) * + sizeof(*hw->blk[block_id].prof.t); + dst = (u8 *)hw->blk[block_id].prof.t; + dst_len = hw->blk[block_id].prof.count * + sizeof(*hw->blk[block_id].prof.t); + break; + case ICE_SID_PROFID_REDIR_SW: + case ICE_SID_PROFID_REDIR_FD: + case ICE_SID_PROFID_REDIR_RSS: + case ICE_SID_PROFID_REDIR_ACL: + case ICE_SID_PROFID_REDIR_PE: + pr = (struct ice_prof_redir_section *)sect; + src = pr->redir_value; + sect_len = le16_to_cpu(pr->count) * + sizeof(*hw->blk[block_id].prof_redir.t); + dst = hw->blk[block_id].prof_redir.t; + dst_len = hw->blk[block_id].prof_redir.count * + sizeof(*hw->blk[block_id].prof_redir.t); + break; + case ICE_SID_FLD_VEC_SW: + case ICE_SID_FLD_VEC_FD: + case ICE_SID_FLD_VEC_RSS: + case ICE_SID_FLD_VEC_ACL: + case ICE_SID_FLD_VEC_PE: + es = (struct ice_sw_fv_section *)sect; + src = (u8 *)es->fv; + sect_len = (u32)(le16_to_cpu(es->count) * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + dst = (u8 *)hw->blk[block_id].es.t; + dst_len = (u32)(hw->blk[block_id].es.count * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + break; + default: + return; + } + + /* if the section offset exceeds destination length, terminate + * table fill. + */ + if (offset > dst_len) + return; + + /* if the sum of section size and offset exceed destination size + * then we are out of bounds of the HW table size for that PF. + * Changing section length to fill the remaining table space + * of that PF. 
+ */ + if ((offset + sect_len) > dst_len) + sect_len = dst_len - offset; + + memcpy(dst + offset, src, sect_len); + offset += sect_len; + sect = ice_pkg_enum_section(NULL, &state, sid); + } +} + +/** + * ice_fill_blk_tbls - Read package context for tables + * @hw: pointer to the hardware structure + * + * Reads the current package contents and populates the driver + * database with the data iteratively for all advanced feature + * blocks. Assume that the HW tables have been allocated. + */ +static void ice_fill_blk_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + enum ice_block blk_id = (enum ice_block)i; + + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); + } + + ice_init_sw_db(hw); +} + +/** + * ice_free_hw_tbls - free hardware table memory + * @hw: pointer to the hardware structure + */ +void ice_free_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + hw->blk[i].is_list_init = false; + + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written); + } + + memset(hw->blk, 0, sizeof(hw->blk)); +} + +/** + * ice_clear_hw_tbls - clear HW tables and flow profiles + * @hw: pointer to the hardware structure + */ +void ice_clear_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_tcam *prof = &hw->blk[i].prof; + struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; + struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; + struct ice_es *es = &hw->blk[i].es; + + memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes)); + memset(xlt1->ptg_tbl, 0, + ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl)); + memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t)); + + memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis)); + memset(xlt2->vsig_tbl, 0, + xlt2->count * sizeof(*xlt2->vsig_tbl)); + memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t)); + + memset(prof->t, 0, prof->count * sizeof(*prof->t)); + memset(prof_redir->t, 0, + prof_redir->count * sizeof(*prof_redir->t)); + + memset(es->t, 0, es->count * sizeof(*es->t)); + memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); + memset(es->written, 0, es->count * sizeof(*es->written)); + } +} + +/** + * ice_init_hw_tbls - init hardware table memory + * @hw: pointer to the hardware structure + */ +enum ice_status ice_init_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_tcam *prof = &hw->blk[i].prof; + struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; + struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; + struct ice_es *es = &hw->blk[i].es; + u16 j; + + if (hw->blk[i].is_list_init) + continue; + + hw->blk[i].is_list_init = true; + + hw->blk[i].overwrite = 
blk_sizes[i].overwrite; + es->reverse = blk_sizes[i].reverse; + + xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; + xlt1->count = blk_sizes[i].xlt1; + + xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, + sizeof(*xlt1->ptypes), GFP_KERNEL); + + if (!xlt1->ptypes) + goto err; + + xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS, + sizeof(*xlt1->ptg_tbl), + GFP_KERNEL); + + if (!xlt1->ptg_tbl) + goto err; + + xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, + sizeof(*xlt1->t), GFP_KERNEL); + if (!xlt1->t) + goto err; + + xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; + xlt2->count = blk_sizes[i].xlt2; + + xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->vsis), GFP_KERNEL); + + if (!xlt2->vsis) + goto err; + + xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->vsig_tbl), + GFP_KERNEL); + if (!xlt2->vsig_tbl) + goto err; + + for (j = 0; j < xlt2->count; j++) + INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); + + xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->t), GFP_KERNEL); + if (!xlt2->t) + goto err; + + prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; + prof->count = blk_sizes[i].prof_tcam; + prof->max_prof_id = blk_sizes[i].prof_id; + prof->cdid_bits = blk_sizes[i].prof_cdid_bits; + prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count, + sizeof(*prof->t), GFP_KERNEL); + + if (!prof->t) + goto err; + + prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; + prof_redir->count = blk_sizes[i].prof_redir; + prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw), + prof_redir->count, + sizeof(*prof_redir->t), + GFP_KERNEL); + + if (!prof_redir->t) + goto err; + + es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; + es->count = blk_sizes[i].es; + es->fvw = blk_sizes[i].fvw; + es->t = devm_kcalloc(ice_hw_to_dev(hw), + (u32)(es->count * es->fvw), + sizeof(*es->t), GFP_KERNEL); + if (!es->t) + goto err; + + es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->ref_count), + GFP_KERNEL); + + es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->written), GFP_KERNEL); + if (!es->ref_count) + goto err; + } + return 0; + +err: + ice_free_hw_tbls(hw); + return ICE_ERR_NO_MEMORY; +} diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 3843c462bc42..9edf1e7589c7 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -21,5 +21,8 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); +enum ice_status ice_init_hw_tbls(struct ice_hw *hw); void ice_free_seg(struct ice_hw *hw); +void ice_clear_hw_tbls(struct ice_hw *hw); +void ice_free_hw_tbls(struct ice_hw *hw); #endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index b7fb90594faf..5d5a7eaffa30 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -294,6 +294,7 @@ struct ice_vsig_vsi { }; #define ICE_XLT1_CNT 1024 +#define ICE_MAX_PTGS 256 /* XLT1 Table */ struct ice_xlt1 { @@ -304,6 +305,7 @@ struct ice_xlt1 { u16 count; }; +#define ICE_XLT2_CNT 768 #define ICE_MAX_VSIGS 768 /* VSIG bit layout: -- cgit v1.2.3 From 462acf6aca85cd4ee3e475f01240144c314f562c Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Mon, 9 Sep 2019 06:47:46 -0700 Subject: ice: Enable DDP package download 
Attempt to request an optional device-specific DDP package file (one with the PCIe Device Serial Number in its name so that different DDP package files can be used on different devices). If the optional package file exists, download it to the device. If not, download the default package file. Log an appropriate message based on whether or not a DDP package file exists and the return code from the attempt to download it to the device. If the download fails and there is not already a package file on the device, go into "Safe Mode" where some features are not supported. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 14 + drivers/net/ethernet/intel/ice/ice_common.c | 69 +++ drivers/net/ethernet/intel/ice/ice_common.h | 2 + drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 42 ++ drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 2 + drivers/net/ethernet/intel/ice/ice_ethtool.c | 27 + drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 4 +- drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 1 + drivers/net/ethernet/intel/ice/ice_lib.c | 92 ++-- drivers/net/ethernet/intel/ice/ice_lib.h | 1 + drivers/net/ethernet/intel/ice/ice_main.c | 614 ++++++++++++++++++----- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 6 + 12 files changed, 679 insertions(+), 195 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 3a3e69a2bc5a..45e100666049 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -307,6 +308,7 @@ enum ice_pf_flags { ICE_FLAG_SRIOV_CAPABLE, ICE_FLAG_DCB_CAPABLE, ICE_FLAG_DCB_ENA, + ICE_FLAG_ADV_FEATURES, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, ICE_FLAG_NO_MEDIA, ICE_FLAG_FW_LLDP_AGENT, @@ -404,6 +406,17 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, wr32(hw, GLINT_DYN_CTL(vector), val); } +/** + * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev + * @netdev: pointer to the netdev struct + */ +static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + return np->vsi->back; +} + /** * ice_get_main_vsi - Get the PF VSI * @pf: PF instance @@ -421,6 +434,7 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); +void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); u16 ice_get_avail_txq_count(struct ice_pf *pf); u16 ice_get_avail_rxq_count(struct ice_pf *pf); void ice_update_vsi_stats(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 91472e049231..3a6b3950eb0e 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1846,6 +1846,75 @@ ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) return status; } +/** + * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode + * @hw: pointer to the hardware structure + */ +void ice_set_safe_mode_caps(struct ice_hw *hw) +{ + struct ice_hw_func_caps *func_caps = &hw->func_caps; + struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; + u32 valid_func, rxq_first_id, txq_first_id; + u32 msix_vector_first_id, max_mtu; + u32 num_func = 0; + u8 i; + + /* 
cache some func_caps values that should be restored after memset */ + valid_func = func_caps->common_cap.valid_functions; + txq_first_id = func_caps->common_cap.txq_first_id; + rxq_first_id = func_caps->common_cap.rxq_first_id; + msix_vector_first_id = func_caps->common_cap.msix_vector_first_id; + max_mtu = func_caps->common_cap.max_mtu; + + /* unset func capabilities */ + memset(func_caps, 0, sizeof(*func_caps)); + + /* restore cached values */ + func_caps->common_cap.valid_functions = valid_func; + func_caps->common_cap.txq_first_id = txq_first_id; + func_caps->common_cap.rxq_first_id = rxq_first_id; + func_caps->common_cap.msix_vector_first_id = msix_vector_first_id; + func_caps->common_cap.max_mtu = max_mtu; + + /* one Tx and one Rx queue in safe mode */ + func_caps->common_cap.num_rxq = 1; + func_caps->common_cap.num_txq = 1; + + /* two MSIX vectors, one for traffic and one for misc causes */ + func_caps->common_cap.num_msix_vectors = 2; + func_caps->guar_num_vsi = 1; + + /* cache some dev_caps values that should be restored after memset */ + valid_func = dev_caps->common_cap.valid_functions; + txq_first_id = dev_caps->common_cap.txq_first_id; + rxq_first_id = dev_caps->common_cap.rxq_first_id; + msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id; + max_mtu = dev_caps->common_cap.max_mtu; + + /* unset dev capabilities */ + memset(dev_caps, 0, sizeof(*dev_caps)); + + /* restore cached values */ + dev_caps->common_cap.valid_functions = valid_func; + dev_caps->common_cap.txq_first_id = txq_first_id; + dev_caps->common_cap.rxq_first_id = rxq_first_id; + dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id; + dev_caps->common_cap.max_mtu = max_mtu; + + /* valid_func is a bitmap. get number of functions */ +#define ICE_MAX_FUNCS 8 + for (i = 0; i < ICE_MAX_FUNCS; i++) + if (valid_func & BIT(i)) + num_func++; + + /* one Tx and one Rx queue per function in safe mode */ + dev_caps->common_cap.num_rxq = num_func; + dev_caps->common_cap.num_txq = num_func; + + /* two MSIX vectors per function */ + dev_caps->common_cap.num_msix_vectors = 2 * num_func; +} + /** * ice_get_caps - get info about the HW * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index ce55f8ae6b52..c3df92f57777 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -42,6 +42,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, void ice_clear_pxe_mode(struct ice_hw *hw); enum ice_status ice_get_caps(struct ice_hw *hw); +void ice_set_safe_mode_caps(struct ice_hw *hw); + void ice_dev_onetime_setup(struct ice_hw *hw); enum ice_status diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 97c22d4aae1d..dd47869c4ad4 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -3,6 +3,48 @@ #include "ice_dcb_lib.h" +/** + * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration + * @vsi: the VSI being configured + * @ena_tc: TC map to be enabled + */ +void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) +{ + struct net_device *netdev = vsi->netdev; + struct ice_pf *pf = vsi->back; + struct ice_dcbx_cfg *dcbcfg; + u8 netdev_tc; + int i; + + if (!netdev) + return; + + if (!ena_tc) { + netdev_reset_tc(netdev); + return; + } + + if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) + return; + + dcbcfg = &pf->hw.port_info->local_dcbx_cfg; + + 
ice_for_each_traffic_class(i) + if (vsi->tc_cfg.ena_tc & BIT(i)) + netdev_set_tc_queue(netdev, + vsi->tc_cfg.tc_info[i].netdev_tc, + vsi->tc_cfg.tc_info[i].qcount_tx, + vsi->tc_cfg.tc_info[i].qoffset); + + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { + u8 ets_tc = dcbcfg->etscfg.prio_table[i]; + + /* Get the mapped netdev TC# for the UP */ + netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; + netdev_set_prio_tc_map(netdev, i, netdev_tc); + } +} + /** * ice_dcb_get_ena_tc - return bitmap of enabled TCs * @dcbcfg: DCB config to evaluate for enabled TCs diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 819081053ff5..661a6f7bca64 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -22,6 +22,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event); +void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { @@ -58,5 +59,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring, #define ice_vsi_cfg_dcb_rings(vsi) do {} while (0) #define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0) #define ice_set_cgd_num(tlan_ctx, ring) do {} while (0) +#define ice_vsi_cfg_netdev_tc(vsi, ena_tc) do {} while (0) #endif /* CONFIG_DCB */ #endif /* _ICE_DCB_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index a16b461b46bb..7e23034df955 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3435,6 +3435,33 @@ static const struct ethtool_ops ice_ethtool_ops = { .set_fecparam = ice_set_fecparam, }; +static const struct ethtool_ops ice_ethtool_safe_mode_ops = { + .get_link_ksettings = ice_get_link_ksettings, + .set_link_ksettings = ice_set_link_ksettings, + .get_drvinfo = ice_get_drvinfo, + .get_regs_len = ice_get_regs_len, + .get_regs = ice_get_regs, + .get_msglevel = ice_get_msglevel, + .set_msglevel = ice_set_msglevel, + .get_eeprom_len = ice_get_eeprom_len, + .get_eeprom = ice_get_eeprom, + .get_strings = ice_get_strings, + .get_ethtool_stats = ice_get_ethtool_stats, + .get_sset_count = ice_get_sset_count, + .get_ringparam = ice_get_ringparam, + .set_ringparam = ice_set_ringparam, + .nway_reset = ice_nway_reset, +}; + +/** + * ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops + * @netdev: network interface device structure + */ +void ice_set_ethtool_safe_mode_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ice_ethtool_safe_mode_ops; +} + /** * ice_set_ethtool_ops - setup netdev ethtool ops * @netdev: network interface device structure diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index cc8922bd61b5..cbd53b586c36 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -4,8 +4,6 @@ #include "ice_common.h" #include "ice_flex_pipe.h" -static void ice_fill_blk_tbls(struct ice_hw *hw); - /** * ice_pkg_val_buf * @buf: pointer to the ice buffer @@ -1358,7 +1356,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) * database with the data iteratively for all advanced feature * blocks. Assume that the HW tables have been allocated. 
*/ -static void ice_fill_blk_tbls(struct ice_hw *hw) +void ice_fill_blk_tbls(struct ice_hw *hw) { u8 i; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 9edf1e7589c7..37eb282742d1 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -23,6 +23,7 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); enum ice_status ice_init_hw_tbls(struct ice_hw *hw); void ice_free_seg(struct ice_hw *hw); +void ice_fill_blk_tbls(struct ice_hw *hw); void ice_clear_hw_tbls(struct ice_hw *hw); void ice_free_hw_tbls(struct ice_hw *hw); #endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 9680692bf27c..cc755382df25 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -752,6 +752,17 @@ void ice_vsi_put_qs(struct ice_vsi *vsi) mutex_unlock(&pf->avail_q_mutex); } +/** + * ice_is_safe_mode + * @pf: pointer to the PF struct + * + * returns true if driver is in safe mode, false otherwise + */ +bool ice_is_safe_mode(struct ice_pf *pf) +{ + return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags); +} + /** * ice_rss_clean - Delete RSS related VSI structures that hold user inputs * @vsi: the VSI being removed @@ -2629,15 +2640,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, * DCB settings in the HW. Also, if the FW DCBX engine is not running * then Rx LLDP packets need to be redirected up the stack. */ - if (vsi->type == ICE_VSI_PF) { - ice_vsi_add_rem_eth_mac(vsi, true); + if (!ice_is_safe_mode(pf)) { + if (vsi->type == ICE_VSI_PF) { + ice_vsi_add_rem_eth_mac(vsi, true); - /* Tx LLDP packets */ - ice_cfg_sw_lldp(vsi, true, true); + /* Tx LLDP packets */ + ice_cfg_sw_lldp(vsi, true, true); - /* Rx LLDP packets */ - if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) - ice_cfg_sw_lldp(vsi, false, true); + /* Rx LLDP packets */ + if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) + ice_cfg_sw_lldp(vsi, false, true); + } } return vsi; @@ -2905,8 +2918,11 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) } /* disable each interrupt */ - ice_for_each_q_vector(vsi, i) + ice_for_each_q_vector(vsi, i) { + if (!vsi->q_vectors[i]) + continue; wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); + } ice_flush(hw); @@ -2975,14 +2991,16 @@ int ice_vsi_release(struct ice_vsi *vsi) pf->num_avail_sw_msix += vsi->num_q_vectors; } - if (vsi->type == ICE_VSI_PF) { - ice_vsi_add_rem_eth_mac(vsi, false); - ice_cfg_sw_lldp(vsi, true, false); - /* The Rx rule will only exist to remove if the LLDP FW - * engine is currently stopped - */ - if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) - ice_cfg_sw_lldp(vsi, false, false); + if (!ice_is_safe_mode(pf)) { + if (vsi->type == ICE_VSI_PF) { + ice_vsi_add_rem_eth_mac(vsi, false); + ice_cfg_sw_lldp(vsi, true, false); + /* The Rx rule will only exist to remove if the LLDP FW + * engine is currently stopped + */ + if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) + ice_cfg_sw_lldp(vsi, false, false); + } } ice_remove_vsi_fltr(&pf->hw, vsi->idx); @@ -3168,48 +3186,6 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) sizeof(vsi->info.tc_mapping)); } -/** - * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration - * @vsi: the VSI being configured - * @ena_tc: TC map to be enabled - */ -static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) -{ - struct net_device 
*netdev = vsi->netdev; - struct ice_pf *pf = vsi->back; - struct ice_dcbx_cfg *dcbcfg; - u8 netdev_tc; - int i; - - if (!netdev) - return; - - if (!ena_tc) { - netdev_reset_tc(netdev); - return; - } - - if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) - return; - - dcbcfg = &pf->hw.port_info->local_dcbx_cfg; - - ice_for_each_traffic_class(i) - if (vsi->tc_cfg.ena_tc & BIT(i)) - netdev_set_tc_queue(netdev, - vsi->tc_cfg.tc_info[i].netdev_tc, - vsi->tc_cfg.tc_info[i].qcount_tx, - vsi->tc_cfg.tc_info[i].qoffset); - - for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { - u8 ets_tc = dcbcfg->etscfg.prio_table[i]; - - /* Get the mapped netdev TC# for the UP */ - netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; - netdev_set_prio_tc_map(netdev, i, netdev_tc); - } -} - /** * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map * @vsi: VSI to be configured diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 87f7f5422b46..47bc033fff20 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -125,4 +125,5 @@ char *ice_nvm_version_str(struct ice_hw *hw); enum ice_status ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); +bool ice_is_safe_mode(struct ice_pf *pf); #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index ff295cb54cfd..a9efc918c625 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -21,10 +21,15 @@ const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; +/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */ +#define ICE_DDP_PKG_PATH "intel/ice/ddp/" +#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" + MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); +MODULE_FIRMWARE(ICE_DDP_PKG_FILE); static int debug = -1; module_param(debug, int, 0644); @@ -35,9 +40,10 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); #endif /* !CONFIG_DYNAMIC_DEBUG */ static struct workqueue_struct *ice_wq; +static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; -static void ice_rebuild(struct ice_pf *pf); +static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); static void ice_vsi_release_all(struct ice_pf *pf); @@ -497,6 +503,8 @@ ice_prepare_for_reset(struct ice_pf *pf) for (i = 0; i < pf->num_alloc_vfs; i++) ice_set_vf_state_qs_dis(&pf->vf[i]); + /* clear SW filtering DB */ + ice_clear_hw_tbls(hw); /* disable the VSIs and their queues that are not already DOWN */ ice_pf_dis_all_vsi(pf, false); @@ -542,7 +550,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) */ if (reset_type == ICE_RESET_PFR) { pf->pfr_count++; - ice_rebuild(pf); + ice_rebuild(pf, reset_type); clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); clear_bit(__ICE_PFR_REQ, pf->state); ice_reset_all_vfs(pf, true); @@ -586,7 +594,7 @@ static void ice_reset_subtask(struct ice_pf *pf) } else { /* done with reset. 
start rebuild */ pf->hw.reset_ongoing = false; - ice_rebuild(pf); + ice_rebuild(pf, reset_type); /* clear bit to resume normal operations, but * ICE_NEEDS_RESTART bit is set in case rebuild failed */ @@ -1496,13 +1504,19 @@ static void ice_service_task(struct work_struct *work) return; } + ice_clean_adminq_subtask(pf); ice_check_media_subtask(pf); ice_check_for_hang_subtask(pf); ice_sync_fltr_subtask(pf); ice_handle_mdd_event(pf); - ice_process_vflr_event(pf); ice_watchdog_subtask(pf); - ice_clean_adminq_subtask(pf); + + if (ice_is_safe_mode(pf)) { + ice_service_task_complete(pf); + return; + } + + ice_process_vflr_event(pf); ice_clean_mailboxq_subtask(pf); /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ @@ -1937,30 +1951,41 @@ static void ice_napi_add(struct ice_vsi *vsi) } /** - * ice_cfg_netdev - Allocate, configure and register a netdev - * @vsi: the VSI associated with the new netdev - * - * Returns 0 on success, negative value on failure + * ice_set_ops - set netdev and ethtools ops for the given netdev + * @netdev: netdev instance */ -static int ice_cfg_netdev(struct ice_vsi *vsi) +static void ice_set_ops(struct net_device *netdev) { + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + if (ice_is_safe_mode(pf)) { + netdev->netdev_ops = &ice_netdev_safe_mode_ops; + ice_set_ethtool_safe_mode_ops(netdev); + return; + } + + netdev->netdev_ops = &ice_netdev_ops; + ice_set_ethtool_ops(netdev); +} + +/** + * ice_set_netdev_features - set features for the given netdev + * @netdev: netdev instance + */ +static void ice_set_netdev_features(struct net_device *netdev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); netdev_features_t csumo_features; netdev_features_t vlano_features; netdev_features_t dflt_features; netdev_features_t tso_features; - struct ice_netdev_priv *np; - struct net_device *netdev; - u8 mac_addr[ETH_ALEN]; - int err; - - netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, - vsi->alloc_rxq); - if (!netdev) - return -ENOMEM; - vsi->netdev = netdev; - np = netdev_priv(netdev); - np->vsi = vsi; + if (ice_is_safe_mode(pf)) { + /* safe mode */ + netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; + netdev->hw_features = netdev->features; + return; + } dflt_features = NETIF_F_SG | NETIF_F_HIGHDMA | @@ -1988,25 +2013,50 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) tso_features; netdev->vlan_features |= dflt_features | csumo_features | tso_features; +} + +/** + * ice_cfg_netdev - Allocate, configure and register a netdev + * @vsi: the VSI associated with the new netdev + * + * Returns 0 on success, negative value on failure + */ +static int ice_cfg_netdev(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_netdev_priv *np; + struct net_device *netdev; + u8 mac_addr[ETH_ALEN]; + int err; + + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, + vsi->alloc_rxq); + if (!netdev) + return -ENOMEM; + + vsi->netdev = netdev; + np = netdev_priv(netdev); + np->vsi = vsi; + + ice_set_netdev_features(netdev); + + ice_set_ops(netdev); if (vsi->type == ICE_VSI_PF) { - SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev); + SET_NETDEV_DEV(netdev, &pf->pdev->dev); ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); - ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); } netdev->priv_flags |= IFF_UNICAST_FLT; - /* assign netdev_ops */ - netdev->netdev_ops = &ice_netdev_ops; + /* Setup netdev TC information */ + ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); /* setup watchdog timeout value to be 5 second */ 
netdev->watchdog_timeo = 5 * HZ; - ice_set_ethtool_ops(netdev); - netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = ICE_MAX_MTU; @@ -2264,29 +2314,41 @@ static void ice_deinit_pf(struct ice_pf *pf) } /** - * ice_init_pf - Initialize general software structures (struct ice_pf) - * @pf: board private structure to initialize + * ice_set_pf_caps - set PFs capability flags + * @pf: pointer to the PF instance */ -static int ice_init_pf(struct ice_pf *pf) +static void ice_set_pf_caps(struct ice_pf *pf) { - bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); - if (pf->hw.func_caps.common_cap.dcb) + struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; + + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + if (func_caps->common_cap.dcb) set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); #ifdef CONFIG_PCI_IOV - if (pf->hw.func_caps.common_cap.sr_iov_1_1) { - struct ice_hw *hw = &pf->hw; - + clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); + if (func_caps->common_cap.sr_iov_1_1) { set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); - pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs, + pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, ICE_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ + clear_bit(ICE_FLAG_RSS_ENA, pf->flags); + if (func_caps->common_cap.rss_table_size) + set_bit(ICE_FLAG_RSS_ENA, pf->flags); - mutex_init(&pf->sw_mutex); - mutex_init(&pf->avail_q_mutex); + pf->max_pf_txqs = func_caps->common_cap.num_txq; + pf->max_pf_rxqs = func_caps->common_cap.num_rxq; +} - if (pf->hw.func_caps.common_cap.rss_table_size) - set_bit(ICE_FLAG_RSS_ENA, pf->flags); +/** + * ice_init_pf - Initialize general software structures (struct ice_pf) + * @pf: board private structure to initialize + */ +static int ice_init_pf(struct ice_pf *pf) +{ + ice_set_pf_caps(pf); + + mutex_init(&pf->sw_mutex); /* setup service timer and periodic service task */ timer_setup(&pf->serv_tmr, ice_service_timer, 0); @@ -2294,9 +2356,7 @@ static int ice_init_pf(struct ice_pf *pf) INIT_WORK(&pf->serv_task, ice_service_task); clear_bit(__ICE_SERVICE_SCHED, pf->state); - pf->max_pf_txqs = pf->hw.func_caps.common_cap.num_txq; - pf->max_pf_rxqs = pf->hw.func_caps.common_cap.num_rxq; - + mutex_init(&pf->avail_q_mutex); pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); if (!pf->avail_txqs) return -ENOMEM; @@ -2449,6 +2509,163 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) return 0; } +/** + * ice_log_pkg_init - log result of DDP package load + * @hw: pointer to hardware info + * @status: status of package load + */ +static void +ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) +{ + struct ice_pf *pf = (struct ice_pf *)hw->back; + struct device *dev = &pf->pdev->dev; + + switch (*status) { + case ICE_SUCCESS: + /* The package download AdminQ command returned success because + * this download succeeded or ICE_ERR_AQ_NO_WORK since there is + * already a package loaded on the device. 
+ */ + if (hw->pkg_ver.major == hw->active_pkg_ver.major && + hw->pkg_ver.minor == hw->active_pkg_ver.minor && + hw->pkg_ver.update == hw->active_pkg_ver.update && + hw->pkg_ver.draft == hw->active_pkg_ver.draft && + !memcmp(hw->pkg_name, hw->active_pkg_name, + sizeof(hw->pkg_name))) { + if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) + dev_info(dev, + "DDP package already present on device: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + else + dev_info(dev, + "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { + dev_err(dev, + "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + *status = ICE_ERR_NOT_SUPPORTED; + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { + dev_info(dev, + "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft, + hw->pkg_name, + hw->pkg_ver.major, + hw->pkg_ver.minor, + hw->pkg_ver.update, + hw->pkg_ver.draft); + } else { + dev_err(dev, + "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); + *status = ICE_ERR_NOT_SUPPORTED; + } + break; + case ICE_ERR_BUF_TOO_SHORT: + /* fall-through */ + case ICE_ERR_CFG: + dev_err(dev, + "The DDP package file is invalid. Entering Safe Mode.\n"); + break; + case ICE_ERR_NOT_SUPPORTED: + /* Package File version not supported */ + if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || + (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) + dev_err(dev, + "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); + else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || + (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) + dev_err(dev, + "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_ERR_AQ_ERROR: + switch (hw->adminq.sq_last_status) { + case ICE_AQ_RC_ENOSEC: + case ICE_AQ_RC_EBADSIG: + dev_err(dev, + "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); + return; + case ICE_AQ_RC_ESVN: + dev_err(dev, + "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. 
Entering Safe Mode.\n"); + return; + case ICE_AQ_RC_EBADMAN: + case ICE_AQ_RC_EBADBUF: + dev_err(dev, + "An error occurred on the device while loading the DDP package. The device will be reset.\n"); + return; + default: + break; + } + /* fall-through */ + default: + dev_err(dev, + "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", + *status); + break; + } +} + +/** + * ice_load_pkg - load/reload the DDP Package file + * @firmware: firmware structure when firmware requested or NULL for reload + * @pf: pointer to the PF instance + * + * Called on probe and post CORER/GLOBR rebuild to load DDP Package and + * initialize HW tables. + */ +static void +ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) +{ + enum ice_status status = ICE_ERR_PARAM; + struct device *dev = &pf->pdev->dev; + struct ice_hw *hw = &pf->hw; + + /* Load DDP Package */ + if (firmware && !hw->pkg_copy) { + status = ice_copy_and_init_pkg(hw, firmware->data, + firmware->size); + ice_log_pkg_init(hw, &status); + } else if (!firmware && hw->pkg_copy) { + /* Reload package during rebuild after CORER/GLOBR reset */ + status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); + ice_log_pkg_init(hw, &status); + } else { + dev_err(dev, + "The DDP package file failed to load. Entering Safe Mode.\n"); + } + + if (status) { + /* Safe Mode */ + clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); + return; + } + + /* Successful download package is the precondition for advanced + * features, hence setting the ICE_FLAG_ADV_FEATURES flag + */ + set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); +} + /** * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines * @pf: pointer to the PF structure @@ -2484,6 +2701,86 @@ static enum ice_status ice_send_version(struct ice_pf *pf) return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); } +/** + * ice_get_opt_fw_name - return optional firmware file name or NULL + * @pf: pointer to the PF instance + */ +static char *ice_get_opt_fw_name(struct ice_pf *pf) +{ + /* Optional firmware name same as default with additional dash + * followed by a EUI-64 identifier (PCIe Device Serial Number) + */ + struct pci_dev *pdev = pf->pdev; + char *opt_fw_filename = NULL; + u32 dword; + u8 dsn[8]; + int pos; + + /* Determine the name of the optional file using the DSN (two + * dwords following the start of the DSN Capability). + */ + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + if (pos) { + opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); + if (!opt_fw_filename) + return NULL; + + pci_read_config_dword(pdev, pos + 4, &dword); + put_unaligned_le32(dword, &dsn[0]); + pci_read_config_dword(pdev, pos + 8, &dword); + put_unaligned_le32(dword, &dsn[4]); + snprintf(opt_fw_filename, NAME_MAX, + "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg", + ICE_DDP_PKG_PATH, + dsn[7], dsn[6], dsn[5], dsn[4], + dsn[3], dsn[2], dsn[1], dsn[0]); + } + + return opt_fw_filename; +} + +/** + * ice_request_fw - Device initialization routine + * @pf: pointer to the PF instance + */ +static void ice_request_fw(struct ice_pf *pf) +{ + char *opt_fw_filename = ice_get_opt_fw_name(pf); + const struct firmware *firmware = NULL; + struct device *dev = &pf->pdev->dev; + int err = 0; + + /* optional device-specific DDP (if present) overrides the default DDP + * package file. kernel logs a debug message if the file doesn't exist, + * and warning messages for other errors. 
+ */ + if (opt_fw_filename) { + err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); + if (err) { + kfree(opt_fw_filename); + goto dflt_pkg_load; + } + + /* request for firmware was successful. Download to device */ + ice_load_pkg(firmware, pf); + kfree(opt_fw_filename); + release_firmware(firmware); + return; + } + +dflt_pkg_load: + err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); + if (err) { + dev_err(dev, + "The DDP package file was not found or could not be read. Entering Safe Mode\n"); + return; + } + + /* request for firmware was successful. Download to device */ + ice_load_pkg(firmware, pf); + release_firmware(firmware); +} + /** * ice_probe - Device initialization routine * @pdev: PCI device information struct @@ -2563,22 +2860,29 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) hw->api_maj_ver, hw->api_min_ver, hw->api_patch, ice_nvm_version_str(hw), hw->fw_build); + ice_request_fw(pf); + + /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be + * set in pf->state, which will cause ice_is_safe_mode to return + * true + */ + if (ice_is_safe_mode(pf)) { + dev_err(dev, + "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); + /* we already got function/device capabilities but these don't + * reflect what the driver needs to do in safe mode. Instead of + * adding conditional logic everywhere to ignore these + * device/function capabilities, override them. + */ + ice_set_safe_mode_caps(hw); + } + err = ice_init_pf(pf); if (err) { dev_err(dev, "ice_init_pf failed: %d\n", err); goto err_init_pf_unroll; } - if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) { - /* Note: DCB init failure is non-fatal to load */ - if (ice_init_pf_dcb(pf, false)) { - clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - clear_bit(ICE_FLAG_DCB_ENA, pf->flags); - } else { - ice_cfg_lldp_mib_change(&pf->hw, true); - } - } - pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; if (!pf->num_alloc_vsi) { err = -EIO; @@ -2658,6 +2962,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_verify_cacheline_size(pf); + /* If no DDP driven features have to be setup, return here */ + if (ice_is_safe_mode(pf)) + return 0; + + /* initialize DDP driven features */ + + /* Note: DCB init failure is non-fatal to load */ + if (ice_init_pf_dcb(pf, false)) { + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); + } else { + ice_cfg_lldp_mib_change(&pf->hw, true); + } + return 0; err_alloc_sw_unroll: @@ -3110,6 +3428,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) struct ice_vsi *vsi = np->vsi; int ret = 0; + /* Don't set any netdev advanced features with device in Safe Mode */ + if (ice_is_safe_mode(vsi->back)) { + dev_err(&vsi->back->pdev->dev, + "Device is in Safe Mode - not enabling advanced netdev features\n"); + return ret; + } + /* Multiple features can be changed in one call so keep features in * separate if/else statements to guarantee each feature is checked */ @@ -3799,9 +4124,6 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked) */ #ifdef CONFIG_DCB int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) -#else -static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) -#endif /* CONFIG_DCB */ { int v; @@ -3812,94 +4134,107 @@ static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) return 0; } +#endif /* CONFIG_DCB */ /** - * ice_vsi_rebuild_all - rebuild all VSIs in PF - * @pf: the PF + * ice_vsi_rebuild_by_type - Rebuild VSI 
of a given type + * @pf: pointer to the PF instance + * @type: VSI type to rebuild + * + * Iterates through the pf->vsi array and rebuilds VSIs of the requested type */ -static int ice_vsi_rebuild_all(struct ice_pf *pf) +static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) { - int i; + enum ice_status status; + int i, err; - /* loop through pf->vsi array and reinit the VSI if found */ ice_for_each_vsi(pf, i) { struct ice_vsi *vsi = pf->vsi[i]; - int err; - if (!vsi) + if (!vsi || vsi->type != type) continue; + /* rebuild the VSI */ err = ice_vsi_rebuild(vsi); if (err) { dev_err(&pf->pdev->dev, - "VSI at index %d rebuild failed\n", - vsi->idx); + "rebuild VSI failed, err %d, VSI index %d, type %d\n", + err, vsi->idx, type); return err; } - dev_info(&pf->pdev->dev, - "VSI at index %d rebuilt. vsi_num = 0x%x\n", - vsi->idx, vsi->vsi_num); + /* replay filters for the VSI */ + status = ice_replay_vsi(&pf->hw, vsi->idx); + if (status) { + dev_err(&pf->pdev->dev, + "replay VSI failed, status %d, VSI index %d, type %d\n", + status, vsi->idx, type); + return -EIO; + } + + /* Re-map HW VSI number, using VSI handle that has been + * previously validated in ice_replay_vsi() call above + */ + vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); + + /* enable the VSI */ + err = ice_ena_vsi(vsi, false); + if (err) { + dev_err(&pf->pdev->dev, + "enable VSI failed, err %d, VSI index %d, type %d\n", + err, vsi->idx, type); + return err; + } + + dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %d\n", + vsi->idx, type); } return 0; } /** - * ice_vsi_replay_all - replay all VSIs configuration in the PF - * @pf: the PF + * ice_update_pf_netdev_link - Update PF netdev link status + * @pf: pointer to the PF instance */ -static int ice_vsi_replay_all(struct ice_pf *pf) +static void ice_update_pf_netdev_link(struct ice_pf *pf) { - struct ice_hw *hw = &pf->hw; - enum ice_status ret; + bool link_up; int i; - /* loop through pf->vsi array and replay the VSI if found */ ice_for_each_vsi(pf, i) { struct ice_vsi *vsi = pf->vsi[i]; - if (!vsi) - continue; + if (!vsi || vsi->type != ICE_VSI_PF) + return; - ret = ice_replay_vsi(hw, vsi->idx); - if (ret) { - dev_err(&pf->pdev->dev, - "VSI at index %d replay failed %d\n", - vsi->idx, ret); - return -EIO; + ice_get_link_status(pf->vsi[i]->port_info, &link_up); + if (link_up) { + netif_carrier_on(pf->vsi[i]->netdev); + netif_tx_wake_all_queues(pf->vsi[i]->netdev); + } else { + netif_carrier_off(pf->vsi[i]->netdev); + netif_tx_stop_all_queues(pf->vsi[i]->netdev); } - - /* Re-map HW VSI number, using VSI handle that has been - * previously validated in ice_replay_vsi() call above - */ - vsi->vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); - - dev_info(&pf->pdev->dev, - "VSI at index %d filter replayed successfully - vsi_num %i\n", - vsi->idx, vsi->vsi_num); } - - /* Clean up replay filter after successful re-configuration */ - ice_replay_post(hw); - return 0; } /** * ice_rebuild - rebuild after reset * @pf: PF to rebuild + * @reset_type: type of reset */ -static void ice_rebuild(struct ice_pf *pf) +static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) { struct device *dev = &pf->pdev->dev; struct ice_hw *hw = &pf->hw; enum ice_status ret; - int err, i; + int err; if (test_bit(__ICE_DOWN, pf->state)) goto clear_recovery; - dev_dbg(dev, "rebuilding PF\n"); + dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); ret = ice_init_all_ctrlq(hw); if (ret) { @@ -3907,6 +4242,16 @@ static void ice_rebuild(struct ice_pf *pf) goto 
err_init_ctrlq; } + /* if DDP was previously loaded successfully */ + if (!ice_is_safe_mode(pf)) { + /* reload the SW DB of filter tables */ + if (reset_type == ICE_RESET_PFR) + ice_fill_blk_tbls(hw); + else + /* Reload DDP Package after CORER/GLOBR reset */ + ice_load_pkg(NULL, pf); + } + ret = ice_clear_pf_cfg(hw); if (ret) { dev_err(dev, "clear PF configuration failed %d\n", ret); @@ -3925,63 +4270,53 @@ static void ice_rebuild(struct ice_pf *pf) if (err) goto err_sched_init_port; - ice_dcb_rebuild(pf); - - err = ice_vsi_rebuild_all(pf); - if (err) { - dev_err(dev, "ice_vsi_rebuild_all failed\n"); - goto err_vsi_rebuild; - } - err = ice_update_link_info(hw->port_info); if (err) dev_err(&pf->pdev->dev, "Get link status error %d\n", err); - /* Replay all VSIs Configuration, including filters after reset */ - if (ice_vsi_replay_all(pf)) { - dev_err(&pf->pdev->dev, - "error replaying VSI configurations with switch filter rules\n"); - goto err_vsi_rebuild; - } - /* start misc vector */ err = ice_req_irq_msix_misc(pf); if (err) { dev_err(dev, "misc vector setup failed: %d\n", err); - goto err_vsi_rebuild; + goto err_sched_init_port; } - /* restart the VSIs that were rebuilt and running before the reset */ - err = ice_pf_ena_all_vsi(pf, false); + if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) + ice_dcb_rebuild(pf); + + /* rebuild PF VSI */ + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); if (err) { - dev_err(&pf->pdev->dev, "error enabling VSIs\n"); - /* no need to disable VSIs in tear down path in ice_rebuild() - * since its already taken care in ice_vsi_open() - */ + dev_err(dev, "PF VSI rebuild failed: %d\n", err); goto err_vsi_rebuild; } - ice_for_each_vsi(pf, i) { - bool link_up; - - if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF) - continue; - ice_get_link_status(pf->vsi[i]->port_info, &link_up); - if (link_up) { - netif_carrier_on(pf->vsi[i]->netdev); - netif_tx_wake_all_queues(pf->vsi[i]->netdev); - } else { - netif_carrier_off(pf->vsi[i]->netdev); - netif_tx_stop_all_queues(pf->vsi[i]->netdev); + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF); + if (err) { + dev_err(dev, "VF VSI rebuild failed: %d\n", err); + goto err_vsi_rebuild; } } + ice_update_pf_netdev_link(pf); + + /* tell the firmware we are up */ + ret = ice_send_version(pf); + if (ret) { + dev_err(dev, + "Rebuild failed due to error sending driver version: %d\n", + ret); + goto err_vsi_rebuild; + } + + ice_replay_post(hw); + /* if we get here, reset flow is successful */ clear_bit(__ICE_RESET_FAILED, pf->state); return; err_vsi_rebuild: - ice_vsi_release_all(pf); err_sched_init_port: ice_sched_cleanup_all(hw); err_init_ctrlq: @@ -4508,6 +4843,17 @@ out_rm_features: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } +static const struct net_device_ops ice_netdev_safe_mode_ops = { + .ndo_open = ice_open, + .ndo_stop = ice_stop, + .ndo_start_xmit = ice_start_xmit, + .ndo_set_mac_address = ice_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = ice_change_mtu, + .ndo_get_stats64 = ice_get_stats64, + .ndo_tx_timeout = ice_tx_timeout, +}; + static const struct net_device_ops ice_netdev_ops = { .ndo_open = ice_open, .ndo_stop = ice_stop, diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 64de05ccbc47..b45797f39b2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -1443,6 +1443,12 @@ int ice_sriov_configure(struct 
pci_dev *pdev, int num_vfs) { struct ice_pf *pf = pci_get_drvdata(pdev); + if (ice_is_safe_mode(pf)) { + dev_err(&pf->pdev->dev, + "SR-IOV cannot be configured - Device is in Safe Mode\n"); + return -EOPNOTSUPP; + } + if (num_vfs) return ice_pci_sriov_ena(pf, num_vfs); -- cgit v1.2.3 From 2de1256636589bcc29f18feefcfa65588404746e Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Mon, 9 Sep 2019 06:47:47 -0700 Subject: ice: Bump version Bump version to 0.8.1-k Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a9efc918c625..214cd6eca405 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -10,8 +10,8 @@ #include "ice_dcb_lib.h" #define DRV_VERSION_MAJOR 0 -#define DRV_VERSION_MINOR 7 -#define DRV_VERSION_BUILD 5 +#define DRV_VERSION_MINOR 8 +#define DRV_VERSION_BUILD 1 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ -- cgit v1.2.3
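
The device-specific DDP package file name used by the "ice: Enable DDP package download" patch above is simply the default name with the PCIe Device Serial Number appended. As a minimal illustration (a standalone user-space sketch, not part of any patch in this series, and using a made-up DSN value), the following mirrors the snprintf() formatting in ice_get_opt_fw_name(), so the naming convention can be checked against a real device's DSN (e.g. from lspci -vv):

/* Minimal sketch: derive the optional DDP file name from a DSN.
 * The dsn[] bytes are a hypothetical example; on real hardware they are
 * read from PCI config space, two dwords after the DSN extended
 * capability header, as done in ice_get_opt_fw_name().
 */
#include <stdio.h>

#define ICE_DDP_PKG_PATH "intel/ice/ddp/"

int main(void)
{
	unsigned char dsn[8] = { 0x10, 0x54, 0x7c, 0xff, 0xff, 0x3c, 0xe0, 0x80 };
	char name[64];

	/* Print most-significant byte first so the name reads as the
	 * EUI-64 identifier, matching the driver's format string.
	 */
	snprintf(name, sizeof(name),
		 "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg",
		 ICE_DDP_PKG_PATH,
		 dsn[7], dsn[6], dsn[5], dsn[4],
		 dsn[3], dsn[2], dsn[1], dsn[0]);

	printf("%s\n", name);	/* intel/ice/ddp/ice-80e03cffff7c5410.pkg */
	return 0;
}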