Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c | 61
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c | 18
-rw-r--r--  drivers/net/can/dev.c | 16
-rw-r--r--  drivers/net/can/flexcan.c | 40
-rw-r--r--  drivers/net/can/kvaser_pciefd.c | 4
-rw-r--r--  drivers/net/can/m_can/Kconfig | 3
-rw-r--r--  drivers/net/can/m_can/m_can.c | 24
-rw-r--r--  drivers/net/can/m_can/m_can.h | 1
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 23
-rw-r--r--  drivers/net/can/m_can/tcan4x5x.c | 32
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 11
-rw-r--r--  drivers/net/can/rx-offload.c | 4
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 26
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c | 18
-rw-r--r--  drivers/net/can/ti_hecc.c | 21
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 131
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 2
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 4
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 51
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 48
-rw-r--r--  drivers/net/can/xilinx_can.c | 6
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 11
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/devlink.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.c | 31
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_vtu.c | 59
-rw-r--r--  drivers/net/dsa/qca8k.c | 4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 3
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 80
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 126
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 111
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 586
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c | 3
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 28
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 62
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 115
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 6
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 41
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 14
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 59
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 22
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 52
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 14
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/prestera/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_pci.c | 7
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 3
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 37
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.h | 3
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 5
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 32
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 11
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 9
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 5
-rw-r--r--  drivers/net/geneve.c | 3
-rw-r--r--  drivers/net/ipa/gsi_trans.c | 15
-rw-r--r--  drivers/net/netdevsim/dev.c | 2
-rw-r--r--  drivers/net/netdevsim/health.c | 1
-rw-r--r--  drivers/net/netdevsim/udp_tunnels.c | 1
-rw-r--r--  drivers/net/phy/mscc/mscc_macsec.c | 1
-rw-r--r--  drivers/net/phy/realtek.c | 2
-rw-r--r--  drivers/net/phy/sfp.c | 3
-rw-r--r--  drivers/net/phy/smsc.c | 4
-rw-r--r--  drivers/net/tun.c | 14
-rw-r--r--  drivers/net/usb/cx82310_eth.c | 3
-rw-r--r--  drivers/net/usb/ipheth.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 3
-rw-r--r--  drivers/net/vrf.c | 92
-rw-r--r--  drivers/net/wan/cosa.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/sta.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 18
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 103
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 20
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 36
-rw-r--r--  drivers/net/wireless/realtek/rtw88/fw.c | 2
136 files changed, 1887 insertions(+), 950 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 84ecbc6fa0ff..47afc5938c26 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1460,7 +1460,39 @@ static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
slave->dev->flags &= ~IFF_SLAVE;
}
-static struct slave *bond_alloc_slave(struct bonding *bond)
+static void slave_kobj_release(struct kobject *kobj)
+{
+ struct slave *slave = to_slave(kobj);
+ struct bonding *bond = bond_get_bond_by_slave(slave);
+
+ cancel_delayed_work_sync(&slave->notify_work);
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ kfree(SLAVE_AD_INFO(slave));
+
+ kfree(slave);
+}
+
+static struct kobj_type slave_ktype = {
+ .release = slave_kobj_release,
+#ifdef CONFIG_SYSFS
+ .sysfs_ops = &slave_sysfs_ops,
+#endif
+};
+
+static int bond_kobj_init(struct slave *slave)
+{
+ int err;
+
+ err = kobject_init_and_add(&slave->kobj, &slave_ktype,
+ &(slave->dev->dev.kobj), "bonding_slave");
+ if (err)
+ kobject_put(&slave->kobj);
+
+ return err;
+}
+
+static struct slave *bond_alloc_slave(struct bonding *bond,
+ struct net_device *slave_dev)
{
struct slave *slave = NULL;
@@ -1468,11 +1500,17 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
if (!slave)
return NULL;
+ slave->bond = bond;
+ slave->dev = slave_dev;
+
+ if (bond_kobj_init(slave))
+ return NULL;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
GFP_KERNEL);
if (!SLAVE_AD_INFO(slave)) {
- kfree(slave);
+ kobject_put(&slave->kobj);
return NULL;
}
}
@@ -1481,17 +1519,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
return slave;
}
-static void bond_free_slave(struct slave *slave)
-{
- struct bonding *bond = bond_get_bond_by_slave(slave);
-
- cancel_delayed_work_sync(&slave->notify_work);
- if (BOND_MODE(bond) == BOND_MODE_8023AD)
- kfree(SLAVE_AD_INFO(slave));
-
- kfree(slave);
-}
-
static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
info->bond_mode = BOND_MODE(bond);
@@ -1678,14 +1705,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_undo_flags;
}
- new_slave = bond_alloc_slave(bond);
+ new_slave = bond_alloc_slave(bond, slave_dev);
if (!new_slave) {
res = -ENOMEM;
goto err_undo_flags;
}
- new_slave->bond = bond;
- new_slave->dev = slave_dev;
/* Set the new_slave's queue_id to be zero. Queue ID mapping
* is set via sysfs or module option if desired.
*/
@@ -2007,7 +2032,7 @@ err_restore_mtu:
dev_set_mtu(slave_dev, new_slave->original_mtu);
err_free:
- bond_free_slave(new_slave);
+ kobject_put(&new_slave->kobj);
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
@@ -2187,7 +2212,7 @@ static int __bond_release_one(struct net_device *bond_dev,
if (!netif_is_bond_master(slave_dev))
slave_dev->priv_flags &= ~IFF_BONDING;
- bond_free_slave(slave);
+ kobject_put(&slave->kobj);
return 0;
}
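
The hunks above replace bond_free_slave() with a kobject release callback, so whichever kobject_put() drops the last reference frees the slave. A minimal userspace sketch of that release-callback pattern follows; the names are illustrative, not the bonding API:

#include <stdlib.h>
#include <stdio.h>

struct kobj {
	int refcount;
	void (*release)(struct kobj *kobj);
};

static void kobj_put(struct kobj *kobj)
{
	/* the last put runs the release callback; callers never free directly */
	if (--kobj->refcount == 0)
		kobj->release(kobj);
}

struct slave {
	struct kobj kobj;	/* first member, so the cast below is valid */
	void *ad_info;		/* stand-in for SLAVE_AD_INFO(slave) */
};

static void slave_kobj_release(struct kobj *kobj)
{
	struct slave *slave = (struct slave *)kobj;

	free(slave->ad_info);	/* mode-specific state */
	free(slave);		/* the slave itself */
	puts("slave released");
}

int main(void)
{
	struct slave *slave = calloc(1, sizeof(*slave));

	slave->kobj.refcount = 1;
	slave->kobj.release = slave_kobj_release;
	slave->ad_info = malloc(16);

	slave->kobj.refcount++;	/* e.g. sysfs still holds a reference */
	kobj_put(&slave->kobj);	/* not the last put: nothing freed yet */
	kobj_put(&slave->kobj);	/* last reference: slave_kobj_release() runs */
	return 0;
}

This is why the error paths in bond_enslave() and __bond_release_one() now call kobject_put() instead of a direct free: once kobject_init_and_add() has run, other holders may still reference the slave, and only the final put may tear it down.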
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 9b8346638f69..fd07561da034 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -121,7 +121,6 @@ static const struct slave_attribute *slave_attrs[] = {
};
#define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr)
-#define to_slave(obj) container_of(obj, struct slave, kobj)
static ssize_t slave_show(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -132,28 +131,15 @@ static ssize_t slave_show(struct kobject *kobj,
return slave_attr->show(slave, buf);
}
-static const struct sysfs_ops slave_sysfs_ops = {
+const struct sysfs_ops slave_sysfs_ops = {
.show = slave_show,
};
-static struct kobj_type slave_ktype = {
-#ifdef CONFIG_SYSFS
- .sysfs_ops = &slave_sysfs_ops,
-#endif
-};
-
int bond_sysfs_slave_add(struct slave *slave)
{
const struct slave_attribute **a;
int err;
- err = kobject_init_and_add(&slave->kobj, &slave_ktype,
- &(slave->dev->dev.kobj), "bonding_slave");
- if (err) {
- kobject_put(&slave->kobj);
- return err;
- }
-
for (a = slave_attrs; *a; ++a) {
err = sysfs_create_file(&slave->kobj, &((*a)->attr));
if (err) {
@@ -171,6 +157,4 @@ void bond_sysfs_slave_del(struct slave *slave)
for (a = slave_attrs; *a; ++a)
sysfs_remove_file(&slave->kobj, &((*a)->attr));
-
- kobject_put(&slave->kobj);
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index b70ded3760f2..81e39d7507d8 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -512,9 +512,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
*/
struct sk_buff *skb = priv->echo_skb[idx];
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
- u8 len = cf->len;
- *len_ptr = len;
+ /* get the real payload length for netdev statistics */
+ if (cf->can_id & CAN_RTR_FLAG)
+ *len_ptr = 0;
+ else
+ *len_ptr = cf->len;
+
priv->echo_skb[idx] = NULL;
return skb;
@@ -538,7 +542,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
if (!skb)
return 0;
- netif_rx(skb);
+ skb_get(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS)
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
return len;
}
@@ -584,7 +592,7 @@ static void can_restart(struct net_device *dev)
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx(skb);
+ netif_rx_ni(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 4d594e977497..99e5f272205d 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -217,7 +217,7 @@
* MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes
* VF610 FlexCAN3 ? no yes no yes yes? no
* LS1021A FlexCAN2 03.00.04.00 no yes no no yes no
- * LX2160A FlexCAN3 03.00.23.00 no yes no no yes yes
+ * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
@@ -400,19 +400,19 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC,
};
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
};
static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD,
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -728,8 +728,10 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
int err;
err = pm_runtime_get_sync(priv->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_noidle(priv->dev);
return err;
+ }
err = __flexcan_get_berr_counter(dev, bec);
@@ -1565,14 +1567,10 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(reg_ctrl2, &regs->ctrl2);
}
- err = flexcan_transceiver_enable(priv);
- if (err)
- goto out_chip_disable;
-
/* synchronize with the can bus */
err = flexcan_chip_unfreeze(priv);
if (err)
- goto out_transceiver_disable;
+ goto out_chip_disable;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -1590,8 +1588,6 @@ static int flexcan_chip_start(struct net_device *dev)
return 0;
- out_transceiver_disable:
- flexcan_transceiver_disable(priv);
out_chip_disable:
flexcan_chip_disable(priv);
return err;
@@ -1621,7 +1617,6 @@ static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
&regs->ctrl);
- flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
return 0;
@@ -1654,17 +1649,23 @@ static int flexcan_open(struct net_device *dev)
}
err = pm_runtime_get_sync(priv->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_noidle(priv->dev);
return err;
+ }
err = open_candev(dev);
if (err)
goto out_runtime_put;
- err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
+ err = flexcan_transceiver_enable(priv);
if (err)
goto out_close;
+ err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
+ if (err)
+ goto out_transceiver_disable;
+
if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN;
else
@@ -1716,6 +1717,8 @@ static int flexcan_open(struct net_device *dev)
can_rx_offload_del(&priv->offload);
out_free_irq:
free_irq(dev->irq, dev);
+ out_transceiver_disable:
+ flexcan_transceiver_disable(priv);
out_close:
close_candev(dev);
out_runtime_put:
@@ -1734,6 +1737,7 @@ static int flexcan_close(struct net_device *dev)
can_rx_offload_del(&priv->offload);
free_irq(dev->irq, dev);
+ flexcan_transceiver_disable(priv);
close_candev(dev);
pm_runtime_put(priv->dev);
@@ -1852,7 +1856,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
return -EINVAL;
/* stop mode property format is:
- * <&gpr req_gpr>.
+ * <&gpr req_gpr req_bit>.
*/
ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
ARRAY_SIZE(out_val));
@@ -2062,6 +2066,8 @@ static int flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ device_set_wakeup_enable(&pdev->dev, false);
+ device_set_wakeup_capable(&pdev->dev, false);
unregister_flexcandev(dev);
pm_runtime_disable(&pdev->dev);
free_candev(dev);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 6f766918211a..72acd1ba162d 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -287,12 +287,12 @@ struct kvaser_pciefd_tx_packet {
static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
.name = KVASER_PCIEFD_DRV_NAME,
.tseg1_min = 1,
- .tseg1_max = 255,
+ .tseg1_max = 512,
.tseg2_min = 1,
.tseg2_max = 32,
.sjw_max = 16,
.brp_min = 1,
- .brp_max = 4096,
+ .brp_max = 8192,
.brp_inc = 1,
};
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index 48be627c85c2..5f9f8192dd0b 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -16,7 +16,8 @@ config CAN_M_CAN_PLATFORM
config CAN_M_CAN_TCAN4X5X
depends on CAN_M_CAN
- depends on REGMAP_SPI
+ depends on SPI
+ select REGMAP_SPI
tristate "TCAN4X5X M_CAN device"
help
Say Y here if you want support for Texas Instruments TCAN4x5x
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 02c5795b7393..61a93b192037 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -665,7 +665,7 @@ static int m_can_handle_state_change(struct net_device *dev,
unsigned int ecr;
switch (new_state) {
- case CAN_STATE_ERROR_ACTIVE:
+ case CAN_STATE_ERROR_WARNING:
/* error warning state */
cdev->can.can_stats.error_warning++;
cdev->can.state = CAN_STATE_ERROR_WARNING;
@@ -694,7 +694,7 @@ static int m_can_handle_state_change(struct net_device *dev,
__m_can_get_berr_counter(dev, &bec);
switch (new_state) {
- case CAN_STATE_ERROR_ACTIVE:
+ case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (bec.txerr > bec.rxerr) ?
@@ -956,6 +956,8 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
struct net_device_stats *stats = &dev->stats;
u32 ir;
+ if (pm_runtime_suspended(cdev->dev))
+ return IRQ_NONE;
ir = m_can_read(cdev, M_CAN_IR);
if (!ir)
return IRQ_NONE;
@@ -1031,7 +1033,7 @@ static const struct can_bittiming_const m_can_bittiming_const_31X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 256,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_min = 2, /* Time segment 2 = phase_seg2 */
.tseg2_max = 128,
.sjw_max = 128,
.brp_min = 1,
@@ -1383,6 +1385,8 @@ static int m_can_dev_setup(struct m_can_classdev *m_can_dev)
&m_can_data_bittiming_const_31X;
break;
case 32:
+ case 33:
+ /* Support both MCAN version v3.2.x and v3.3.0 */
m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
m_can_dev->bit_timing : &m_can_bittiming_const_31X;
@@ -1414,6 +1418,9 @@ static void m_can_stop(struct net_device *dev)
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
+ /* Set init mode to disengage from the network */
+ m_can_config_endisable(cdev, true);
+
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
}
@@ -1648,7 +1655,7 @@ static int m_can_open(struct net_device *dev)
INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
err = request_threaded_irq(dev->irq, NULL, m_can_isr,
- IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ IRQF_ONESHOT,
dev->name, dev);
} else {
err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
@@ -1812,6 +1819,12 @@ out:
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
+void m_can_class_free_dev(struct net_device *net)
+{
+ free_candev(net);
+}
+EXPORT_SYMBOL_GPL(m_can_class_free_dev);
+
int m_can_class_register(struct m_can_classdev *m_can_dev)
{
int ret;
@@ -1850,7 +1863,6 @@ pm_runtime_fail:
if (ret) {
if (m_can_dev->pm_clock_support)
pm_runtime_disable(m_can_dev->dev);
- free_candev(m_can_dev->net);
}
return ret;
@@ -1908,8 +1920,6 @@ void m_can_class_unregister(struct m_can_classdev *m_can_dev)
unregister_candev(m_can_dev->net);
m_can_clk_stop(m_can_dev);
-
- free_candev(m_can_dev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 49f42b50627a..b2699a7c9997 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -99,6 +99,7 @@ struct m_can_classdev {
};
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev);
+void m_can_class_free_dev(struct net_device *net);
int m_can_class_register(struct m_can_classdev *cdev);
void m_can_class_unregister(struct m_can_classdev *cdev);
int m_can_class_get_clocks(struct m_can_classdev *cdev);
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index e6d0cb9ee02f..161cb9be018c 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -67,32 +67,36 @@ static int m_can_plat_probe(struct platform_device *pdev)
return -ENOMEM;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (!priv) {
+ ret = -ENOMEM;
+ goto probe_fail;
+ }
mcan_class->device_data = priv;
- m_can_class_get_clocks(mcan_class);
+ ret = m_can_class_get_clocks(mcan_class);
+ if (ret)
+ goto probe_fail;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
addr = devm_ioremap_resource(&pdev->dev, res);
irq = platform_get_irq_byname(pdev, "int0");
if (IS_ERR(addr) || irq < 0) {
ret = -EINVAL;
- goto failed_ret;
+ goto probe_fail;
}
/* message ram could be shared */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
if (!res) {
ret = -ENODEV;
- goto failed_ret;
+ goto probe_fail;
}
mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!mram_addr) {
ret = -ENOMEM;
- goto failed_ret;
+ goto probe_fail;
}
priv->base = addr;
@@ -111,9 +115,10 @@ static int m_can_plat_probe(struct platform_device *pdev)
m_can_init_ram(mcan_class);
- ret = m_can_class_register(mcan_class);
+ return m_can_class_register(mcan_class);
-failed_ret:
+probe_fail:
+ m_can_class_free_dev(mcan_class->net);
return ret;
}
@@ -134,6 +139,8 @@ static int m_can_plat_remove(struct platform_device *pdev)
m_can_class_unregister(mcan_class);
+ m_can_class_free_dev(mcan_class->net);
+
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
index eacd428e07e9..e5d7d85e0b6d 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -440,14 +440,18 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
return -ENOMEM;
priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_m_can_class_free_dev;
+ }
priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
- if (PTR_ERR(priv->power) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- else
+ if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_m_can_class_free_dev;
+ } else {
priv->power = NULL;
+ }
mcan_class->device_data = priv;
@@ -460,8 +464,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
}
/* Sanity check */
- if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF)
- return -ERANGE;
+ if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF) {
+ ret = -ERANGE;
+ goto out_m_can_class_free_dev;
+ }
priv->reg_offset = TCAN4X5X_MCAN_OFFSET;
priv->mram_start = TCAN4X5X_MRAM_START;
@@ -487,6 +493,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
&spi->dev, &tcan4x5x_regmap);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ goto out_clk;
+ }
ret = tcan4x5x_power_enable(priv->power, 1);
if (ret)
@@ -514,8 +524,10 @@ out_clk:
clk_disable_unprepare(mcan_class->cclk);
clk_disable_unprepare(mcan_class->hclk);
}
-
+ out_m_can_class_free_dev:
+ m_can_class_free_dev(mcan_class->net);
dev_err(&spi->dev, "Probe failed, err=%d\n", ret);
+
return ret;
}
@@ -523,9 +535,11 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
{
struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
+ m_can_class_unregister(priv->mcan_dev);
+
tcan4x5x_power_enable(priv->power, 0);
- m_can_class_unregister(priv->mcan_dev);
+ m_can_class_free_dev(priv->mcan_dev->net);
return 0;
}
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 10aa3e457c33..40c33b8a5fda 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -262,8 +262,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
cf_len = get_can_dlc(pucan_msg_get_dlc(msg));
/* if this frame is an echo, */
- if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
- !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
+ if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
@@ -277,7 +276,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
netif_wake_queue(priv->ndev);
spin_unlock_irqrestore(&priv->echo_lock, flags);
- return 0;
+
+ /* if this frame is only an echo, stop here. Otherwise,
+ * continue to push this application self-received frame into
+ * its own rx queue.
+ */
+ if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE))
+ return 0;
}
/* otherwise, it should be pushed into rx fifo */
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index 3b180269a92d..6e95193b215b 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -245,7 +245,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return -ENOBUFS;
}
@@ -290,7 +290,7 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
{
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return -ENOBUFS;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index c3f49543ff26..8a39be076e14 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -75,11 +75,11 @@ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
{
switch (model) {
case MCP251XFD_MODEL_MCP2517FD:
- return "MCP2517FD"; break;
+ return "MCP2517FD";
case MCP251XFD_MODEL_MCP2518FD:
- return "MCP2518FD"; break;
+ return "MCP2518FD";
case MCP251XFD_MODEL_MCP251XFD:
- return "MCP251xFD"; break;
+ return "MCP251xFD";
}
return "<unknown>";
@@ -95,21 +95,21 @@ static const char *mcp251xfd_get_mode_str(const u8 mode)
{
switch (mode) {
case MCP251XFD_REG_CON_MODE_MIXED:
- return "Mixed (CAN FD/CAN 2.0)"; break;
+ return "Mixed (CAN FD/CAN 2.0)";
case MCP251XFD_REG_CON_MODE_SLEEP:
- return "Sleep"; break;
+ return "Sleep";
case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
- return "Internal Loopback"; break;
+ return "Internal Loopback";
case MCP251XFD_REG_CON_MODE_LISTENONLY:
- return "Listen Only"; break;
+ return "Listen Only";
case MCP251XFD_REG_CON_MODE_CONFIG:
- return "Configuration"; break;
+ return "Configuration";
case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
- return "External Loopback"; break;
+ return "External Loopback";
case MCP251XFD_REG_CON_MODE_CAN2_0:
- return "CAN 2.0"; break;
+ return "CAN 2.0";
case MCP251XFD_REG_CON_MODE_RESTRICTED:
- return "Restricted Operation"; break;
+ return "Restricted Operation";
}
return "<unknown>";
@@ -2738,6 +2738,10 @@ static int mcp251xfd_probe(struct spi_device *spi)
u32 freq;
int err;
+ if (!spi->irq)
+ return dev_err_probe(&spi->dev, -ENXIO,
+ "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n");
+
rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
GPIOD_IN);
if (PTR_ERR(rx_int) == -EPROBE_DEFER)
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index ba25902dd78c..314f868b3465 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -173,7 +173,7 @@ mcp251xfd_regmap_nocrc_read(void *context,
memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd));
if (MCP251XFD_SANITIZE_SPI)
memset(buf_tx->data, 0x0, val_len);
- };
+ }
err = spi_sync(spi, &msg);
if (err)
@@ -330,17 +330,17 @@ mcp251xfd_regmap_crc_read(void *context,
goto out;
}
- netdev_dbg(priv->ndev,
- "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x) retrying.\n",
- reg, val_len, (int)val_len, buf_rx->data,
- get_unaligned_be16(buf_rx->data + val_len));
- }
-
- if (err) {
netdev_info(priv->ndev,
- "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x).\n",
+ "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x) retrying.\n",
reg, val_len, (int)val_len, buf_rx->data,
get_unaligned_be16(buf_rx->data + val_len));
+ }
+
+ if (err) {
+ netdev_err(priv->ndev,
+ "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x).\n",
+ reg, val_len, (int)val_len, buf_rx->data,
+ get_unaligned_be16(buf_rx->data + val_len));
return err;
}
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 1d63006c97bc..2c22f40e12bd 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -881,7 +881,8 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->base = devm_platform_ioremap_resource_byname(pdev, "hecc");
if (IS_ERR(priv->base)) {
dev_err(&pdev->dev, "hecc ioremap failed\n");
- return PTR_ERR(priv->base);
+ err = PTR_ERR(priv->base);
+ goto probe_exit_candev;
}
/* handle hecc-ram memory */
@@ -889,20 +890,22 @@ static int ti_hecc_probe(struct platform_device *pdev)
"hecc-ram");
if (IS_ERR(priv->hecc_ram)) {
dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
- return PTR_ERR(priv->hecc_ram);
+ err = PTR_ERR(priv->hecc_ram);
+ goto probe_exit_candev;
}
/* handle mbx memory */
priv->mbx = devm_platform_ioremap_resource_byname(pdev, "mbx");
if (IS_ERR(priv->mbx)) {
dev_err(&pdev->dev, "mbx ioremap failed\n");
- return PTR_ERR(priv->mbx);
+ err = PTR_ERR(priv->mbx);
+ goto probe_exit_candev;
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
dev_err(&pdev->dev, "No irq resource\n");
- goto probe_exit;
+ goto probe_exit_candev;
}
priv->ndev = ndev;
@@ -933,7 +936,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
err = clk_prepare_enable(priv->clk);
if (err) {
dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
- goto probe_exit_clk;
+ goto probe_exit_release_clk;
}
priv->offload.mailbox_read = ti_hecc_mailbox_read;
@@ -942,7 +945,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
err = can_rx_offload_add_timestamp(ndev, &priv->offload);
if (err) {
dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
- goto probe_exit_clk;
+ goto probe_exit_disable_clk;
}
err = register_candev(ndev);
@@ -960,11 +963,13 @@ static int ti_hecc_probe(struct platform_device *pdev)
probe_exit_offload:
can_rx_offload_del(&priv->offload);
-probe_exit_clk:
+probe_exit_disable_clk:
+ clk_disable_unprepare(priv->clk);
+probe_exit_release_clk:
clk_put(priv->clk);
probe_exit_candev:
free_candev(ndev);
-probe_exit:
+
return err;
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 3005157059ca..018ca3b057a3 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -63,21 +63,27 @@ enum gs_can_identify_mode {
};
/* data types passed between host and device */
+
+/* The firmware on the original USB2CAN by Geschwister Schneider
+ * Technologie Entwicklungs- und Vertriebs UG exchanges all data
+ * between the host and the device in host byte order. This is done
+ * with the struct gs_host_config::byte_order member, which is sent
+ * first to indicate the desired byte order.
+ *
+ * The widely used open source firmware candleLight doesn't support
+ * this feature and exchanges the data in little endian byte order.
+ */
struct gs_host_config {
- u32 byte_order;
+ __le32 byte_order;
} __packed;
-/* All data exchanged between host and device is exchanged in host byte order,
- * thanks to the struct gs_host_config byte_order member, which is sent first
- * to indicate the desired byte order.
- */
struct gs_device_config {
u8 reserved1;
u8 reserved2;
u8 reserved3;
u8 icount;
- u32 sw_version;
- u32 hw_version;
+ __le32 sw_version;
+ __le32 hw_version;
} __packed;
#define GS_CAN_MODE_NORMAL 0
@@ -87,26 +93,26 @@ struct gs_device_config {
#define GS_CAN_MODE_ONE_SHOT BIT(3)
struct gs_device_mode {
- u32 mode;
- u32 flags;
+ __le32 mode;
+ __le32 flags;
} __packed;
struct gs_device_state {
- u32 state;
- u32 rxerr;
- u32 txerr;
+ __le32 state;
+ __le32 rxerr;
+ __le32 txerr;
} __packed;
struct gs_device_bittiming {
- u32 prop_seg;
- u32 phase_seg1;
- u32 phase_seg2;
- u32 sjw;
- u32 brp;
+ __le32 prop_seg;
+ __le32 phase_seg1;
+ __le32 phase_seg2;
+ __le32 sjw;
+ __le32 brp;
} __packed;
struct gs_identify_mode {
- u32 mode;
+ __le32 mode;
} __packed;
#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
@@ -117,23 +123,23 @@ struct gs_identify_mode {
#define GS_CAN_FEATURE_IDENTIFY BIT(5)
struct gs_device_bt_const {
- u32 feature;
- u32 fclk_can;
- u32 tseg1_min;
- u32 tseg1_max;
- u32 tseg2_min;
- u32 tseg2_max;
- u32 sjw_max;
- u32 brp_min;
- u32 brp_max;
- u32 brp_inc;
+ __le32 feature;
+ __le32 fclk_can;
+ __le32 tseg1_min;
+ __le32 tseg1_max;
+ __le32 tseg2_min;
+ __le32 tseg2_max;
+ __le32 sjw_max;
+ __le32 brp_min;
+ __le32 brp_max;
+ __le32 brp_inc;
} __packed;
#define GS_CAN_FLAG_OVERFLOW 1
struct gs_host_frame {
u32 echo_id;
- u32 can_id;
+ __le32 can_id;
u8 can_dlc;
u8 channel;
@@ -329,13 +335,13 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
if (!skb)
return;
- cf->can_id = hf->can_id;
+ cf->can_id = le32_to_cpu(hf->can_id);
cf->can_dlc = get_can_dlc(hf->can_dlc);
memcpy(cf->data, hf->data, 8);
/* ERROR frames tell us information about the controller */
- if (hf->can_id & CAN_ERR_FLAG)
+ if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
gs_update_state(dev, cf);
netdev->stats.rx_packets++;
@@ -418,11 +424,11 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
if (!dbt)
return -ENOMEM;
- dbt->prop_seg = bt->prop_seg;
- dbt->phase_seg1 = bt->phase_seg1;
- dbt->phase_seg2 = bt->phase_seg2;
- dbt->sjw = bt->sjw;
- dbt->brp = bt->brp;
+ dbt->prop_seg = cpu_to_le32(bt->prop_seg);
+ dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
+ dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
+ dbt->sjw = cpu_to_le32(bt->sjw);
+ dbt->brp = cpu_to_le32(bt->brp);
/* request bit timings */
rc = usb_control_msg(interface_to_usbdev(intf),
@@ -503,7 +509,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
cf = (struct can_frame *)skb->data;
- hf->can_id = cf->can_id;
+ hf->can_id = cpu_to_le32(cf->can_id);
hf->can_dlc = cf->can_dlc;
memcpy(hf->data, cf->data, cf->can_dlc);
@@ -573,6 +579,7 @@ static int gs_can_open(struct net_device *netdev)
int rc, i;
struct gs_device_mode *dm;
u32 ctrlmode;
+ u32 flags = 0;
rc = open_candev(netdev);
if (rc)
@@ -640,24 +647,24 @@ static int gs_can_open(struct net_device *netdev)
/* flags */
ctrlmode = dev->can.ctrlmode;
- dm->flags = 0;
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
- dm->flags |= GS_CAN_MODE_LOOP_BACK;
+ flags |= GS_CAN_MODE_LOOP_BACK;
else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
- dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
+ flags |= GS_CAN_MODE_LISTEN_ONLY;
/* Controller is not allowed to retry TX
* this mode is unavailable on atmels uc3c hardware
*/
if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
- dm->flags |= GS_CAN_MODE_ONE_SHOT;
+ flags |= GS_CAN_MODE_ONE_SHOT;
if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+ flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
/* finally start device */
- dm->mode = GS_CAN_MODE_START;
+ dm->mode = cpu_to_le32(GS_CAN_MODE_START);
+ dm->flags = cpu_to_le32(flags);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
GS_USB_BREQ_MODE,
@@ -737,9 +744,9 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
return -ENOMEM;
if (do_identify)
- imode->mode = GS_CAN_IDENTIFY_ON;
+ imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
else
- imode->mode = GS_CAN_IDENTIFY_OFF;
+ imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
usb_sndctrlpipe(interface_to_usbdev(dev->iface),
@@ -790,6 +797,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
struct net_device *netdev;
int rc;
struct gs_device_bt_const *bt_const;
+ u32 feature;
bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
if (!bt_const)
@@ -830,14 +838,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* dev setup */
strcpy(dev->bt_const.name, "gs_usb");
- dev->bt_const.tseg1_min = bt_const->tseg1_min;
- dev->bt_const.tseg1_max = bt_const->tseg1_max;
- dev->bt_const.tseg2_min = bt_const->tseg2_min;
- dev->bt_const.tseg2_max = bt_const->tseg2_max;
- dev->bt_const.sjw_max = bt_const->sjw_max;
- dev->bt_const.brp_min = bt_const->brp_min;
- dev->bt_const.brp_max = bt_const->brp_max;
- dev->bt_const.brp_inc = bt_const->brp_inc;
+ dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
+ dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
+ dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
+ dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
+ dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
+ dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
+ dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
+ dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
dev->udev = interface_to_usbdev(intf);
dev->iface = intf;
@@ -854,28 +862,29 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* can setup */
dev->can.state = CAN_STATE_STOPPED;
- dev->can.clock.freq = bt_const->fclk_can;
+ dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
dev->can.bittiming_const = &dev->bt_const;
dev->can.do_set_bittiming = gs_usb_set_bittiming;
dev->can.ctrlmode_supported = 0;
- if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
+ feature = le32_to_cpu(bt_const->feature);
+ if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
- if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
+ if (feature & GS_CAN_FEATURE_LOOP_BACK)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
- if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
+ if (feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
- if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
+ if (feature & GS_CAN_FEATURE_ONE_SHOT)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
SET_NETDEV_DEV(netdev, &intf->dev);
- if (dconf->sw_version > 1)
- if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY)
+ if (le32_to_cpu(dconf->sw_version) > 1)
+ if (feature & GS_CAN_FEATURE_IDENTIFY)
netdev->ethtool_ops = &gs_usb_ethtool_ops;
kfree(bt_const);
@@ -910,7 +919,7 @@ static int gs_usb_probe(struct usb_interface *intf,
if (!hconf)
return -ENOMEM;
- hconf->byte_order = 0x0000beef;
+ hconf->byte_order = cpu_to_le32(0x0000beef);
/* send host config */
rc = usb_control_msg(interface_to_usbdev(intf),
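
The gs_usb changes above mark every wire field __le32 and convert at each access with cpu_to_le32()/le32_to_cpu(), since the candleLight firmware talks little endian regardless of host byte order. A hedged userspace sketch of the same idea, using the glibc equivalents htole32()/le32toh() (the wire struct below is illustrative):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* wire format: the device expects little endian regardless of host order */
struct device_mode_wire {
	uint32_t mode;
	uint32_t flags;
};

int main(void)
{
	struct device_mode_wire dm;

	dm.mode = htole32(1);	/* like cpu_to_le32(GS_CAN_MODE_START) */
	dm.flags = htole32(0);

	/* decoding gives the same value on little and big endian hosts */
	printf("decoded mode: %u\n", le32toh(dm.mode));
	return 0;
}

On a little endian host the conversions compile to nothing, which is why the old code appeared to work; only a big endian host would have sent garbled control messages.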
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 7ab87a758754..218fadc91155 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -367,7 +367,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.tseg2_max = 32,
.sjw_max = 16,
.brp_min = 1,
- .brp_max = 4096,
+ .brp_max = 8192,
.brp_inc = 1,
};
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 5857b37dcd96..e97f2e0da6b0 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -326,8 +326,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (!ctx)
return NETDEV_TX_BUSY;
- can_put_echo_skb(skb, priv->netdev, ctx->ndx);
-
if (cf->can_id & CAN_EFF_FLAG) {
/* SIDH | SIDL | EIDH | EIDL
* 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0
@@ -357,6 +355,8 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
usb_msg.dlc |= MCBA_DLC_RTR_MASK;
+ can_put_echo_skb(skb, priv->netdev, ctx->ndx);
+
err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx);
if (err)
goto xmit_failed;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index d91df34e7fa8..204ccb27d6d9 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -130,14 +130,55 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
/* protect from getting time before setting now */
if (ktime_to_ns(time_ref->tv_host)) {
u64 delta_us;
+ s64 delta_ts = 0;
+
+ /* General case: dev_ts_1 < dev_ts_2 < ts, with:
+ *
+ * - dev_ts_1 = previous sync timestamp
+ * - dev_ts_2 = last sync timestamp
+ * - ts = event timestamp
+ * - ts_period = known sync period (theoretical)
+ * ~ dev_ts2 - dev_ts1
+ * *but*:
+ *
+ * - time counters wrap (see adapter->ts_used_bits)
+ * - sometimes, dev_ts_1 < ts < dev_ts2
+ *
+ * "normal" case (sync time counters increase):
+ * must take into account case when ts wraps (tsw)
+ *
+ * < ts_period > < >
+ * | | |
+ * ---+--------+----+-------0-+--+-->
+ * ts_dev_1 | ts_dev_2 |
+ * ts tsw
+ */
+ if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
+ /* case when event time (tsw) wraps */
+ if (ts < time_ref->ts_dev_1)
+ delta_ts = BIT_ULL(time_ref->adapter->ts_used_bits);
+
+ /* Otherwise, sync time counter (ts_dev_2) has wrapped:
+ * handle case when event time (tsn) hasn't.
+ *
+ * < ts_period > < >
+ * | | |
+ * ---+--------+--0-+---------+--+-->
+ * ts_dev_1 | ts_dev_2 |
+ * tsn ts
+ */
+ } else if (time_ref->ts_dev_1 < ts) {
+ delta_ts = -BIT_ULL(time_ref->adapter->ts_used_bits);
+ }
- delta_us = ts - time_ref->ts_dev_2;
- if (ts < time_ref->ts_dev_2)
- delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1;
+ /* add delay between last sync and event timestamps */
+ delta_ts += (signed int)(ts - time_ref->ts_dev_2);
- delta_us += time_ref->ts_total;
+ /* add time from beginning to last sync */
+ delta_ts += time_ref->ts_total;
- delta_us *= time_ref->adapter->us_per_ts_scale;
+ /* convert ticks number into microseconds */
+ delta_us = delta_ts * time_ref->adapter->us_per_ts_scale;
delta_us >>= time_ref->adapter->us_per_ts_shift;
*time = ktime_add_us(time_ref->tv_host_0, delta_us);
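
A standalone sketch of the wrap-around arithmetic described in the comment above, assuming a 16-bit device timestamp counter (ts_used_bits = 16); the names are illustrative, not the driver's API:

#include <stdint.h>
#include <stdio.h>

#define TS_USED_BITS 16

/* signed tick distance from the last sync (ts_dev_2) to the event (ts) */
static int64_t event_delta_ts(uint32_t ts, uint32_t ts_dev_1, uint32_t ts_dev_2)
{
	int64_t delta_ts = 0;

	if (ts_dev_1 < ts_dev_2) {
		/* sync counters increased normally: only ts may have wrapped */
		if (ts < ts_dev_1)
			delta_ts = 1LL << TS_USED_BITS;
	} else if (ts_dev_1 < ts) {
		/* sync counter wrapped, but the event timestamp did not */
		delta_ts = -(1LL << TS_USED_BITS);
	}

	/* the wrap correction above makes this signed difference exact */
	delta_ts += (int32_t)(ts - ts_dev_2);

	return delta_ts;
}

int main(void)
{
	/* event 0x20 ticks after a sync at 0xfff0, across the 16-bit wrap */
	printf("%lld\n", (long long)event_delta_ts(0x0010, 0xff00, 0xfff0)); /* 32 */
	/* event 24 ticks before a sync that has already wrapped */
	printf("%lld\n", (long long)event_delta_ts(0xfff8, 0xfff0, 0x0010)); /* -24 */
	return 0;
}

The old code masked the difference to the counter width, which silently mishandled the case where the event timestamp lands before the last sync; keeping delta_ts signed lets both orderings produce the correct distance.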
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index ab63fd9eb982..d29d20525588 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -468,12 +468,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct canfd_frame *cfd;
struct sk_buff *skb;
const u16 rx_msg_flags = le16_to_cpu(rm->flags);
+ if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev))
+ return -ENOMEM;
+
+ dev = usb_if->dev[pucan_msg_get_channel(rm)];
+ netdev = dev->netdev;
+
if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
/* CANFD frame case */
skb = alloc_canfd_skb(netdev, &cfd);
@@ -519,15 +525,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
- struct pcan_usb_fd_device *pdev =
- container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_usb_fd_device *pdev;
enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
enum can_state rx_state, tx_state;
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct can_frame *cf;
struct sk_buff *skb;
+ if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev))
+ return -ENOMEM;
+
+ dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
+ pdev = container_of(dev, struct pcan_usb_fd_device, dev);
+ netdev = dev->netdev;
+
/* nothing should be sent while in BUS_OFF state */
if (dev->can.state == CAN_STATE_BUS_OFF)
return 0;
@@ -579,9 +591,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
- struct pcan_usb_fd_device *pdev =
- container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_usb_fd_device *pdev;
+ struct peak_usb_device *dev;
+
+ if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev))
+ return -EINVAL;
+
+ dev = usb_if->dev[pucan_ermsg_get_channel(er)];
+ pdev = container_of(dev, struct pcan_usb_fd_device, dev);
/* keep a trace of tx and rx error counters for later use */
pdev->bec.txerr = er->tx_err_cnt;
@@ -595,11 +612,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct can_frame *cf;
struct sk_buff *skb;
+ if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev))
+ return -EINVAL;
+
+ dev = usb_if->dev[pufd_omsg_get_channel(ov)];
+ netdev = dev->netdev;
+
/* allocate an skb to store the error frame */
skb = alloc_can_err_skb(netdev, &cf);
if (!skb)
@@ -716,6 +739,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
u16 tx_msg_size, tx_msg_flags;
u8 can_dlc;
+ if (cfd->len > CANFD_MAX_DLEN)
+ return -EINVAL;
+
tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
tx_msg->size = cpu_to_le16(tx_msg_size);
tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 6c4d00d2dbdc..48d746e18f30 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1395,7 +1395,7 @@ static int xcan_open(struct net_device *ndev)
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
__func__, ret);
- return ret;
+ goto err;
}
ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
@@ -1479,6 +1479,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
__func__, ret);
+ pm_runtime_put(priv->dev);
return ret;
}
@@ -1793,7 +1794,7 @@ static int xcan_probe(struct platform_device *pdev)
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
__func__, ret);
- goto err_pmdisable;
+ goto err_disableclks;
}
if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
@@ -1828,7 +1829,6 @@ static int xcan_probe(struct platform_device *pdev)
err_disableclks:
pm_runtime_put(priv->dev);
-err_pmdisable:
pm_runtime_disable(&pdev->dev);
err_free:
free_candev(ndev);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 74db81dafee3..09701c17f3f6 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -26,6 +26,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_bridge.h>
@@ -1837,6 +1838,16 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
i++;
}
+ /* The standalone PHY11G requires 300ms to be fully
+ * initialized and ready for any MDIO communication after being
+ * taken out of reset. For the SoC-internal GPHY variant there
+ * is no (known) documentation for the minimum time after a
+ * reset. Use the same value as for the standalone variant as
+ * some users have reported internal PHYs not being detected
+ * without any delay.
+ */
+ msleep(300);
+
return 0;
remove_gphy:
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index bd297ae7cf9e..34cca0a4b31c 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2297,6 +2297,8 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
+
+ mv88e6xxx_g1_wait_eeprom_done(chip);
}
}
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 10cd1bfd81a0..ade04c036fd9 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -393,8 +393,10 @@ static int mv88e6xxx_region_atu_snapshot(struct devlink *dl,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_fid_map(chip, fid_bitmap);
- if (err)
+ if (err) {
+ kfree(table);
goto out;
+ }
while (1) {
fid = find_next_bit(fid_bitmap, MV88E6XXX_N_FID, fid + 1);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index f62aa83ca08d..33d443a37efc 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -75,6 +75,37 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
+void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+ const unsigned long timeout = jiffies + 1 * HZ;
+ u16 val;
+ int err;
+
+ /* Wait up to 1 second for the switch to finish reading the
+ * EEPROM.
+ */
+ while (time_before(jiffies, timeout)) {
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
+ if (err) {
+ dev_err(chip->dev, "Error reading status");
+ return;
+ }
+
+ /* If the switch is still resetting, it may not
+ * respond on the bus, and so MDIO read returns
+ * 0xffff. Differentiate between that, and waiting for
+ * the EEPROM to be done by bit 0 being set.
+ */
+ if (val != 0xffff &&
+ val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
+ return;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_err(chip->dev, "Timeout waiting for EEPROM done");
+}
+
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5
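
mv88e6xxx_g1_wait_eeprom_done() above is a bounded poll: re-read the status word until the done bit is set, treating all-ones as "switch not answering yet", and give up after one second. A hypothetical userspace sketch of the same pattern, where read_status() stands in for the MDIO read of MV88E6XXX_G1_STS:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static uint16_t read_status(void)
{
	static int calls;

	/* pretend the switch is still in reset for two reads, then done */
	return ++calls < 3 ? 0xffff : 0x0001;
}

static bool wait_eeprom_done(void)
{
	time_t deadline = time(NULL) + 1;	/* wait up to ~1 second */

	while (time(NULL) <= deadline) {
		uint16_t val = read_status();

		/* 0xffff: the bus reads all ones while the switch resets */
		if (val != 0xffff && (val & 0x0001))	/* done bit set */
			return true;

		usleep(1000);	/* mirrors usleep_range(1000, 2000) */
	}
	return false;
}

int main(void)
{
	return wait_eeprom_done() ? 0 : 1;
}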
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 1e3546f8b072..e05abe61fa11 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -278,6 +278,7 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 48390b7b18ad..1048509a849b 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -125,11 +125,9 @@ static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
* Offset 0x08: VTU/STU Data Register 2
* Offset 0x09: VTU/STU Data Register 3
*/
-
-static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
+ u16 *regs)
{
- u16 regs[3];
int i;
/* Read all 3 VTU/STU Data registers */
@@ -142,12 +140,45 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
return err;
}
- /* Extract MemberTag and PortState data */
+ return 0;
+}
+
+static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 regs[3];
+ int err;
+ int i;
+
+ err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
+ if (err)
+ return err;
+
+ /* Extract MemberTag data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int member_offset = (i % 4) * 4;
- unsigned int state_offset = member_offset + 2;
entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
+ }
+
+ return 0;
+}
+
+static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 regs[3];
+ int err;
+ int i;
+
+ err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
+ if (err)
+ return err;
+
+ /* Extract PortState data */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ unsigned int state_offset = (i % 4) * 4 + 2;
+
entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
}
@@ -349,6 +380,10 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
+ err = mv88e6185_g1_stu_data_read(chip, entry);
+ if (err)
+ return err;
+
/* VTU DBNum[3:0] are located in VTU Operation 3:0
* VTU DBNum[7:4] are located in VTU Operation 11:8
*/
@@ -374,16 +409,20 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- /* Fetch (and mask) VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry);
if (err)
return err;
- err = mv88e6185_g1_vtu_data_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ /* Fetch VLAN PortState data from the STU */
+ err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6185_g1_stu_data_read(chip, entry);
if (err)
return err;
}
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 53064e0e1618..5bdac669a339 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1219,8 +1219,8 @@ qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
priv->port_mtu[port] = new_mtu;
for (i = 0; i < QCA8K_NUM_PORTS; i++)
- if (priv->port_mtu[port] > mtu)
- mtu = priv->port_mtu[port];
+ if (priv->port_mtu[i] > mtu)
+ mtu = priv->port_mtu[i];
/* Include L2 header / FCS length */
qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index ad30cacc1622..032ab9f20438 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -516,6 +516,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
{
struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+ u16 q_depth = io_cq->q_depth;
u16 cdesc_idx = 0;
u16 nb_hw_desc;
u16 i = 0;
@@ -543,6 +544,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
do {
ena_buf[i].len = cdesc->length;
ena_buf[i].req_id = cdesc->req_id;
+ if (unlikely(ena_buf[i].req_id >= q_depth))
+ return -EIO;
if (++i >= nb_hw_desc)
break;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e8131dadc22c..df1884d57d1a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -789,24 +789,6 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
adapter->num_io_queues);
}
-static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
-{
- if (likely(req_id < rx_ring->ring_size))
- return 0;
-
- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
- "Invalid rx req_id: %hu\n", req_id);
-
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.bad_req_id++;
- u64_stats_update_end(&rx_ring->syncp);
-
- /* Trigger device reset */
- rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
- set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
- return -EFAULT;
-}
-
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
* @adapter: network interface device structure
* @qid: queue index
@@ -926,10 +908,14 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
static int ena_alloc_rx_page(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info, gfp_t gfp)
{
+ int headroom = rx_ring->rx_headroom;
struct ena_com_buf *ena_buf;
struct page *page;
dma_addr_t dma;
+ /* restore page offset value in case it has been changed by the device */
+ rx_info->page_offset = headroom;
+
/* if previous allocated page is not used */
if (unlikely(rx_info->page))
return 0;
@@ -959,10 +945,9 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
"Allocate page %p, rx_info %p\n", page, rx_info);
rx_info->page = page;
- rx_info->page_offset = 0;
ena_buf = &rx_info->ena_buf;
- ena_buf->paddr = dma + rx_ring->rx_headroom;
- ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;
+ ena_buf->paddr = dma + headroom;
+ ena_buf->len = ENA_PAGE_SIZE - headroom;
return 0;
}
@@ -1356,15 +1341,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info;
u16 len, req_id, buf = 0;
void *va;
- int rc;
len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;
- rc = validate_rx_req_id(rx_ring, req_id);
- if (unlikely(rc < 0))
- return NULL;
-
rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(!rx_info->page)) {
@@ -1379,7 +1359,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
/* save virt address of first buffer */
va = page_address(rx_info->page) + rx_info->page_offset;
- prefetch(va + NET_IP_ALIGN);
+
+ prefetch(va);
if (len <= rx_ring->rx_copybreak) {
skb = ena_alloc_skb(rx_ring, false);
@@ -1420,8 +1401,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
rx_info->page_offset, len, ENA_PAGE_SIZE);
- /* The offset is non zero only for the first buffer */
- rx_info->page_offset = 0;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"RX skb updated. len %d. data_len %d\n",
@@ -1440,10 +1419,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;
- rc = validate_rx_req_id(rx_ring, req_id);
- if (unlikely(rc < 0))
- return NULL;
-
rx_info = &rx_ring->rx_buffer_info[req_id];
} while (1);
@@ -1544,8 +1519,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
int ret;
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
- xdp->data = page_address(rx_info->page) +
- rx_info->page_offset + rx_ring->rx_headroom;
+ xdp->data = page_address(rx_info->page) + rx_info->page_offset;
xdp_set_data_meta_invalid(xdp);
xdp->data_hard_start = page_address(rx_info->page);
xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
@@ -1612,8 +1586,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
if (unlikely(ena_rx_ctx.descs == 0))
break;
+ /* First descriptor might have an offset set by the device */
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
- rx_info->page_offset = ena_rx_ctx.pkt_offset;
+ rx_info->page_offset += ena_rx_ctx.pkt_offset;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
@@ -1697,12 +1672,18 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
error:
adapter = netdev_priv(rx_ring->netdev);
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.bad_desc_num++;
- u64_stats_update_end(&rx_ring->syncp);
+ if (rc == -ENOSPC) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bad_desc_num++;
+ u64_stats_update_end(&rx_ring->syncp);
+ adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+ } else {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bad_req_id++;
+ u64_stats_update_end(&rx_ring->syncp);
+ adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ }
- /* Too many desc from the device. Trigger reset */
- adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return 0;
@@ -3388,16 +3369,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
goto err_mmio_read_less;
}
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
if (rc) {
- dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
- goto err_mmio_read_less;
- }
-
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
- if (rc) {
- dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
- rc);
+ dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
goto err_mmio_read_less;
}
@@ -4167,6 +4141,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
+ if (rc) {
+ dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
+ goto err_disable_device;
+ }
+
pci_set_master(pdev);
ena_dev = vzalloc(sizeof(*ena_dev));
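
Both ena hunks above move to the consolidated DMA API. The same pattern with a
32-bit fallback (as bnxt uses further down) would look like this; a sketch
only, assuming a struct pci_dev *pdev:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Set streaming and coherent DMA masks in one call; fall back to
     * 32 bit when the preferred width is not supported.
     */
    static int example_set_dma(struct pci_dev *pdev, int width)
    {
            int rc;

            rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(width));
            if (rc)
                    rc = dma_set_mask_and_coherent(&pdev->dev,
                                                   DMA_BIT_MASK(32));
            return rc;
    }
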
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 4f913658eea4..24122ccda614 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -413,85 +413,63 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff->rxdata.pg_off,
buff->len, DMA_FROM_DEVICE);
- /* for single fragment packets use build_skb() */
- if (buff->is_eop &&
- buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
- skb = build_skb(aq_buf_vaddr(&buff->rxdata),
+ skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
+ if (unlikely(!skb)) {
+ u64_stats_update_begin(&self->stats.rx.syncp);
+ self->stats.rx.skb_alloc_fails++;
+ u64_stats_update_end(&self->stats.rx.syncp);
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(self->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+
+ hdr_len = buff->len;
+ if (hdr_len > AQ_CFG_RX_HDR_SIZE)
+ hdr_len = eth_get_headlen(skb->dev,
+ aq_buf_vaddr(&buff->rxdata),
+ AQ_CFG_RX_HDR_SIZE);
+
+ memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
+ ALIGN(hdr_len, sizeof(long)));
+
+ if (buff->len - hdr_len > 0) {
+ skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ buff->rxdata.pg_off + hdr_len,
+ buff->len - hdr_len,
AQ_CFG_RX_FRAME_MAX);
- if (unlikely(!skb)) {
- u64_stats_update_begin(&self->stats.rx.syncp);
- self->stats.rx.skb_alloc_fails++;
- u64_stats_update_end(&self->stats.rx.syncp);
- err = -ENOMEM;
- goto err_exit;
- }
- if (is_ptp_ring)
- buff->len -=
- aq_ptp_extract_ts(self->aq_nic, skb,
- aq_buf_vaddr(&buff->rxdata),
- buff->len);
- skb_put(skb, buff->len);
page_ref_inc(buff->rxdata.page);
- } else {
- skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- u64_stats_update_begin(&self->stats.rx.syncp);
- self->stats.rx.skb_alloc_fails++;
- u64_stats_update_end(&self->stats.rx.syncp);
- err = -ENOMEM;
- goto err_exit;
- }
- if (is_ptp_ring)
- buff->len -=
- aq_ptp_extract_ts(self->aq_nic, skb,
- aq_buf_vaddr(&buff->rxdata),
- buff->len);
-
- hdr_len = buff->len;
- if (hdr_len > AQ_CFG_RX_HDR_SIZE)
- hdr_len = eth_get_headlen(skb->dev,
- aq_buf_vaddr(&buff->rxdata),
- AQ_CFG_RX_HDR_SIZE);
-
- memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
- ALIGN(hdr_len, sizeof(long)));
-
- if (buff->len - hdr_len > 0) {
- skb_add_rx_frag(skb, 0, buff->rxdata.page,
- buff->rxdata.pg_off + hdr_len,
- buff->len - hdr_len,
- AQ_CFG_RX_FRAME_MAX);
- page_ref_inc(buff->rxdata.page);
- }
+ }
- if (!buff->is_eop) {
- buff_ = buff;
- i = 1U;
- do {
- next_ = buff_->next,
- buff_ = &self->buff_ring[next_];
+ if (!buff->is_eop) {
+ buff_ = buff;
+ i = 1U;
+ do {
+ next_ = buff_->next;
+ buff_ = &self->buff_ring[next_];
- dma_sync_single_range_for_cpu(
- aq_nic_get_dev(self->aq_nic),
- buff_->rxdata.daddr,
- buff_->rxdata.pg_off,
- buff_->len,
- DMA_FROM_DEVICE);
- skb_add_rx_frag(skb, i++,
- buff_->rxdata.page,
- buff_->rxdata.pg_off,
- buff_->len,
- AQ_CFG_RX_FRAME_MAX);
- page_ref_inc(buff_->rxdata.page);
- buff_->is_cleaned = 1;
-
- buff->is_ip_cso &= buff_->is_ip_cso;
- buff->is_udp_cso &= buff_->is_udp_cso;
- buff->is_tcp_cso &= buff_->is_tcp_cso;
- buff->is_cso_err |= buff_->is_cso_err;
+ dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, i++,
+ buff_->rxdata.page,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ AQ_CFG_RX_FRAME_MAX);
+ page_ref_inc(buff_->rxdata.page);
+ buff_->is_cleaned = 1;
- } while (!buff_->is_eop);
- }
+ buff->is_ip_cso &= buff_->is_ip_cso;
+ buff->is_udp_cso &= buff_->is_udp_cso;
+ buff->is_tcp_cso &= buff_->is_tcp_cso;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ } while (!buff_->is_eop);
}
if (buff->is_vlan)
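
The aq_ring rewrite drops the build_skb() fast path and always header-splits:
copy at most the header bytes into a small napi skb, then attach the payload
as a page fragment. A condensed sketch under assumed parameters (hdr_size and
truesize stand in for AQ_CFG_RX_HDR_SIZE and AQ_CFG_RX_FRAME_MAX):

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/mm.h>

    /* Header-split receive: linearize only the protocol headers and
     * reference the remaining payload as a page fragment.
     */
    static struct sk_buff *build_split_skb(struct napi_struct *napi,
                                           struct page *page,
                                           unsigned int pg_off, void *vaddr,
                                           unsigned int len,
                                           unsigned int hdr_size,
                                           unsigned int truesize)
    {
            struct sk_buff *skb;
            unsigned int hdr_len;

            skb = napi_alloc_skb(napi, hdr_size);
            if (unlikely(!skb))
                    return NULL;

            hdr_len = len;
            if (hdr_len > hdr_size)
                    hdr_len = eth_get_headlen(skb->dev, vaddr, hdr_size);

            memcpy(__skb_put(skb, hdr_len), vaddr,
                   ALIGN(hdr_len, sizeof(long)));

            if (len > hdr_len) {
                    skb_add_rx_frag(skb, 0, page, pg_off + hdr_len,
                                    len - hdr_len, truesize);
                    page_ref_inc(page);
            }
            return skb;
    }
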
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 0c12cf7bda50..3f65f2b370c5 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2543,8 +2543,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
- (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 098b0328e3cb..ff9f96de74b8 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2312,8 +2312,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
- (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 74c1778d841e..b455b60a5434 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2383,7 +2383,8 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_free_dev;
}
- if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
+ err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
+ if (err) {
dev_err(sdev->dev,
"Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7975f59735d6..0af0af2b70fe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4099,7 +4099,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
- if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
+ if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) ||
+ test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_free_port_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
@@ -7757,6 +7758,7 @@ static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
{
u64 sw_tmp;
+ hw &= mask;
sw_tmp = (*sw & ~mask) | hw;
if (hw < (*sw & mask))
sw_tmp += mask + 1;
@@ -11588,7 +11590,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
dev_err(&pdev->dev, "System does not support DMA, aborting\n");
- goto init_err_disable;
+ rc = -EIO;
+ goto init_err_release;
}
pci_set_master(pdev);
@@ -12672,6 +12675,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
create_singlethread_workqueue("bnxt_pf_wq");
if (!bnxt_pf_wq) {
dev_err(&pdev->dev, "Unable to create workqueue.\n");
+ rc = -ENOMEM;
goto init_err_pci_clean;
}
}
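
The bnxt_add_one_ctr() change masks the hardware sample before folding it in,
so stale high bits cannot corrupt the accumulated total. Reconstructed with
comments from the hunk (the final write-back line is assumed):

    #include <linux/types.h>

    /* Fold a masked hardware counter sample into a 64-bit software
     * total, adding one full wrap when the sample went backwards.
     */
    static void add_one_ctr(u64 hw, u64 *sw, u64 mask)
    {
            u64 sw_tmp;

            hw &= mask;                   /* drop bits beyond counter width */
            sw_tmp = (*sw & ~mask) | hw;  /* keep accumulated wrap-arounds */
            if (hw < (*sw & mask))        /* counter wrapped since last read */
                    sw_tmp += mask + 1;
            *sw = sw_tmp;
    }
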
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 53687bc7fcf5..1471c9a36238 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2079,6 +2079,9 @@ int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_input req = {0};
int rc;
+ if (BNXT_VF(bp))
+ return -EOPNOTSUPP;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -2997,7 +3000,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
start, length, data);
}
return rc;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 883e47c5b1a7..286f0341bdf8 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1929,7 +1929,8 @@ static inline int macb_clear_csum(struct sk_buff *skb)
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
- bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
+ bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
+ skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 87cc0ef68b31..8ba0e08e5e64 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -68,7 +68,7 @@ config CHELSIO_T3
config CHELSIO_T4
tristate "Chelsio Communications T4/T5/T6 Ethernet support"
- depends on PCI && (IPV6 || IPV6=n)
+ depends on PCI && (IPV6 || IPV6=n) && (TLS || TLS=n)
select FW_LOADER
select MDIO
select ZLIB_DEFLATE
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 3352dad6ca99..27308600da15 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -2124,6 +2124,9 @@ void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
struct ulptx_sgl *sgl, u64 *end, unsigned int start,
const dma_addr_t *addr);
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end,
+ const dma_addr_t *addr, u32 start, u32 send_len);
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0273f40b85f7..17410fe86626 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3573,6 +3573,8 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
seq_printf(seq, "TX trim pkts : %20llu\n",
atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
+ seq_printf(seq, "TX sw fallback : %20llu\n",
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_fallback));
while (i < MAX_NPORTS) {
ktls_port = &adap->ch_ktls_stats.ktls_port[i];
seq_printf(seq, "Port %d\n", i);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 4e55f7081644..83b46440408b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -880,7 +880,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
- fwr->smac_sel = f->smt->idx;
+ if (f->fs.newsmac)
+ fwr->smac_sel = f->smt->idx;
fwr->rx_chan_rx_rpl_iq =
htons(FW_FILTER_WR_RX_CHAN_V(0) |
FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a952fe198eb9..7fd264a6d085 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1176,6 +1176,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
txq = netdev_pick_tx(dev, skb, sb_dev);
if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
skb->encapsulation ||
+ cxgb4_is_ktls_skb(skb) ||
(proto != IPPROTO_TCP && proto != IPPROTO_UDP))
txq = txq % pi->nqsets;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b169776ab484..1b49f2fa9b18 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -388,6 +388,7 @@ struct ch_ktls_stats_debug {
atomic64_t ktls_tx_retransmit_pkts;
atomic64_t ktls_tx_complete_pkts;
atomic64_t ktls_tx_trimmed_pkts;
+ atomic64_t ktls_tx_fallback;
};
#endif
@@ -493,6 +494,11 @@ struct cxgb4_uld_info {
#endif
};
+static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
+{
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
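
cxgb4_is_ktls_skb() centralizes the offload test; the sge.c hunk below then
gates on it plus a non-zero TLS payload so that bare ACK/retransmit segments
skip the crypto handler. A sketch of that gate (payload computation as in the
xmit hunk):

    #include <linux/skbuff.h>
    #include <linux/tcp.h>

    /* Steer only kTLS skbs that actually carry payload to the ULD
     * handler; header-only segments take the normal Tx path.
     */
    static bool needs_ktls_handler(struct sk_buff *skb)
    {
            u32 payload = skb->len -
                          (skb_transport_offset(skb) + tcp_hdrlen(skb));

            return cxgb4_is_ktls_skb(skb) && payload;
    }
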
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index a9e9c7ae565d..196652a114c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -890,6 +890,114 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
}
EXPORT_SYMBOL(cxgb4_write_sgl);
+/* cxgb4_write_partial_sgl - populate SGL for partial packet
+ * @skb: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @addr: the list of bus addresses for the SGL elements
+ * @start: start offset in the SKB where partial data starts
+ * @len: length of data from @start to send out
+ *
+ * This routine handles sending out partial data of an skb when required.
+ * Unlike cxgb4_write_sgl, @start can be any offset into the skb data,
+ * and @len determines how much data after @start to send out.
+ */
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end,
+ const dma_addr_t *addr, u32 start, u32 len)
+{
+ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
+ u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+ struct skb_shared_info *si = skb_shinfo(skb);
+ u8 i = 0, frag_idx = 0, nfrags = 0;
+ skb_frag_t *frag;
+
+ /* Fill the first SGL either from linear data or from partial
+ * frag based on @start.
+ */
+ if (unlikely(start < skb_linear_data_len)) {
+ frag_size = min(len, skb_linear_data_len - start);
+ sgl->len0 = htonl(frag_size);
+ sgl->addr0 = cpu_to_be64(addr[0] + start);
+ len -= frag_size;
+ nfrags++;
+ } else {
+ start -= skb_linear_data_len;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ /* find the first frag */
+ while (start >= frag_size) {
+ start -= frag_size;
+ frag_idx++;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ }
+
+ frag_size = min(len, skb_frag_size(frag) - start);
+ sgl->len0 = cpu_to_be32(frag_size);
+ sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start);
+ len -= frag_size;
+ nfrags++;
+ frag_idx++;
+ }
+
+ /* If the entire partial data fits in one SGL, then send it out
+ * now.
+ */
+ if (!len)
+ goto done;
+
+ /* Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+ /* If the data couldn't fit completely in the first SGL, fill the
+ * rest of the frags in subsequent SGLs. Note that each SGL
+ * pair can store 2 frags.
+ */
+ while (len) {
+ frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+ to->len[i & 1] = cpu_to_be32(frag_size);
+ to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]);
+ if (i && (i & 1))
+ to++;
+ nfrags++;
+ frag_idx++;
+ i++;
+ len -= frag_size;
+ }
+
+ /* If we ended on an odd boundary, then set the second SGL's
+ * length in the pair to 0.
+ */
+ if (i & 1)
+ to->len[1] = cpu_to_be32(0);
+
+ /* Copy from temporary buffer to Tx ring, in case we hit the
+ * end of the queue in the middle of writing the SGL.
+ */
+ if (unlikely((u8 *)end > (u8 *)q->stat)) {
+ u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = (u8 *)end - (u8 *)q->stat;
+ memcpy(q->desc, (u8 *)buf + part0, part1);
+ end = (void *)q->desc + part1;
+ }
+
+ /* 0-pad to multiple of 16 */
+ if ((uintptr_t)end & 8)
+ *end = 0;
+done:
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE_V(nfrags));
+}
+EXPORT_SYMBOL(cxgb4_write_partial_sgl);
+
/* This function copies 64 byte coalesced work request to
* memory mapped BAR2 space. For coalesced WR SGE fetches
* data from the FIFO instead of from Host.
@@ -1422,7 +1530,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#endif /* CHELSIO_IPSEC_INLINE */
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
- if (skb->decrypted)
+ if (cxgb4_is_ktls_skb(skb) &&
+ (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
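
cxgb4_write_partial_sgl() above and chcr_get_nfrags_to_send() further down
share the same offset walk: consume @start from the linear area first, then
step over whole frags until the frag holding the first byte to send is found.
Isolated as an illustrative sketch:

    #include <linux/skbuff.h>

    /* Locate the frag containing byte @start of the paged data; on
     * return *off is the offset inside that frag. The caller must
     * guarantee start >= skb_headlen(skb).
     */
    static skb_frag_t *find_start_frag(struct sk_buff *skb, u32 start,
                                       u32 *off)
    {
            struct skb_shared_info *si = skb_shinfo(skb);
            u32 idx = 0, fsz;

            start -= skb_headlen(skb);
            fsz = skb_frag_size(&si->frags[idx]);
            while (start >= fsz) {
                    start -= fsz;
                    fsz = skb_frag_size(&si->frags[++idx]);
            }
            *off = start;
            return &si->frags[idx];
    }
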
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 5195f692f14d..7f90b828d159 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -14,6 +14,50 @@
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
+/* chcr_get_nfrags_to_send: get the number of frags remaining after the start offset
+ * @skb: skb
+ * @start: start offset.
+ * @len: how much data to send after @start
+ */
+static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
+{
+ struct skb_shared_info *si = skb_shinfo(skb);
+ u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+ u8 nfrags = 0, frag_idx = 0;
+ skb_frag_t *frag;
+
+ /* if it's a linear skb then return 1 */
+ if (!skb_is_nonlinear(skb))
+ return 1;
+
+ if (unlikely(start < skb_linear_data_len)) {
+ frag_size = min(len, skb_linear_data_len - start);
+ start = 0;
+ } else {
+ start -= skb_linear_data_len;
+
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ while (start >= frag_size) {
+ start -= frag_size;
+ frag_idx++;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ }
+ frag_size = min(len, skb_frag_size(frag) - start);
+ }
+ len -= frag_size;
+ nfrags++;
+
+ while (len) {
+ frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+ len -= frag_size;
+ nfrags++;
+ frag_idx++;
+ }
+ return nfrags;
+}
+
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
/*
* chcr_ktls_save_keys: calculate and save crypto keys.
@@ -500,7 +544,9 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
/* need to wait for hw response, can't free tx_info yet. */
if (tx_info->open_state == CH_KTLS_OPEN_PENDING)
tx_info->pending_close = true;
- /* free the lock after the cleanup */
+ else
+ spin_unlock_bh(&tx_info->lock);
+ /* if in pending close, release the lock after the cleanup */
goto put_module;
}
spin_unlock_bh(&tx_info->lock);
@@ -689,7 +735,8 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
}
static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
- u32 tid, void *pos, u16 word, u64 mask,
+ u32 tid, void *pos, u16 word,
+ struct sge_eth_txq *q, u64 mask,
u64 val, u32 reply)
{
struct cpl_set_tcb_field_core *cpl;
@@ -698,7 +745,10 @@ static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
/* ULP_TXPKT */
txpkt = pos;
- txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) |
+ ULP_TXPKT_RO_F);
txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
/* ULPTX_IDATA sub-command */
@@ -753,7 +803,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
} else {
u8 buf[48] = {0};
- __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
+ __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q,
mask, val, reply);
return chcr_copy_to_txd(buf, &q->q, pos,
@@ -761,7 +811,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
}
}
- pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
+ pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q,
mask, val, reply);
/* check again if we are at the end of the queue */
@@ -783,11 +833,11 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
*/
static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u64 tcp_seq,
- u64 tcp_ack, u64 tcp_win)
+ u64 tcp_ack, u64 tcp_win, bool offset)
{
bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
struct ch_ktls_port_stats_debug *port_stats;
- u32 len, cpl = 0, ndesc, wr_len;
+ u32 len, cpl = 0, ndesc, wr_len, wr_mid = 0;
struct fw_ulptx_wr *wr;
int credits;
void *pos;
@@ -803,6 +853,11 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
return NETDEV_TX_BUSY;
}
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
pos = &q->q.desc[q->q.pidx];
/* make space for WR, we'll fill it later when we know all the cpls
* being sent out and have complete length.
@@ -818,7 +873,7 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
cpl++;
}
/* reset snd una if it's a re-transmit pkt */
- if (tcp_seq != tx_info->prev_seq) {
+ if (tcp_seq != tx_info->prev_seq || offset) {
/* reset snd_una */
port_stats =
&tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
@@ -827,7 +882,8 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
TCB_SND_UNA_RAW_V
(TCB_SND_UNA_RAW_M),
TCB_SND_UNA_RAW_V(0), 0);
- atomic64_inc(&port_stats->ktls_tx_ooo);
+ if (tcp_seq != tx_info->prev_seq)
+ atomic64_inc(&port_stats->ktls_tx_ooo);
cpl++;
}
/* update ack */
@@ -856,7 +912,8 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
wr->cookie = 0;
/* fill len in wr field */
- wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+ wr->flowid_len16 = htonl(wr_mid |
+ FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
ndesc = DIV_ROUND_UP(len, 64);
chcr_txq_advance(&q->q, ndesc);
@@ -866,34 +923,14 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
}
/*
- * chcr_ktls_skb_copy
- * @nskb - new skb where the frags to be added.
- * @skb - old skb from which frags will be copied.
- */
-static void chcr_ktls_skb_copy(struct sk_buff *skb, struct sk_buff *nskb)
-{
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
- __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
- }
-
- skb_shinfo(nskb)->nr_frags = skb_shinfo(skb)->nr_frags;
- nskb->len += skb->data_len;
- nskb->data_len = skb->data_len;
- nskb->truesize += skb->data_len;
-}
-
-/*
* chcr_ktls_get_tx_flits
* returns number of flits to be sent out, it includes key context length, WR
* size and skb fragments.
*/
static unsigned int
-chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
+chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len)
{
- return chcr_sgl_len(skb_shinfo(skb)->nr_frags) +
+ return chcr_sgl_len(nr_frags) +
DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
}
@@ -957,8 +994,10 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
struct tcphdr *tcp;
int len16, pktlen;
struct iphdr *ip;
+ u32 wr_mid = 0;
int credits;
u8 buf[150];
+ u64 cntrl1;
void *pos;
iplen = skb_network_header_len(skb);
@@ -967,7 +1006,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb->len - skb->data_len;
+ pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -980,6 +1019,11 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
pos = &q->q.desc[q->q.pidx];
wr = pos;
@@ -987,7 +1031,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(ctrl));
- wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
+ wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
wr->r3 = 0;
cpl = (void *)(wr + 1);
@@ -997,22 +1041,28 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
TXPKT_PF_V(tx_info->adap->pf));
cpl->pack = 0;
cpl->len = htons(pktlen);
- /* checksum offload */
- cpl->ctrl1 = 0;
-
- pos = cpl + 1;
memcpy(buf, skb->data, pktlen);
if (tx_info->ip_family == AF_INET) {
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
#endif
}
+
+ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+ TXPKT_IPHDR_LEN_V(iplen);
+ /* checksum offload */
+ cpl->ctrl1 = cpu_to_be64(cntrl1);
+
+ pos = cpl + 1;
+
/* now take care of the tcp header, if fin is not set then clear push
* bit as well, and if fin is set, it will be sent at the last so we
* need to update the tcp sequence number as per the last packet.
@@ -1031,71 +1081,6 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
return 0;
}
-/* chcr_ktls_skb_shift - Shifts request length paged data from skb to another.
- * @tgt- buffer into which tail data gets added
- * @skb- buffer from which the paged data comes from
- * @shiftlen- shift up to this many bytes
- */
-static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
- int shiftlen)
-{
- skb_frag_t *fragfrom, *fragto;
- int from, to, todo;
-
- WARN_ON(shiftlen > skb->data_len);
-
- todo = shiftlen;
- from = 0;
- to = 0;
- fragfrom = &skb_shinfo(skb)->frags[from];
-
- while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
- fragfrom = &skb_shinfo(skb)->frags[from];
- fragto = &skb_shinfo(tgt)->frags[to];
-
- if (todo >= skb_frag_size(fragfrom)) {
- *fragto = *fragfrom;
- todo -= skb_frag_size(fragfrom);
- from++;
- to++;
-
- } else {
- __skb_frag_ref(fragfrom);
- skb_frag_page_copy(fragto, fragfrom);
- skb_frag_off_copy(fragto, fragfrom);
- skb_frag_size_set(fragto, todo);
-
- skb_frag_off_add(fragfrom, todo);
- skb_frag_size_sub(fragfrom, todo);
- todo = 0;
-
- to++;
- break;
- }
- }
-
- /* Ready to "commit" this state change to tgt */
- skb_shinfo(tgt)->nr_frags = to;
-
- /* Reposition in the original skb */
- to = 0;
- while (from < skb_shinfo(skb)->nr_frags)
- skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
-
- skb_shinfo(skb)->nr_frags = to;
-
- WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
-
- skb->len -= shiftlen;
- skb->data_len -= shiftlen;
- skb->truesize -= shiftlen;
- tgt->len += shiftlen;
- tgt->data_len += shiftlen;
- tgt->truesize += shiftlen;
-
- return shiftlen;
-}
-
/*
* chcr_ktls_xmit_wr_complete: This sends out the complete record. If an skb
* received has partial end part of the record, send out the complete record, so
@@ -1111,6 +1096,8 @@ static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u32 tcp_seq,
+ bool is_last_wr, u32 data_len,
+ u32 skb_offset, u32 nfrags,
bool tcp_push, u32 mss)
{
u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
@@ -1126,7 +1113,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
u64 *end;
/* get the number of flits required */
- flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len);
+ flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len);
/* number of descriptors */
ndesc = chcr_flits_to_desc(flits);
/* check if enough credits available */
@@ -1155,6 +1142,9 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (!is_last_wr)
+ skb_get(skb);
+
pos = &q->q.desc[q->q.pidx];
end = (u64 *)pos + flits;
/* FW_ULPTX_WR */
@@ -1187,7 +1177,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
- cpl->pldlen = htonl(skb->data_len);
+ cpl->pldlen = htonl(data_len);
/* encryption should start after tls header size + iv size */
cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
@@ -1229,7 +1219,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
/* CPL_TX_DATA */
tx_data = (void *)pos;
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
- tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(skb->data_len));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(data_len));
tx_data->rsvd = htonl(tcp_seq);
@@ -1249,8 +1239,8 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
}
/* send the complete packet except the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1282,10 +1272,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
struct sge_eth_txq *q,
u32 tcp_seq, bool tcp_push, u32 mss,
u32 tls_rec_offset, u8 *prior_data,
- u32 prior_data_len)
+ u32 prior_data_len, u32 data_len,
+ u32 skb_offset)
{
+ u32 len16, wr_mid = 0, cipher_start, nfrags;
struct adapter *adap = tx_info->adap;
- u32 len16, wr_mid = 0, cipher_start;
unsigned int flits = 0, ndesc;
int credits, left, last_desc;
struct tx_sw_desc *sgl_sdesc;
@@ -1298,10 +1289,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
void *pos;
u64 *end;
+ nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
/* get the number of flits required, it's a partial record so 2 flits
* (AES_BLOCK_SIZE) will be added.
*/
- flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len) + 2;
+ flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len) + 2;
/* get the correct 8 byte IV of this record */
iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
/* If it's a middle record and not 16 byte aligned to run AES CTR, need
@@ -1373,7 +1365,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
- cpl->pldlen = htonl(skb->data_len + AES_BLOCK_LEN + prior_data_len);
+ cpl->pldlen = htonl(data_len + AES_BLOCK_LEN + prior_data_len);
cpl->aadstart_cipherstop_hi =
htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
cpl->cipherstop_lo_authinsert = 0;
@@ -1404,7 +1396,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
tx_data = (void *)pos;
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
tx_data->len = htonl(TX_DATA_MSS_V(mss) |
- TX_LENGTH_V(skb->data_len + prior_data_len));
+ TX_LENGTH_V(data_len + prior_data_len));
tx_data->rsvd = htonl(tcp_seq);
tx_data->flags = htonl(TX_BYPASS_F);
if (tcp_push)
@@ -1437,8 +1429,8 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
if (prior_data_len)
pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
/* send the complete packet except the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1466,6 +1458,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
struct sk_buff *skb, u32 tcp_seq, u32 mss,
bool tcp_push, struct sge_eth_txq *q,
u32 port_id, u8 *prior_data,
+ u32 data_len, u32 skb_offset,
u32 prior_data_len)
{
int credits, left, len16, last_desc;
@@ -1475,14 +1468,16 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
struct ulptx_idata *idata;
struct ulp_txpkt *ulptx;
struct fw_ulptx_wr *wr;
- u32 wr_mid = 0;
+ u32 wr_mid = 0, nfrags;
void *pos;
u64 *end;
flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
- flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags);
+ nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
+ flits += chcr_sgl_len(nfrags);
if (prior_data_len)
flits += 2;
+
/* WR will need len16 */
len16 = DIV_ROUND_UP(flits, 2);
/* check how many descriptors needed */
@@ -1535,7 +1530,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
tx_data = (struct cpl_tx_data *)(idata + 1);
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
tx_data->len = htonl(TX_DATA_MSS_V(mss) |
- TX_LENGTH_V(skb->data_len + prior_data_len));
+ TX_LENGTH_V(data_len + prior_data_len));
/* set tcp seq number */
tx_data->rsvd = htonl(tcp_seq);
tx_data->flags = htonl(TX_BYPASS_F);
@@ -1559,8 +1554,8 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
end = pos + left;
}
/* send the complete packet including the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1568,12 +1563,96 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
return 0;
}
+static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb,
+ struct sge_eth_txq *q)
+{
+ u32 ctrl, iplen, maclen, wr_mid = 0, len16;
+ struct tx_sw_desc *sgl_sdesc;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ unsigned int flits, ndesc;
+ int credits, last_desc;
+ u64 cntrl1, *end;
+ void *pos;
+
+ ctrl = sizeof(*cpl);
+ flits = DIV_ROUND_UP(sizeof(*wr) + ctrl, 8);
+
+ flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ len16 = DIV_ROUND_UP(flits, 2);
+ /* check how many descriptors needed */
+ ndesc = DIV_ROUND_UP(flits, 8);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return -ENOMEM;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
+ sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return -ENOMEM;
+ }
+
+ iplen = skb_network_header_len(skb);
+ maclen = skb_mac_header_len(skb);
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ wr = pos;
+
+ /* Firmware work request header */
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(ctrl));
+
+ wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) |
+ TXPKT_INTF_V(tx_info->tx_chan) |
+ TXPKT_PF_V(tx_info->adap->pf));
+ cpl->pack = 0;
+ cntrl1 = TXPKT_CSUM_TYPE_V(tx_info->ip_family == AF_INET ?
+ TX_CSUM_TCPIP : TX_CSUM_TCPIP6);
+ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+ TXPKT_IPHDR_LEN_V(iplen);
+ /* checksum offload */
+ cpl->ctrl1 = cpu_to_be64(cntrl1);
+ cpl->len = htons(skb->len);
+
+ pos = cpl + 1;
+
+ cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ return 0;
+}
+
/*
* chcr_ktls_copy_record_in_skb
* @nskb - new skb where the frags to be added.
+ * @skb - old skb from which socket and destructor details are copied.
* @record - specific record which has complete 16k record in frags.
*/
static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
+ struct sk_buff *skb,
struct tls_record_info *record)
{
int i = 0;
@@ -1588,6 +1667,9 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
nskb->data_len = record->len;
nskb->len += record->len;
nskb->truesize += record->len;
+ nskb->sk = skb->sk;
+ nskb->destructor = skb->destructor;
+ refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
}
/*
@@ -1659,7 +1741,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
struct sk_buff *skb,
struct tls_record_info *record,
u32 tcp_seq, int mss, bool tcp_push_no_fin,
- struct sge_eth_txq *q,
+ struct sge_eth_txq *q, u32 skb_offset,
u32 tls_end_offset, bool last_wr)
{
struct sk_buff *nskb = NULL;
@@ -1668,30 +1750,37 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
nskb = skb;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts);
} else {
- dev_kfree_skb_any(skb);
-
- nskb = alloc_skb(0, GFP_KERNEL);
- if (!nskb)
+ nskb = alloc_skb(0, GFP_ATOMIC);
+ if (!nskb) {
+ dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
+ }
+
/* copy complete record in skb */
- chcr_ktls_copy_record_in_skb(nskb, record);
+ chcr_ktls_copy_record_in_skb(nskb, skb, record);
/* packet is being sent from the beginning, update the tcp_seq
* accordingly.
*/
tcp_seq = tls_record_start_seq(record);
- /* reset snd una, so the middle record won't send the already
- * sent part.
- */
- if (chcr_ktls_update_snd_una(tx_info, q))
- goto out;
+ /* reset skb offset */
+ skb_offset = 0;
+
+ if (last_wr)
+ dev_kfree_skb_any(skb);
+
+ last_wr = true;
+
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts);
}
if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
+ last_wr, record->len, skb_offset,
+ record->num_frags,
(last_wr && tcp_push_no_fin),
mss)) {
goto out;
}
+ tx_info->prev_seq = record->end_seq;
return 0;
out:
dev_kfree_skb_any(nskb);
@@ -1723,41 +1812,47 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
struct sk_buff *skb,
struct tls_record_info *record,
u32 tcp_seq, int mss, bool tcp_push_no_fin,
+ u32 data_len, u32 skb_offset,
struct sge_eth_txq *q, u32 tls_end_offset)
{
u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
u8 prior_data[16] = {0};
u32 prior_data_len = 0;
- u32 data_len;
/* check if the skb is ending in the middle of the tag/HASH; it's a
* big problem, so send the packet before the HASH.
*/
- int remaining_record = tls_end_offset - skb->data_len;
+ int remaining_record = tls_end_offset - data_len;
if (remaining_record > 0 &&
remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
- int trimmed_len = skb->data_len -
- (TLS_CIPHER_AES_GCM_128_TAG_SIZE - remaining_record);
- struct sk_buff *tmp_skb = NULL;
- /* don't process the pkt if it is only a partial tag */
- if (skb->data_len < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
- goto out;
+ int trimmed_len = 0;
- WARN_ON(trimmed_len > skb->data_len);
+ if (tls_end_offset > TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ trimmed_len = data_len -
+ (TLS_CIPHER_AES_GCM_128_TAG_SIZE -
+ remaining_record);
+ if (!trimmed_len)
+ return FALLBACK;
- /* shift to those many bytes */
- tmp_skb = alloc_skb(0, GFP_KERNEL);
- if (unlikely(!tmp_skb))
- goto out;
+ WARN_ON(trimmed_len > data_len);
- chcr_ktls_skb_shift(tmp_skb, skb, trimmed_len);
- /* free the last trimmed portion */
- dev_kfree_skb_any(skb);
- skb = tmp_skb;
+ data_len = trimmed_len;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts);
}
- data_len = skb->data_len;
+
+ /* check if it is only the header part. */
+ if (tls_rec_offset + data_len <= (TLS_HEADER_SIZE + tx_info->iv_size)) {
+ if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ tcp_push_no_fin, q,
+ tx_info->port_id, prior_data,
+ data_len, skb_offset, prior_data_len))
+ goto out;
+
+ tx_info->prev_seq = tcp_seq + data_len;
+ return 0;
+ }
+
/* check if the middle record's start point is 16 byte aligned. CTR
* needs 16 byte aligned start point to start encryption.
*/
@@ -1818,9 +1913,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
}
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
- /* include prio_data_len for further calculation.
- */
- data_len += prior_data_len;
}
/* reset snd una, so the middle record won't send the already
* sent part.
@@ -1829,37 +1921,54 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
goto out;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
} else {
- /* Else means, its a partial first part of the record. Check if
- * its only the header, don't need to send for encryption then.
- */
- if (data_len <= TLS_HEADER_SIZE + tx_info->iv_size) {
- if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
- tcp_push_no_fin, q,
- tx_info->port_id,
- prior_data,
- prior_data_len)) {
- goto out;
- }
- return 0;
- }
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
}
if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
mss, tls_rec_offset, prior_data,
- prior_data_len)) {
+ prior_data_len, data_len, skb_offset)) {
goto out;
}
+ tx_info->prev_seq = tcp_seq + data_len + prior_data_len;
return 0;
out:
dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
+static int chcr_ktls_sw_fallback(struct sk_buff *skb,
+ struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q)
+{
+ u32 data_len, skb_offset;
+ struct sk_buff *nskb;
+ struct tcphdr *th;
+
+ nskb = tls_encrypt_skb(skb);
+
+ if (!nskb)
+ return 0;
+
+ th = tcp_hdr(nskb);
+ skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+ data_len = nskb->len - skb_offset;
+ skb_tx_timestamp(nskb);
+
+ if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
+ goto out;
+
+ tx_info->prev_seq = ntohl(th->seq) + data_len;
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_fallback);
+ return 0;
+out:
+ dev_kfree_skb_any(nskb);
+ return 0;
+}
/* nic tls TX handler */
static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
struct ch_ktls_stats_debug *stats;
@@ -1867,20 +1976,17 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
struct chcr_ktls_info *tx_info;
- u32 tls_end_offset, tcp_seq;
struct tls_context *tls_ctx;
- struct sk_buff *local_skb;
struct sge_eth_txq *q;
struct adapter *adap;
unsigned long flags;
tcp_seq = ntohl(th->seq);
+ skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_data_len = skb->len - skb_offset;
+ data_len = skb_data_len;
- mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : skb->data_len;
-
- /* check if we haven't set it for ktls offload */
- if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
+ mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
if (unlikely(tls_ctx->netdev != dev))
@@ -1892,14 +1998,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!tx_info))
goto out;
- /* don't touch the original skb, make a new skb to extract each records
- * and send them separately.
- */
- local_skb = alloc_skb(0, GFP_KERNEL);
-
- if (unlikely(!local_skb))
- return NETDEV_TX_BUSY;
-
adap = tx_info->adap;
stats = &adap->ch_ktls_stats;
port_stats = &stats->ktls_port[tx_info->port_id];
@@ -1914,20 +2012,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
if (ret)
return NETDEV_TX_BUSY;
}
- /* update tcb */
- ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
- ntohl(th->ack_seq),
- ntohs(th->window));
- if (ret) {
- dev_kfree_skb_any(local_skb);
- return NETDEV_TX_BUSY;
- }
- /* copy skb contents into local skb */
- chcr_ktls_skb_copy(skb, local_skb);
-
- /* go through the skb and send only one record at a time. */
- data_len = skb->data_len;
/* TCP segments can be received either complete or partial.
* chcr_end_part_handler will handle cases if complete record or end
* part of the record is received. In case of partial end part of record,
@@ -1952,10 +2037,64 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ tls_end_offset = record->end_seq - tcp_seq;
+
+ pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
+ tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
+ /* update tcb for the skb */
+ if (skb_data_len == data_len) {
+ u32 tx_max = tcp_seq;
+
+ if (!tls_record_is_start_marker(record) &&
+ tls_end_offset < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ tx_max = record->end_seq -
+ TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+
+ ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, tx_max,
+ ntohl(th->ack_seq),
+ ntohs(th->window),
+ tls_end_offset !=
+ record->len);
+ if (ret) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock,
+ flags);
+ goto out;
+ }
+
+ if (th->fin)
+ skb_get(skb);
+ }
+
if (unlikely(tls_record_is_start_marker(record))) {
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
- goto out;
+ /* If tls_end_offset < data_len, there is some data after
+ * the start marker that still needs encryption: send the
+ * plaintext first and take an skb refcount. Otherwise send
+ * out the complete pkt as plaintext.
+ */
+ if (tls_end_offset < data_len)
+ skb_get(skb);
+ else
+ tls_end_offset = data_len;
+
+ ret = chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ (!th->fin && th->psh), q,
+ tx_info->port_id, NULL,
+ tls_end_offset, skb_offset,
+ 0);
+
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ if (ret) {
+ /* free the refcount taken earlier */
+ if (tls_end_offset < data_len)
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ data_len -= tls_end_offset;
+ tcp_seq = record->end_seq;
+ skb_offset += tls_end_offset;
+ continue;
}
/* increase page reference count of the record, so that there
@@ -1967,73 +2106,64 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
/* lock cleared */
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
- tls_end_offset = record->end_seq - tcp_seq;
- pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
- tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
/* if a tls record is finishing in this SKB */
if (tls_end_offset <= data_len) {
- struct sk_buff *nskb = NULL;
-
- if (tls_end_offset < data_len) {
- nskb = alloc_skb(0, GFP_KERNEL);
- if (unlikely(!nskb)) {
- ret = -ENOMEM;
- goto clear_ref;
- }
-
- chcr_ktls_skb_shift(nskb, local_skb,
- tls_end_offset);
- } else {
- /* its the only record in this skb, directly
- * point it.
- */
- nskb = local_skb;
- }
- ret = chcr_end_part_handler(tx_info, nskb, record,
+ ret = chcr_end_part_handler(tx_info, skb, record,
tcp_seq, mss,
(!th->fin && th->psh), q,
+ skb_offset,
tls_end_offset,
- (nskb == local_skb));
-
- if (ret && nskb != local_skb)
- dev_kfree_skb_any(local_skb);
+ skb_offset +
+ tls_end_offset == skb->len);
data_len -= tls_end_offset;
/* tcp_seq increment is required to handle next record.
*/
tcp_seq += tls_end_offset;
+ skb_offset += tls_end_offset;
} else {
- ret = chcr_short_record_handler(tx_info, local_skb,
+ ret = chcr_short_record_handler(tx_info, skb,
record, tcp_seq, mss,
(!th->fin && th->psh),
+ data_len, skb_offset,
q, tls_end_offset);
data_len = 0;
}
-clear_ref:
+
/* clear the frag ref count which increased locally before */
for (i = 0; i < record->num_frags; i++) {
/* clear the frag ref count */
__skb_frag_unref(&record->frags[i]);
}
/* if any failure, come out from the loop. */
- if (ret)
- goto out;
+ if (ret) {
+ if (th->fin)
+ dev_kfree_skb_any(skb);
+
+ if (ret == FALLBACK)
+ return chcr_ktls_sw_fallback(skb, tx_info, q);
+
+ return NETDEV_TX_OK;
+ }
+
/* length should never be less than 0 */
WARN_ON(data_len < 0);
} while (data_len > 0);
- tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
- atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes);
+ atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
/* tcp finish is set, send a separate tcp msg including all the options
* as well.
*/
- if (th->fin)
+ if (th->fin) {
chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
out:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index c1651b1431a0..18b3b1f02415 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -26,6 +26,7 @@
#define CHCR_KTLS_WR_SIZE (CHCR_PLAIN_TX_DATA_LEN +\
sizeof(struct cpl_tx_sec_pdu))
+#define FALLBACK 35
enum ch_ktls_open_state {
CH_KTLS_OPEN_SUCCESS = 0,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index d581c4e623f8..96d561653496 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -212,7 +212,7 @@ static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
__skb_trim(skb, 0);
- refcount_add(2, &skb->users);
+ refcount_inc(&skb->users);
} else {
skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
}
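
The alloc_ctrl_skb() change fixes a reference leak: the cache already holds
one reference to the skb, so handing the trimmed skb out again needs a single
refcount_inc(&skb->users), not refcount_add(2, ...). The reuse pattern after
the fix, as a sketch:

    #include <linux/skbuff.h>

    /* Reuse a cached control skb when it is exclusively owned, taking
     * exactly one reference for the new user; otherwise allocate fresh.
     */
    static struct sk_buff *reuse_or_alloc(struct sk_buff *skb, int len)
    {
            if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
                    __skb_trim(skb, 0);        /* reset cached payload */
                    refcount_inc(&skb->users); /* one ref per handout */
                    return skb;
            }
            return alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
    }
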
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index f1820aca0d33..62c829023da5 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -383,6 +383,9 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen,
if (ret)
goto out_notcb;
+ if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
+ goto out_notcb;
+
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
csk->wr_credits -= DIV_ROUND_UP(len, 16);
csk->wr_unacked += DIV_ROUND_UP(len, 16);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 00024dd41147..80fb1f537bb3 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1907,6 +1907,8 @@ err_register_netdev:
clk_disable_unprepare(priv->rclk);
clk_disable_unprepare(priv->clk);
err_ncsi_dev:
+ if (priv->ndev)
+ ncsi_unregister_dev(priv->ndev);
ftgmac100_destroy_mdio(netdev);
err_setup_mdio:
iounmap(priv->base);
@@ -1926,6 +1928,8 @@ static int ftgmac100_remove(struct platform_device *pdev)
netdev = platform_get_drvdata(pdev);
priv = netdev_priv(netdev);
+ if (priv->ndev)
+ ncsi_unregister_dev(priv->ndev);
unregister_netdev(netdev);
clk_disable_unprepare(priv->rclk);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 06cc863f4dd6..d9c285948fc2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -174,12 +174,17 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
+#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
+ + DPAA_HASH_RESULTS_SIZE)
+#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
+ dpaa_rx_extra_headroom)
#ifdef CONFIG_DPAA_ERRATUM_A050385
-#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\
- + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE))
+#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
+#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
+ DPAA_RX_PRIV_DATA_A050385_SIZE : \
+ DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
#else
-#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
- dpaa_rx_extra_headroom)
+#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
#endif
#define DPAA_ETH_PCD_RXQ_NUM 128
@@ -2840,7 +2845,8 @@ out_error:
return err;
}
-static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
+static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
+ enum port_type port)
{
u16 headroom;
@@ -2854,10 +2860,12 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
*
* Also make sure the headroom is a multiple of data_align bytes
*/
- headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
- DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
+ headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
- return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
+ if (port == RX)
+ return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
+ else
+ return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
}
static int dpaa_eth_probe(struct platform_device *pdev)
@@ -3025,8 +3033,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
goto free_dpaa_fqs;
}
- priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
- priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
+ priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
+ priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);
/* All real interfaces need their ports initialized */
err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
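
The headroom logic above boils down to aligning the sum of the private data area and the fixed hardware annotation area (parse results, timestamp, hash) to the port's required alignment, with RX getting its own alignment value. A standalone sketch, assuming the macro values from the hunk:

	#include <linux/kernel.h>

	static u16 example_dpaa_headroom(u16 priv_data_size, u16 hwa_size, u16 align)
	{
		return ALIGN((u16)(priv_data_size + hwa_size), align);
	}

	/* e.g. rx = example_dpaa_headroom(bl[RX].priv_data_size,
	 *				   DPAA_HWA_SIZE, DPAA_FD_RX_DATA_ALIGNMENT);
	 */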
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index cfd369cf4c8c..ee7a906e30b3 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -4,6 +4,8 @@ config FSL_DPAA2_ETH
depends on FSL_MC_BUS && FSL_MC_DPIO
select PHYLINK
select PCS_LYNX
+ select FSL_XGMAC_MDIO
+ select NET_DEVLINK
help
This is the DPAA2 Ethernet driver supporting Freescale SoCs
with DPAA2 (DataPath Acceleration Architecture v2).
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 0fa18b00c49b..d99ea0f4e4a6 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -16,6 +16,7 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
depends on PCI && PCI_MSI
+ select FSL_ENETC_MDIO
select PHYLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 52be6e315752..fc2075ea57fe 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -33,7 +33,10 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
+ enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
+ enetc_unlock_mdio();
+
if (unlikely(!count))
goto drop_packet_err;
@@ -239,7 +242,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
skb_tx_timestamp(skb);
/* let H/W know BD ring has been updated */
- enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
+ enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
return count;
@@ -262,12 +265,16 @@ static irqreturn_t enetc_msix(int irq, void *data)
struct enetc_int_vector *v = data;
int i;
+ enetc_lock_mdio();
+
/* disable interrupts */
- enetc_wr_reg(v->rbier, 0);
- enetc_wr_reg(v->ricr1, v->rx_ictt);
+ enetc_wr_reg_hot(v->rbier, 0);
+ enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
- enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
+ enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
+
+ enetc_unlock_mdio();
napi_schedule(&v->napi);
@@ -334,19 +341,23 @@ static int enetc_poll(struct napi_struct *napi, int budget)
v->rx_napi_work = false;
+ enetc_lock_mdio();
+
/* enable interrupts */
- enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
+ enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
- enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
- ENETC_TBIER_TXTIE);
+ enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
+ ENETC_TBIER_TXTIE);
+
+ enetc_unlock_mdio();
return work_done;
}
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
- int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+ int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}
@@ -386,7 +397,10 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
i = tx_ring->next_to_clean;
tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_lock_mdio();
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+ enetc_unlock_mdio();
do_tstamp = false;
@@ -429,16 +443,20 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
tx_swbd = tx_ring->tx_swbd;
}
+ enetc_lock_mdio();
+
/* BD iteration loop end */
if (is_eof) {
tx_frm_cnt++;
/* re-arm interrupt source */
- enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
- BIT(16 + tx_ring->index));
+ enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
+ BIT(16 + tx_ring->index));
}
if (unlikely(!bds_to_clean))
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+
+ enetc_unlock_mdio();
}
tx_ring->next_to_clean = i;
@@ -515,8 +533,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
if (likely(j)) {
rx_ring->next_to_alloc = i; /* keep track from page reuse */
rx_ring->next_to_use = i;
- /* update ENETC's consumer index */
- enetc_wr_reg(rx_ring->rcir, i);
}
return j;
@@ -534,8 +550,8 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
u64 tstamp;
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
- lo = enetc_rd(hw, ENETC_SICTR0);
- hi = enetc_rd(hw, ENETC_SICTR1);
+ lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
+ hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
rxbd = enetc_rxbd_ext(rxbd);
tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
if (lo <= tstamp_lo)
@@ -684,23 +700,31 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
u32 bd_status;
u16 size;
+ enetc_lock_mdio();
+
if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
+ /* update ENETC's consumer index */
+ enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
cleaned_cnt -= count;
}
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
- if (!bd_status)
+ if (!bd_status) {
+ enetc_unlock_mdio();
break;
+ }
- enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
+ enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
dma_rmb(); /* for reading other rxbd fields */
size = le16_to_cpu(rxbd->r.buf_len);
skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
- if (!skb)
+ if (!skb) {
+ enetc_unlock_mdio();
break;
+ }
enetc_get_offloads(rx_ring, rxbd, skb);
@@ -712,6 +736,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
if (unlikely(bd_status &
ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
+ enetc_unlock_mdio();
dev_kfree_skb(skb);
while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
dma_rmb();
@@ -751,6 +776,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_process_skb(rx_ring, skb);
+ enetc_unlock_mdio();
+
napi_gro_receive(napi, skb);
rx_frm_cnt++;
@@ -1225,6 +1252,7 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
+ enetc_wr(hw, ENETC_SIRXIDR, rx_ring->next_to_use);
/* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 17cf7c94fdb5..eb6bbf1113c7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -324,14 +324,100 @@ struct enetc_hw {
void __iomem *global;
};
-/* general register accessors */
-#define enetc_rd_reg(reg) ioread32((reg))
-#define enetc_wr_reg(reg, val) iowrite32((val), (reg))
+/* ENETC register accessors */
+
+/* MDIO issue workaround (on LS1028A) -
+ * Due to a hardware issue, an access to MDIO registers
+ * that is concurrent with other ENETC register accesses
+ * may lead to the MDIO access being dropped or corrupted.
+ * To protect the MDIO accesses a readers-writers locking
+ * scheme is used, where the MDIO register accesses are
+ * protected by write locks to ensure exclusivity, while
+ * the remaining ENETC registers are accessed under read
+ * locks since they only compete with MDIO accesses.
+ */
+extern rwlock_t enetc_mdio_lock;
+
+/* use this locking primitive only on the fast datapath to
+ * group together multiple non-MDIO register accesses to
+ * minimize the overhead of the lock
+ */
+static inline void enetc_lock_mdio(void)
+{
+ read_lock(&enetc_mdio_lock);
+}
+
+static inline void enetc_unlock_mdio(void)
+{
+ read_unlock(&enetc_mdio_lock);
+}
+
+/* use these accessors only on the fast datapath under
+ * the enetc_lock_mdio() locking primitive to minimize
+ * the overhead of the lock
+ */
+static inline u32 enetc_rd_reg_hot(void __iomem *reg)
+{
+ lockdep_assert_held(&enetc_mdio_lock);
+
+ return ioread32(reg);
+}
+
+static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
+{
+ lockdep_assert_held(&enetc_mdio_lock);
+
+ iowrite32(val, reg);
+}
+
+/* internal helpers for the MDIO w/a */
+static inline u32 _enetc_rd_reg_wa(void __iomem *reg)
+{
+ u32 val;
+
+ enetc_lock_mdio();
+ val = ioread32(reg);
+ enetc_unlock_mdio();
+
+ return val;
+}
+
+static inline void _enetc_wr_reg_wa(void __iomem *reg, u32 val)
+{
+ enetc_lock_mdio();
+ iowrite32(val, reg);
+ enetc_unlock_mdio();
+}
+
+static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ val = ioread32(reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+
+ return val;
+}
+
+static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ iowrite32(val, reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+}
+
#ifdef ioread64
-#define enetc_rd_reg64(reg) ioread64((reg))
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
+{
+ return ioread64(reg);
+}
#else
/* using this to read out stats on 32b systems */
-static inline u64 enetc_rd_reg64(void __iomem *reg)
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
{
u32 low, high, tmp;
@@ -345,12 +431,29 @@ static inline u64 enetc_rd_reg64(void __iomem *reg)
}
#endif
+static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
+{
+ u64 val;
+
+ enetc_lock_mdio();
+ val = _enetc_rd_reg64(reg);
+ enetc_unlock_mdio();
+
+ return val;
+}
+
+/* general register accessors */
+#define enetc_rd_reg(reg) _enetc_rd_reg_wa((reg))
+#define enetc_wr_reg(reg, val) _enetc_wr_reg_wa((reg), (val))
#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off))
#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val)
-#define enetc_rd64(hw, off) enetc_rd_reg64((hw)->reg + (off))
+#define enetc_rd64(hw, off) _enetc_rd_reg64_wa((hw)->reg + (off))
/* port register accessors - PF only */
#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off))
+#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\
+ (hw)->port + (off), val)
/* global register accessors - PF only */
#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off))
#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val)
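
For reference, the locking scheme documented in the hunk above reduces to the following pattern: datapath (non-MDIO) register accesses take the lock for read and may run concurrently, while MDIO accesses take it for write with IRQs disabled to get exclusivity. A minimal standalone sketch, with illustrative names rather than the driver's own:

	#include <linux/spinlock.h>
	#include <linux/io.h>

	static DEFINE_RWLOCK(example_mdio_lock);

	/* hot path, non-MDIO: readers may run concurrently */
	static u32 example_datapath_rd(void __iomem *reg)
	{
		u32 val;

		read_lock(&example_mdio_lock);
		val = ioread32(reg);
		read_unlock(&example_mdio_lock);

		return val;
	}

	/* MDIO access: exclusive, and safe against readers in IRQ context */
	static void example_mdio_wr(void __iomem *reg, u32 val)
	{
		unsigned long flags;

		write_lock_irqsave(&example_mdio_lock, flags);
		iowrite32(val, reg);
		write_unlock_irqrestore(&example_mdio_lock, flags);
	}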
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 48c32a171afa..ee0116ed4738 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -16,13 +16,13 @@
static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
{
- return enetc_port_rd(mdio_priv->hw, mdio_priv->mdio_base + off);
+ return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
}
static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
u32 val)
{
- enetc_port_wr(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+ enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
}
#define enetc_mdio_rd(mdio_priv, off) \
@@ -174,3 +174,7 @@ struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
return hw;
}
EXPORT_SYMBOL_GPL(enetc_hw_alloc);
+
+/* Lock for MDIO access errata on LS1028A */
+DEFINE_RWLOCK(enetc_mdio_lock);
+EXPORT_SYMBOL_GPL(enetc_mdio_lock);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 827f74e86d34..dbceb99c4441 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -92,18 +92,8 @@ static int enetc_setup_taprio(struct net_device *ndev,
gcl_config->atc = 0xff;
gcl_config->acl_len = cpu_to_le16(gcl_len);
- if (!admin_conf->base_time) {
- gcl_data->btl =
- cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
- gcl_data->bth =
- cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
- } else {
- gcl_data->btl =
- cpu_to_le32(lower_32_bits(admin_conf->base_time));
- gcl_data->bth =
- cpu_to_le32(upper_32_bits(admin_conf->base_time));
- }
-
+ gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time));
+ gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time));
gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 832a2175636d..c527f4ee1d3a 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -456,6 +456,12 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_FRREG (1 << 16)
+/* Some FEC hardware blocks need the MMFR cleared at setup time to avoid
+ * generating a spurious MII event. Clearing MMFR must be skipped on the
+ * older FEC blocks, where it would stop MII events from being generated.
+ */
+#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8f7eca1e7716..04f24c66cf36 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -100,14 +100,14 @@ static const struct fec_devinfo fec_imx27_info = {
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_imx6q_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_mvf600_info = {
@@ -119,7 +119,8 @@ static const struct fec_devinfo fec_imx6x_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+ FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_imx6ul_info = {
@@ -127,7 +128,7 @@ static const struct fec_devinfo fec_imx6ul_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE,
+ FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
};
static struct platform_device_id fec_devtype[] = {
@@ -1807,7 +1808,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
int ret = 0, frame_start, frame_addr, frame_op;
bool is_c45 = !!(regnum & MII_ADDR_C45);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
@@ -1866,11 +1867,9 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
int ret, frame_start, frame_addr;
bool is_c45 = !!(regnum & MII_ADDR_C45);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- else
- ret = 0;
if (is_c45) {
frame_start = FEC_MMFR_ST_C45;
@@ -2134,15 +2133,17 @@ static int fec_enet_mii_init(struct platform_device *pdev)
if (suppress_preamble)
fep->phy_speed |= BIT(7);
- /* Clear MMFR to avoid to generate MII event by writing MSCR.
- * MII event generation condition:
- * - writing MSCR:
- * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
- * mscr_reg_data_in[7:0] != 0
- * - writing MMFR:
- * - mscr[7:0]_not_zero
- */
- writel(0, fep->hwp + FEC_MII_DATA);
+ if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
+ /* Clear MMFR to avoid generating an MII event when writing MSCR.
+ * MII event generation condition:
+ * - writing MSCR:
+ * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
+ * mscr_reg_data_in[7:0] != 0
+ * - writing MMFR:
+ * - mscr[7:0]_not_zero
+ */
+ writel(0, fep->hwp + FEC_MII_DATA);
+ }
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
@@ -2272,7 +2273,7 @@ static void fec_enet_get_regs(struct net_device *ndev,
u32 i, off;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -2973,7 +2974,7 @@ fec_enet_open(struct net_device *ndev)
int ret;
bool reset_again;
- ret = pm_runtime_get_sync(&fep->pdev->dev);
+ ret = pm_runtime_resume_and_get(&fep->pdev->dev);
if (ret < 0)
return ret;
@@ -3767,7 +3768,7 @@ fec_drv_remove(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
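
The pm_runtime conversions in this file share one rationale: pm_runtime_get_sync() bumps the usage count even when it fails, so every error path had to remember a pm_runtime_put_noidle(); pm_runtime_resume_and_get() drops the reference on failure itself. A minimal sketch of the resulting pattern (the function is hypothetical):

	#include <linux/pm_runtime.h>

	static int example_powered_io(struct device *dev)
	{
		int ret;

		ret = pm_runtime_resume_and_get(dev); /* ref already dropped on failure */
		if (ret < 0)
			return ret;

		/* ... access the hardware while it is guaranteed powered ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);

		return 0;
	}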
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 41dd3d0f3452..d391a45cebb6 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1829,20 +1829,12 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* make space for additional header when fcb is needed */
- if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
- struct sk_buff *skb_new;
-
- skb_new = skb_realloc_headroom(skb, fcb_len);
- if (!skb_new) {
+ if (fcb_len) {
+ if (unlikely(skb_cow_head(skb, fcb_len))) {
dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
-
- if (skb->sk)
- skb_set_owner_w(skb_new, skb->sk);
- dev_consume_skb_any(skb);
- skb = skb_new;
}
/* total number of fragments in the SKB */
@@ -3380,7 +3372,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->needed_headroom = GMAC_FCB_LEN;
+ dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {
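
skb_cow_head() replaces the open-coded skb_realloc_headroom() sequence above: it reallocates the header only when the headroom is actually short, and it keeps socket ownership intact, so the skb_set_owner_w()/dev_consume_skb_any() dance disappears. A minimal xmit-side sketch with a hypothetical driver function:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_tx_t example_xmit_with_fcb(struct sk_buff *skb,
						 struct net_device *dev,
						 unsigned int fcb_len)
	{
		if (fcb_len && unlikely(skb_cow_head(skb, fcb_len))) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;	/* drop; do not requeue */
		}

		/* ... push the frame control block and hand the skb to hardware ... */

		return NETDEV_TX_OK;
	}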
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index af4dfbe28d56..2491ebc97871 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1185,18 +1185,27 @@ static int ibmvnic_open(struct net_device *netdev)
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc)
- return rc;
+ goto out;
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
- return rc;
+ goto out;
}
}
rc = __ibmvnic_open(netdev);
+out:
+ /*
+ * If open fails due to a pending failover, set device state and
+ * return. Device operation will be handled by reset routine.
+ */
+ if (rc && adapter->failover_pending) {
+ adapter->state = VNIC_OPEN;
+ rc = 0;
+ }
return rc;
}
@@ -1922,6 +1931,13 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rwi->reset_reason);
rtnl_lock();
+ /*
+ * Now that we have the rtnl lock, clear any pending failover.
+ * This ensures that ibmvnic_open() has either completed or will
+ * block until the failover is complete.
+ */
+ if (rwi->reset_reason == VNIC_RESET_FAILOVER)
+ adapter->failover_pending = false;
netif_carrier_off(netdev);
adapter->reset_reason = rwi->reset_reason;
@@ -2058,8 +2074,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
- if (adapter->reset_reason != VNIC_RESET_FAILOVER)
+ if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
+ adapter->reset_reason == VNIC_RESET_MOBILITY) {
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
+ call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
+ }
rc = 0;
@@ -2129,6 +2148,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
if (rc)
return IBMVNIC_OPEN_FAILED;
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
+ call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
+
return 0;
}
@@ -2193,7 +2215,6 @@ static void __ibmvnic_reset(struct work_struct *work)
if (!saved_state) {
reset_state = adapter->state;
- adapter->state = VNIC_RESETTING;
saved_state = true;
}
spin_unlock_irqrestore(&adapter->state_lock, flags);
@@ -2202,6 +2223,13 @@ static void __ibmvnic_reset(struct work_struct *work)
/* CHANGE_PARAM requestor holds rtnl_lock */
rc = do_change_param_reset(adapter, rwi, reset_state);
} else if (adapter->force_reset_recovery) {
+ /*
+ * Since we are doing a hard reset now, clear the
+ * failover_pending flag so we don't ignore any
+ * future MOBILITY or other resets.
+ */
+ adapter->failover_pending = false;
+
/* Transport event occurred during previous reset */
if (adapter->wait_for_reset) {
/* Previous was CHANGE_PARAM; caller locked */
@@ -2266,9 +2294,15 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
unsigned long flags;
int ret;
+ /*
+ * If failover is pending, don't schedule any other reset.
+ * Instead let the failover complete. If there is already a
+ * failover reset scheduled, we will detect and drop the
+ * duplicate reset when walking the ->rwi_list below.
+ */
if (adapter->state == VNIC_REMOVING ||
adapter->state == VNIC_REMOVED ||
- adapter->failover_pending) {
+ (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
ret = EBUSY;
netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
goto err;
@@ -2321,6 +2355,12 @@ static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
+ if (test_bit(0, &adapter->resetting)) {
+ netdev_err(adapter->netdev,
+ "Adapter is resetting, skip timeout reset\n");
+ return;
+ }
+
ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}
@@ -2839,6 +2879,9 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
int i, rc;
+ if (!adapter->tx_scrq || !adapter->rx_scrq)
+ return -EINVAL;
+
for (i = 0; i < adapter->req_tx_queues; i++) {
netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
@@ -4713,7 +4756,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
adapter->from_passive_init = true;
- adapter->failover_pending = false;
if (!completion_done(&adapter->init_done)) {
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
@@ -4930,6 +4972,9 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
/* Clean out the queue */
+ if (!crq->msgs)
+ return -EINVAL;
+
memset(crq->msgs, 0, PAGE_SIZE);
crq->cur = 0;
crq->active = false;
@@ -5234,7 +5279,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
unsigned long flags;
spin_lock_irqsave(&adapter->state_lock, flags);
- if (adapter->state == VNIC_RESETTING) {
+ if (test_bit(0, &adapter->resetting)) {
spin_unlock_irqrestore(&adapter->state_lock, flags);
return -EBUSY;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 217dcc7ded70..47a3fd71c96f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -942,8 +942,7 @@ enum vnic_state {VNIC_PROBING = 1,
VNIC_CLOSING,
VNIC_CLOSED,
VNIC_REMOVING,
- VNIC_REMOVED,
- VNIC_RESETTING};
+ VNIC_REMOVED};
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
VNIC_RESET_MOBILITY,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 537300e762f0..d231a2cdd98f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -140,6 +140,7 @@ enum i40e_state_t {
__I40E_CLIENT_RESET,
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
+ __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4f8a2154b93f..1337686bd099 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4010,8 +4010,16 @@ static irqreturn_t i40e_intr(int irq, void *data)
}
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
- ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
- set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ /* disable any further VFLR event notifications */
+ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
+ u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ } else {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ }
}
if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
@@ -15311,6 +15319,11 @@ static void i40e_remove(struct pci_dev *pdev)
while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
usleep_range(1000, 2000);
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
@@ -15337,11 +15350,6 @@ static void i40e_remove(struct pci_dev *pdev)
*/
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
- i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
- }
-
i40e_fdir_teardown(pf);
/* If there is a switch structure or any orphans, remove them.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index c96e2f2d4cba..1b5390ec3d78 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1403,7 +1403,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * Returns true if the VF is reset, false otherwise.
+ * Returns true if the VF is currently in reset, was reset successfully, or
+ * if resets are disabled; false otherwise.
**/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@@ -1413,11 +1414,14 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
u32 reg;
int i;
+ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
+ return true;
+
/* If the VFs have been disabled, this means something else is
* resetting the VF, so we shouldn't continue.
*/
if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
- return false;
+ return true;
i40e_trigger_vf_reset(vf, flr);
@@ -1581,6 +1585,15 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_notify_client_of_vf_enable(pf, 0);
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
/* Amortize wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
@@ -1596,15 +1609,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
}
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
-
/* free up VF resources */
tmp = pf->num_alloc_vfs;
pf->num_alloc_vfs = 0;
@@ -2713,6 +2717,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
+ if (is_valid_ether_addr(al->list[i].addr) &&
+ is_zero_ether_addr(vf->default_lan_addr.addr))
+ ether_addr_copy(vf->default_lan_addr.addr,
+ al->list[i].addr);
}
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2740,6 +2748,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
+ bool was_unimac_deleted = false;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
i40e_status ret = 0;
@@ -2759,6 +2768,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_INVALID_MAC_ADDR;
goto error_param;
}
+ if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
+ was_unimac_deleted = true;
}
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2779,10 +2790,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
vf->vf_id, ret);
+ if (vf->trusted && was_unimac_deleted) {
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ u8 *macaddr = NULL;
+ int bkt;
+
+ /* set last unicast mac address as default */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (is_valid_ether_addr(f->macaddr))
+ macaddr = f->macaddr;
+ }
+ if (macaddr)
+ ether_addr_copy(vf->default_lan_addr.addr, macaddr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ }
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
- ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 6acede0acdca..567fd67e900e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -281,8 +281,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
unsigned int xdp_res, xdp_xmit = 0;
+ bool failure = false;
struct sk_buff *skb;
- bool failure;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9112dff075cf..b673ac1199bb 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3891,21 +3891,23 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * igc_get_stats - Get System Network Statistics
+ * igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
*
* Fills in @stats with the device statistics. The statistics are
* updated here and also from the timer callback.
*/
-static struct net_device_stats *igc_get_stats(struct net_device *netdev)
+static void igc_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ spin_lock(&adapter->stats64_lock);
if (!test_bit(__IGC_RESETTING, &adapter->state))
igc_update_stats(adapter);
-
- /* only return the current stats */
- return &netdev->stats;
+ memcpy(stats, &adapter->stats64, sizeof(*stats));
+ spin_unlock(&adapter->stats64_lock);
}
static netdev_features_t igc_fix_features(struct net_device *netdev,
@@ -4855,7 +4857,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
- .ndo_get_stats = igc_get_stats,
+ .ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 54b0bf574c05..4a9041ee1b39 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2287,6 +2287,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
dma_sync_single_for_cpu(dev->dev.parent,
rx_desc->buf_phys_addr,
len, dma_dir);
+ rx_desc->buf_phys_addr = 0;
if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
@@ -2295,8 +2296,8 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
sinfo->nr_frags++;
-
- rx_desc->buf_phys_addr = 0;
+ } else {
+ page_pool_put_full_page(rxq->page_pool, page, true);
}
*size -= len;
}
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
index b1fcc44f566a..b6f20e2034c6 100644
--- a/drivers/net/ethernet/marvell/prestera/Kconfig
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -6,6 +6,7 @@
config PRESTERA
tristate "Marvell Prestera Switch ASICs support"
depends on NET_SWITCHDEV && VLAN_8021Q
+ depends on BRIDGE || BRIDGE=n
select NET_DEVLINK
help
This driver supports Marvell Prestera Switch ASICs family.
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 1b97adae542e..be5677623455 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -676,7 +676,8 @@ static int prestera_pci_probe(struct pci_dev *pdev,
if (err)
return err;
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30))) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
+ if (err) {
dev_err(&pdev->dev, "fail to set DMA mask\n");
goto err_dma_mask;
}
@@ -702,8 +703,10 @@ static int prestera_pci_probe(struct pci_dev *pdev,
dev_info(fw->dev.dev, "Prestera FW is ready\n");
fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
- if (!fw->wq)
+ if (!fw->wq) {
+ err = -ENOMEM;
goto err_wq_alloc;
+ }
INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 13250553263b..a8641a407c06 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -966,6 +966,7 @@ static int mtk_star_enable(struct net_device *ndev)
mtk_star_adjust_link, 0, priv->phy_intf);
if (!priv->phydev) {
netdev_err(ndev, "failed to connect to PHY\n");
+ ret = -ENODEV;
goto err_free_irq;
}
@@ -1053,7 +1054,7 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
err_drop_packet:
dev_kfree_skb(skb);
ndev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
/* Returns the number of bytes sent or a negative number on the first
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f6ff9620a137..f6cfec81ccc3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1864,8 +1864,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET 0x0c0
#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
-#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
-#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
+#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x13)
+#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x17)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
@@ -1873,7 +1873,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_DRIVER_VERSION_SZ 0x40
#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
-#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x13)
#define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18)
#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 650ae08c71de..8f020f26ebf5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -182,8 +182,8 @@ struct mlx4_init_hca_param {
u64 cmpt_base;
u64 mtt_base;
u64 global_caps;
- u16 log_mc_entry_sz;
- u16 log_mc_hash_sz;
+ u8 log_mc_entry_sz;
+ u8 log_mc_hash_sz;
u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */
u8 log_num_qps;
u8 log_num_srqs;
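
The offset changes above follow from the field types narrowing: for a value that fits in one byte, a big-endian 16-bit store at offset 0x12 places the meaningful low byte at 0x13, so with u8 members the byte must be written at 0x13 directly. An illustrative sketch of the byte placement only (not the driver's MLX4_PUT helper):

	#include <linux/string.h>
	#include <asm/byteorder.h>

	static void example_init_hca_offsets(u8 *inbox, u8 log_mc_entry_sz)
	{
		__be16 wide = cpu_to_be16(log_mc_entry_sz);

		/* old layout: u16 field at 0x12; the low byte lands at 0x13 */
		memcpy(&inbox[0x12], &wide, sizeof(wide));

		/* new layout: u8 field stored at 0x13 directly, same byte */
		inbox[0x13] = log_mc_entry_sz;
	}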
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 3e44e4d820c5..95f2b26a3ee3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -187,7 +187,7 @@ static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
struct mlx5e_priv *priv;
/* A given netdev is not a representor or not a slave of LAG configuration */
- if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
+ if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
return false;
priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index e36e505d38ad..d29af7b9c695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -107,12 +107,16 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
mlx5e_tc_encap_flows_del(priv, e, &flow_list);
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ struct net_device *route_dev;
+
ether_addr_copy(e->h_dest, ha);
ether_addr_copy(eth->h_dest, ha);
/* Update the encap source mac, in case that we delete
* the flows when encap source mac changed.
*/
- ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
+ route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
+ if (route_dev)
+ ether_addr_copy(eth->h_source, route_dev->dev_addr);
mlx5e_tc_encap_flows_add(priv, e, &flow_list);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 7cce85faa16f..90930e54b6f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -77,13 +77,13 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
return 0;
}
-static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi4 *fl4,
- struct neighbour **out_n,
- u8 *out_ttl)
+static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi4 *fl4,
+ struct neighbour **out_n,
+ u8 *out_ttl)
{
struct neighbour *n;
struct rtable *rt;
@@ -117,18 +117,28 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
ip_rt_put(rt);
return ret;
}
+ dev_hold(*route_dev);
if (!(*out_ttl))
*out_ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
ip_rt_put(rt);
- if (!n)
+ if (!n) {
+ dev_put(*route_dev);
return -ENOMEM;
+ }
*out_n = n;
return 0;
}
+static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev,
+ struct neighbour *n)
+{
+ neigh_release(n);
+ dev_put(route_dev);
+}
+
static const char *mlx5e_netdev_kind(struct net_device *dev)
{
if (dev->rtnl_link_ops)
@@ -193,8 +203,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
fl4.saddr = tun_key->u.ipv4.src;
ttl = tun_key->ttl;
- err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
- &fl4, &n, &ttl);
+ err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev,
+ &fl4, &n, &ttl);
if (err)
return err;
@@ -223,7 +233,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
- e->route_dev = route_dev;
+ e->route_dev_ifindex = route_dev->ifindex;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -278,7 +288,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
+ mlx5e_route_lookup_ipv4_put(route_dev, n);
return err;
destroy_neigh_entry:
@@ -286,18 +296,18 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- neigh_release(n);
+ mlx5e_route_lookup_ipv4_put(route_dev, n);
return err;
}
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi6 *fl6,
- struct neighbour **out_n,
- u8 *out_ttl)
+static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi6 *fl6,
+ struct neighbour **out_n,
+ u8 *out_ttl)
{
struct dst_entry *dst;
struct neighbour *n;
@@ -318,15 +328,25 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
return ret;
}
+ dev_hold(*route_dev);
n = dst_neigh_lookup(dst, &fl6->daddr);
dst_release(dst);
- if (!n)
+ if (!n) {
+ dev_put(*route_dev);
return -ENOMEM;
+ }
*out_n = n;
return 0;
}
+static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev,
+ struct neighbour *n)
+{
+ neigh_release(n);
+ dev_put(route_dev);
+}
+
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e)
@@ -348,8 +368,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
fl6.daddr = tun_key->u.ipv6.dst;
fl6.saddr = tun_key->u.ipv6.src;
- err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
- &fl6, &n, &ttl);
+ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev,
+ &fl6, &n, &ttl);
if (err)
return err;
@@ -378,7 +398,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
- e->route_dev = route_dev;
+ e->route_dev_ifindex = route_dev->ifindex;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -433,7 +453,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
+ mlx5e_route_lookup_ipv6_put(route_dev, n);
return err;
destroy_neigh_entry:
@@ -441,7 +461,7 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- neigh_release(n);
+ mlx5e_route_lookup_ipv6_put(route_dev, n);
return err;
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 4e574ac73019..be3465ba38ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -122,9 +122,9 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
/* TX queue is created active. */
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
}
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index fb671a457129..8e96260fce1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -36,9 +36,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
return 0;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
}
return 0;
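
The _bh conversions here (and in ktls_rx.c further down) exist because async_icosq_lock is also taken from NAPI, i.e. softirq, context; taking it from process context without disabling bottom halves could deadlock if the softirq preempts the holder on the same CPU. A minimal sketch with a hypothetical structure:

	#include <linux/spinlock.h>

	struct example_channel {
		spinlock_t async_icosq_lock;
		/* ... */
	};

	static void example_kick_icosq(struct example_channel *c)
	{
		/* disable BHs: this lock is also taken in softirq context */
		spin_lock_bh(&c->async_icosq_lock);
		/* ... post a WQE and ring the doorbell ... */
		spin_unlock_bh(&c->async_icosq_lock);
	}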
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 0e45590662a8..381a9c8c9da9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -64,13 +64,13 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
if (!spec)
return -ENOMEM;
- /* Action to copy 7 bit ipsec_syndrome to regB[0:6] */
+ /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
MLX5_SET(copy_action_in, action, src_offset, 0);
MLX5_SET(copy_action_in, action, length, 7);
MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(copy_action_in, action, dst_offset, 0);
+ MLX5_SET(copy_action_in, action, dst_offset, 24);
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
@@ -488,13 +488,13 @@ static int rx_add_rule(struct mlx5e_priv *priv,
setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
- /* Set 1 bit ipsec marker */
- /* Set 24 bit ipsec_obj_id */
+ /* Set bit[31] ipsec marker */
+ /* Set bit[23-0] ipsec_obj_id */
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(set_action_in, action, data, (ipsec_obj_id << 1) | 0x1);
- MLX5_SET(set_action_in, action, offset, 7);
- MLX5_SET(set_action_in, action, length, 25);
+ MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 11e31a3db2be..a9b45606dbdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -453,7 +453,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{
u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
- u8 ipsec_syndrome = ipsec_meta_data & 0xFF;
struct mlx5e_priv *priv;
struct xfrm_offload *xo;
struct xfrm_state *xs;
@@ -481,7 +480,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
- switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) {
+ switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
if (WARN_ON_ONCE(priv->ipsec->no_trailer))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 056dacb612b0..9df9b9a8e09b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -39,9 +39,10 @@
#include "en.h"
#include "en/txrx.h"
-#define MLX5_IPSEC_METADATA_MARKER_MASK (0x80)
-#define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F)
-#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
+/* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */
+#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
+#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0))
+#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
@@ -78,7 +79,7 @@ static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_st
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
{
- return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
+ return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
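
With the layout documented above (bit 31 marker, bits 30:24 syndrome, bits 23:0 object id), decoding the CQE metadata is plain shift-and-mask through the three macros. A short sketch using those macros (the function itself is illustrative):

	#include <linux/types.h>
	#include <linux/printk.h>

	static void example_decode_ipsec_metadata(u32 md)
	{
		bool marker = MLX5_IPSEC_METADATA_MARKER(md);	/* bit 31 */
		u8 syndrome = MLX5_IPSEC_METADATA_SYNDROM(md);	/* bits 30:24 */
		u32 obj_id = MLX5_IPSEC_METADATA_HANDLE(md);	/* bits 23:0 */

		pr_debug("ipsec rx: marker=%d syndrome=%u obj_id=%u\n",
			 marker, syndrome, obj_id);
	}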
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index ccaccb9fc2f7..6a1d82503ef8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -188,7 +188,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
err = 0;
sq = &c->async_icosq;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg))
@@ -199,7 +199,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
return err;
@@ -265,10 +265,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
- spin_lock(&sq->channel->async_icosq_lock);
+ spin_lock_bh(&sq->channel->async_icosq_lock);
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
- spin_unlock(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->channel->async_icosq_lock);
err = -ENOSPC;
goto err_dma_unmap;
}
@@ -299,7 +299,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
icosq_fill_wi(sq, pi, &wi);
sq->pc++;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
- spin_unlock(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->channel->async_icosq_lock);
return 0;
@@ -360,7 +360,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
err = 0;
sq = &c->async_icosq;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg)) {
@@ -372,7 +372,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
priv_rx->stats->tls_resync_res_ok++;
unlock:
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
return err;
}
@@ -476,19 +476,22 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct tcphdr);
- if (unlikely(!sk || sk->sk_state == TCP_TIME_WAIT))
+ if (unlikely(!sk))
return;
- if (unlikely(!resync_queue_get_psv(sk)))
- return;
+ if (unlikely(sk->sk_state == TCP_TIME_WAIT))
+ goto unref;
- skb->sk = sk;
- skb->destructor = sock_edemux;
+ if (unlikely(!resync_queue_get_psv(sk)))
+ goto unref;
seq = th->seq;
datalen = skb->len - depth;
tls_offload_rx_resync_async_request_start(sk, seq, datalen);
rq->stats->tls_resync_req_start++;
+
+unref:
+ sock_gen_put(sk);
}
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b3f02aac7f26..ebce97921e03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5253,6 +5253,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
+ mlx5_vxlan_reset_to_default(mdev->vxlan);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 9020d1419bcf..8932c387d46a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -186,7 +186,7 @@ struct mlx5e_encap_entry {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
- struct net_device *route_dev;
+ int route_dev_ifindex;
struct mlx5e_tc_tunnel *tunnel;
int reformat_type;
u8 flags;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 599f5b5ebc97..6628a0197b4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1584,7 +1584,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
out:
- if (rq->xdp_prog)
+ if (rcu_access_pointer(rq->xdp_prog))
mlx5e_xdp_rx_poll_complete(rq);
mlx5_cqwq_update_db_record(cqwq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index e3a968e9e2a0..ce710f22b1ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -4658,6 +4658,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return flow;
err_free:
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return ERR_PTR(err);
@@ -4802,6 +4803,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
return 0;
err_free:
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return err;
@@ -5227,8 +5229,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (IS_ERR(tc->ct))
+ if (IS_ERR(tc->ct)) {
+ err = PTR_ERR(tc->ct);
goto err_ct;
+ }
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 3b979008143d..4a2ce241522e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -283,6 +283,9 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
reg_b = be32_to_cpu(cqe->ft_metadata);
+ if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ZONE_RESTORE_BITS))
+ return false;
+
chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
if (chain)
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 82b4419af9d4..6dd3ea3cbbed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -144,7 +144,9 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
-/* RM 2311217: no L4 inner checksum for IPsec tunnel type packet */
+/* If the packet is not CHECKSUM_PARTIAL (e.g. an ICMP packet),
+ * we still need to set the L3 checksum flag for IPsec.
+ */
static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
@@ -154,7 +156,6 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
sq->stats->csum_partial_inner++;
} else {
- eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
}
}
@@ -162,11 +163,6 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
- if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
- ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
- return;
- }
-
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -177,6 +173,9 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
}
+ } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+ ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+
} else
sq->stats->csum_none++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6e6a9a563992..d4ee0a9c03db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1142,6 +1142,10 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (!vport->qos.enabled)
+ return -EOPNOTSUPP;
+
MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
return mlx5_modify_scheduling_element_cmd(esw->dev,
@@ -1408,6 +1412,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
int i;
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ memset(&vport->qos, 0, sizeof(vport->qos));
memset(&vport->info, 0, sizeof(vport->info));
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
}
@@ -1902,8 +1907,6 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
ether_addr_copy(hw_addr, vport->info.mac);
*hw_addr_len = ETH_ALEN;
err = 0;
- } else {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
}
mutex_unlock(&esw->state_lock);
return err;
@@ -2223,12 +2226,15 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
max_guarantee = evport->info.min_rate;
}
- return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+ if (max_guarantee)
+ return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+ return 0;
}
-static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
+static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ u32 divider = calculate_vports_min_rate_divider(esw);
struct mlx5_vport *evport;
u32 vport_max_rate;
u32 vport_min_rate;
@@ -2241,9 +2247,9 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
continue;
vport_min_rate = evport->info.min_rate;
vport_max_rate = evport->info.max_rate;
- bw_share = MLX5_MIN_BW_SHARE;
+ bw_share = 0;
- if (vport_min_rate)
+ if (divider)
bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
divider,
fw_max_bw_share);
@@ -2268,7 +2274,6 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
u32 fw_max_bw_share;
u32 previous_min_rate;
- u32 divider;
bool min_rate_supported;
bool max_rate_supported;
int err = 0;
@@ -2293,8 +2298,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
previous_min_rate = evport->info.min_rate;
evport->info.min_rate = min_rate;
- divider = calculate_vports_min_rate_divider(esw);
- err = normalize_vports_min_rate(esw, divider);
+ err = normalize_vports_min_rate(esw);
if (err) {
evport->info.min_rate = previous_min_rate;
goto unlock;
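For the normalization above, the divider is derived from the largest configured guarantee, and each vport's share is its own guarantee scaled by that divider, clamped into the firmware's valid range. A hedged paraphrase of the arithmetic (MLX5_RATE_TO_BW_SHARE is the real macro; this is not its exact definition):

#include <linux/kernel.h>
#include <linux/types.h>

static u32 rate_to_bw_share(u32 min_rate, u32 divider, u32 fw_max_bw_share)
{
	u32 share;

	if (!divider)			/* no vport has a guarantee set */
		return 0;
	share = max_t(u32, min_rate / divider, 1);
	return min_t(u32, share, fw_max_bw_share);
}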
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 16091838bfcf..9fdd99272e31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -534,6 +534,13 @@ static void del_sw_hw_rule(struct fs_node *node)
goto out;
}
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
+ --fte->dests_size) {
+ fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+ fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ goto out;
+ }
+
if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
fte->modify_mask |=
@@ -2010,10 +2017,11 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
down_write_ref_node(&fte->node, false);
for (i = handle->num_rules - 1; i >= 0; i--)
tree_remove_node(&handle->rule[i]->node, true);
- if (fte->modify_mask && fte->dests_size) {
- modify_fte(fte);
+ if (fte->dests_size) {
+ if (fte->modify_mask)
+ modify_fte(fte);
up_write_ref_node(&fte->node, false);
- } else {
+ } else if (list_empty(&fte->node.children)) {
del_hw_fte(&fte->node);
/* Avoid double call to del_hw_fte */
fte->node.del_hw_func = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index 3315afe2f8dc..38084400ee8f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -168,6 +168,17 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
{
+ if (!mlx5_vxlan_allowed(vxlan))
+ return;
+
+ mlx5_vxlan_del_port(vxlan, IANA_VXLAN_UDP_PORT);
+ WARN_ON(!hash_empty(vxlan->htable));
+
+ kfree(vxlan);
+}
+
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan)
+{
struct mlx5_vxlan_port *vxlanp;
struct hlist_node *tmp;
int bkt;
@@ -175,12 +186,12 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
if (!mlx5_vxlan_allowed(vxlan))
return;
- /* Lockless since we are the only hash table consumers*/
hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
- hash_del(&vxlanp->hlist);
- mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
- kfree(vxlanp);
+ /* Don't delete the default UDP port added by the HW.
+ * Remove only user-configured ports
+ */
+ if (vxlanp->udp_port == IANA_VXLAN_UDP_PORT)
+ continue;
+ mlx5_vxlan_del_port(vxlan, vxlanp->udp_port);
}
-
- kfree(vxlan);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
index ec766529f49b..34ef662da35e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
@@ -56,6 +56,7 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan);
#else
static inline struct mlx5_vxlan*
mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
@@ -63,6 +64,7 @@ static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
static inline bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return false; }
+static inline void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan) { return; }
#endif
#endif /* __MLX5_VXLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 872e9910bb7c..a619d90559f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -6,6 +6,7 @@
config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
select NET_DEVLINK
+ select MLXFW
help
This driver supports Mellanox Technologies Switch ASICs family.
@@ -82,7 +83,6 @@ config MLXSW_SPECTRUM
select GENERIC_ALLOCATOR
select PARMAN
select OBJAGG
- select MLXFW
imply PTP_1588_CLOCK
select NET_PTP_CLASSIFY if PTP_1588_CLOCK
default m
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 937b8e46f8c7..1a86535c4968 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -571,7 +571,8 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
if (trans->core->fw_flash_in_progress)
timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
- queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
+ queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
+ timeout << trans->retries);
}
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
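Shifting the base timeout left by trans->retries turns the fixed EMAD retransmit timer into exponential backoff: the first retry waits the base timeout, the next one twice that, and so on. The arithmetic in isolation (names illustrative):

/* retry 0 -> t, retry 1 -> 2t, retry 2 -> 4t, ... */
static unsigned long emad_backoff(unsigned long base_jiffies,
				  unsigned int retries)
{
	return base_jiffies << retries;
}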
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index a1938842f828..b319c22c211c 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -148,7 +148,8 @@ static void lan743x_intr_software_isr(void *context)
int_sts = lan743x_csr_read(adapter, INT_STS);
if (int_sts & INT_BIT_SW_GP_) {
- lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_);
+ /* disable the interrupt to prevent repeated re-triggering */
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
intr->software_isr_flag = 1;
}
}
@@ -674,14 +675,12 @@ clean_up:
static int lan743x_dp_write(struct lan743x_adapter *adapter,
u32 select, u32 addr, u32 length, u32 *buf)
{
- int ret = -EIO;
u32 dp_sel;
int i;
- mutex_lock(&adapter->dp_lock);
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
- goto unlock;
+ return -EIO;
dp_sel = lan743x_csr_read(adapter, DP_SEL);
dp_sel &= ~DP_SEL_MASK_;
dp_sel |= select;
@@ -693,13 +692,10 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
- goto unlock;
+ return -EIO;
}
- ret = 0;
-unlock:
- mutex_unlock(&adapter->dp_lock);
- return ret;
+ return 0;
}
static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
@@ -1019,16 +1015,16 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
struct lan743x_phy *phy = &adapter->phy;
+ struct phy_device *phydev = NULL;
struct device_node *phynode;
- struct phy_device *phydev;
struct net_device *netdev;
int ret = -EIO;
netdev = adapter->netdev;
phynode = of_node_get(adapter->pdev->dev.of_node);
- adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
if (phynode) {
+ /* try devicetree phy, or fixed link */
of_get_phy_mode(phynode, &adapter->phy_mode);
if (of_phy_is_fixed_link(phynode)) {
@@ -1044,13 +1040,15 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
lan743x_phy_link_status_change, 0,
adapter->phy_mode);
of_node_put(phynode);
- if (!phydev)
- goto return_error;
- } else {
+ }
+
+ if (!phydev) {
+ /* try internal phy */
phydev = phy_find_first(adapter->mdiobus);
if (!phydev)
goto return_error;
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
ret = phy_connect_direct(netdev, phydev,
lan743x_phy_link_status_change,
adapter->phy_mode);
@@ -1310,13 +1308,13 @@ clean_up_data_descriptor:
goto clear_active;
if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb_any(buffer_info->skb);
goto clear_skb;
}
if (cleanup) {
lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb_any(buffer_info->skb);
} else {
ignore_sync = (buffer_info->flags &
TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
@@ -1626,7 +1624,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
if (required_number_of_descriptors >
lan743x_tx_get_avail_desc(tx)) {
if (required_number_of_descriptors > (tx->ring_size - 1)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
} else {
/* save to overflow buffer */
tx->overflow_skb = skb;
@@ -1659,7 +1657,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
start_frame_length,
do_timestamp,
skb->ip_summed == CHECKSUM_PARTIAL)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
goto unlock;
}
@@ -1678,7 +1676,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
* frame assembler clean up was performed inside
* lan743x_tx_frame_add_fragment
*/
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
goto unlock;
}
}
@@ -2733,7 +2731,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
- mutex_init(&adapter->dp_lock);
ret = lan743x_gpio_init(adapter);
if (ret)
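The dev_kfree_skb() conversions above matter because these paths can run under the TX ring spinlock with interrupts disabled, where a plain dev_kfree_skb() is not allowed. dev_kfree_skb_any() amounts to roughly the following dispatch (a sketch, not the kernel's exact implementation):

#include <linux/hardirq.h>
#include <linux/netdevice.h>

static void free_skb_any_context(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);	/* defer the free to softirq */
	else
		dev_kfree_skb(skb);	/* safe to free right here */
}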
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index c61a40411317..a536f4a4994d 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -712,9 +712,6 @@ struct lan743x_adapter {
struct lan743x_csr csr;
struct lan743x_intr intr;
- /* lock, used to prevent concurrent access to data port */
- struct mutex dp_lock;
-
struct lan743x_gpio gpio;
struct lan743x_ptp ptp;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index ed9808fc743b..35c72d4a78b3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -126,6 +126,11 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(ks, supported);
+ if (!idev->port_info) {
+ netdev_err(netdev, "port_info not initialized\n");
+ return -EOPNOTSUPP;
+ }
+
/* The port_info data is found in a DMA space that the NIC keeps
* up-to-date, so there's no need to request the data from the
* NIC; we already have it in our memory space.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 0e4cd8890cff..0a22f8ce9a2c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1647,9 +1647,9 @@ static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
ilog2(rounded_conn_num));
STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
- p_hwfn->p_cxt_mngr->first_free);
+ p_hwfn->p_cxt_mngr->src_t2.first_free);
STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
- p_hwfn->p_cxt_mngr->last_free);
+ p_hwfn->p_cxt_mngr->src_t2.last_free);
}
/* Timers PF */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 8b64495f8745..056e79620a0e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -326,9 +326,6 @@ struct qed_cxt_mngr {
/* SRC T2 */
struct qed_src_t2 src_t2;
- u32 t2_num_pages;
- u64 first_free;
- u64 last_free;
/* total number of SRQ's for this hwfn */
u32 srq_count;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 512cbef24097..a99861124630 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -2754,14 +2754,18 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
sizeof(*iwarp_info->partial_fpdus),
GFP_KERNEL);
- if (!iwarp_info->partial_fpdus)
+ if (!iwarp_info->partial_fpdus) {
+ rc = -ENOMEM;
goto err;
+ }
iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
- if (!iwarp_info->mpa_intermediate_buf)
+ if (!iwarp_info->mpa_intermediate_buf) {
+ rc = -ENOMEM;
goto err;
+ }
/* The mpa_bufs array serves for pending RX packets received on the
* mpa ll2 that don't have room on the tx ring and require later
@@ -2771,8 +2775,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
sizeof(*iwarp_info->mpa_bufs),
GFP_KERNEL);
- if (!iwarp_info->mpa_bufs)
+ if (!iwarp_info->mpa_bufs) {
+ rc = -ENOMEM;
goto err;
+ }
INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
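All three hunks above fix the same bug class: the function's shared error label returns rc, so each failure site must set rc before jumping, or a failed allocation would return 0 (success). The corrected shape in miniature:

#include <linux/slab.h>
#include <linux/types.h>

static int example_init(unsigned int n)
{
	u32 *buf;
	int rc = 0;

	buf = kcalloc(n, sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		rc = -ENOMEM;	/* omitting this returns "success" */
		goto err;
	}
	return 0;
err:
	/* unwind any partial state here */
	return rc;
}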
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b8af59fc1aa4..d2c190732d3e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2231,7 +2231,8 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
/* Boot either flash image or firmware image from host file system */
if (qlcnic_load_fw_file == 1) {
- if (qlcnic_83xx_load_fw_image_from_host(adapter))
+ err = qlcnic_83xx_load_fw_image_from_host(adapter);
+ if (err)
return err;
} else {
QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 29a7bfa2584d..3d7d3ab383f8 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -188,6 +188,11 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
dev = skb->dev;
port = rmnet_get_port_rcu(dev);
+ if (unlikely(!port)) {
+ atomic_long_inc(&skb->dev->rx_nohandler);
+ kfree_skb(skb);
+ goto done;
+ }
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 00f13805c6f7..85d9c3e30c69 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4080,9 +4080,17 @@ err_out:
return -EIO;
}
-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
+static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
{
- return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_60:
+ case RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_63:
+ return true;
+ default:
+ return false;
+ }
}
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
@@ -4154,8 +4162,9 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
- if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return !eth_skb_pad(skb);
+ if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
+ /* eth_skb_pad would free the skb on error */
+ return !__skb_put_padto(skb, ETH_ZLEN, false);
}
return true;
@@ -4334,18 +4343,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_ALL_TSO;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb->len < ETH_ZLEN) {
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_34:
- features &= ~NETIF_F_CSUM_MASK;
- break;
- default:
- break;
- }
- }
+ /* work around hw bug on some chip versions */
+ if (skb->len < ETH_ZLEN)
+ features &= ~NETIF_F_CSUM_MASK;
if (transport_offset > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
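eth_skb_pad() is skb_put_padto(skb, ETH_ZLEN), which frees the skb when it cannot be extended; since the transmit path here also frees the skb on failure, that could double-free. Passing free_on_error = false through __skb_put_padto() keeps ownership with the caller:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* pad to the 60-byte Ethernet minimum; on failure the skb is left
 * intact so the caller frees it exactly once
 */
static bool pad_short_frame(struct sk_buff *skb)
{
	return __skb_put_padto(skb, ETH_ZLEN, false) == 0;
}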
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index f61cb997a8f6..82b1c7a5a7a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -113,8 +113,10 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
/* Enable TX clock */
if (dwmac->data->tx_clk_en) {
dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
- if (IS_ERR(dwmac->tx_clk))
+ if (IS_ERR(dwmac->tx_clk)) {
+ ret = PTR_ERR(dwmac->tx_clk);
goto err_remove_config_dt;
+ }
clk_prepare_enable(dwmac->tx_clk);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index b6e5e3e36b63..81ee0a071b4e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -625,13 +625,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- if (plat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
- }
-
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
@@ -641,6 +634,13 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
res.wol_irq = pci_irq_vector(pdev, 0);
res.irq = pci_irq_vector(pdev, 0);
+ if (plat->eee_usecs_rate > 0) {
+ u32 tx_lpi_usec;
+
+ tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
+ writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
+ }
+
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret) {
pci_free_irq_vectors(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 002791b77356..ced6d76a0d85 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -1171,7 +1171,6 @@ const struct stmmac_ops dwmac4_ops = {
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
- .flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
.update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
@@ -1213,6 +1212,7 @@ const struct stmmac_ops dwmac410_ops = {
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
+ .flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
.update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index cb87d31a99df..57a53a600aa5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -23,7 +23,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
- 10000, 100000);
+ 10000, 200000);
}
/* CSR1 enables the transmit DMA to check for new descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 220626a8d499..ba45fe237512 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4757,6 +4757,7 @@ static void stmmac_napi_add(struct net_device *dev)
ch->priv_data = priv;
ch->index = queue;
+ spin_lock_init(&ch->lock);
if (queue < priv->plat->rx_queues_to_use) {
netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
@@ -5246,6 +5247,7 @@ int stmmac_resume(struct device *dev)
return ret;
}
+ rtnl_lock();
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -5261,6 +5263,7 @@ int stmmac_resume(struct device *dev)
stmmac_enable_all_queues(priv);
mutex_unlock(&priv->lock);
+ rtnl_unlock();
if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
rtnl_lock();
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 75056c14b161..5dc60ecabe56 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -1001,8 +1001,7 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
dev_err(dev, "Failed to register ptp clk %ld\n",
PTR_ERR(cpts->ptp_clock));
- if (!cpts->ptp_clock)
- ret = -ENODEV;
+ ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
goto refclk_disable;
}
cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9fd1f77190ad..b0f00b4edd94 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -838,9 +838,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (ret < 0)
goto err_cleanup;
- if (cpts_register(cpsw->cpts))
- dev_err(priv->dev, "error registering cpts device\n");
-
+ if (cpsw->cpts) {
+ if (cpts_register(cpsw->cpts))
+ dev_err(priv->dev, "error registering cpts device\n");
+ else
+ writel(0x10, &cpsw->wr_regs->misc_en);
+ }
}
cpsw_restore(priv);
@@ -1631,6 +1634,7 @@ static int cpsw_probe(struct platform_device *pdev)
CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
if (!ndev) {
dev_err(dev, "error allocating net_device\n");
+ ret = -ENOMEM;
goto clean_cpts;
}
@@ -1716,7 +1720,6 @@ static int cpsw_probe(struct platform_device *pdev)
/* Enable misc CPTS evnt_pend IRQ */
cpts_set_irqpoll(cpsw->cpts, false);
- writel(0x10, &cpsw->wr_regs->misc_en);
skip_cpts:
cpsw_notice(priv, probe,
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 4d02c5135611..4619c3a950b0 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -728,7 +728,6 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
(1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index f779d2e1b5c5..2f5e0ad23ad7 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -873,8 +873,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (ret < 0)
goto err_cleanup;
- if (cpts_register(cpsw->cpts))
- dev_err(priv->dev, "error registering cpts device\n");
+ if (cpsw->cpts) {
+ if (cpts_register(cpsw->cpts))
+ dev_err(priv->dev, "error registering cpts device\n");
+ else
+ writel(0x10, &cpsw->wr_regs->misc_en);
+ }
napi_enable(&cpsw->napi_rx);
napi_enable(&cpsw->napi_tx);
@@ -2006,7 +2010,6 @@ static int cpsw_probe(struct platform_device *pdev)
/* Enable misc CPTS evnt_pend IRQ */
cpts_set_irqpoll(cpsw->cpts, false);
- writel(0x10, &cpsw->wr_regs->misc_en);
skip_cpts:
ret = cpsw_register_notifiers(cpsw);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 51cc29f39038..31c5e36ff706 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -639,13 +639,10 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_NTP_ALL:
- return -ERANGE;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- break;
+ return -ERANGE;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index d07008a818df..1426bfc009bc 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -224,8 +224,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
if (ip_tunnel_collect_metadata() || gs->collect_md) {
__be16 flags;
- flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
- (gnvh->oam ? TUNNEL_OAM : 0) |
+ flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) |
(gnvh->critical ? TUNNEL_CRIT_OPT : 0);
tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 92642030e735..e8599bb948c0 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -362,22 +362,31 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
return trans;
}
-/* Free a previously-allocated transaction (used only in case of error) */
+/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
+ refcount_t *refcount = &trans->refcount;
struct gsi_trans_info *trans_info;
+ bool last;
- if (!refcount_dec_and_test(&trans->refcount))
+ /* We must hold the lock to release the last reference */
+ if (refcount_dec_not_one(refcount))
return;
trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
spin_lock_bh(&trans_info->spinlock);
- list_del(&trans->links);
+ /* Reference might have been added before we got the lock */
+ last = refcount_dec_and_test(refcount);
+ if (last)
+ list_del(&trans->links);
spin_unlock_bh(&trans_info->spinlock);
+ if (!last)
+ return;
+
ipa_gsi_trans_release(trans);
/* Releasing the reserved TREs implicitly frees the sgl[] and
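The gsi_trans_free() rework is the standard split-decrement idiom for a refcount whose zero transition must happen under a lock: decrement lock-free while the count provably cannot reach zero, then take the lock and decrement again, re-checking because another CPU may have taken a reference in between. In outline (struct my_obj and its fields are hypothetical):

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct my_obj {
	refcount_t refcount;
	spinlock_t lock;
	struct list_head links;
};

static void obj_put(struct my_obj *obj)
{
	if (refcount_dec_not_one(&obj->refcount))
		return;			/* fast path: not the last ref */

	spin_lock_bh(&obj->lock);
	if (refcount_dec_and_test(&obj->refcount))
		list_del(&obj->links);	/* really was the last ref */
	spin_unlock_bh(&obj->lock);
}

Where the lock protects only the list, refcount_dec_and_lock() packages the same idiom in one call.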
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index d07061417675..e7972e88ffe0 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -96,6 +96,7 @@ static const struct file_operations nsim_dev_take_snapshot_fops = {
.open = simple_open,
.write = nsim_dev_take_snapshot_write,
.llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
};
static ssize_t nsim_dev_trap_fa_cookie_read(struct file *file,
@@ -188,6 +189,7 @@ static const struct file_operations nsim_dev_trap_fa_cookie_fops = {
.read = nsim_dev_trap_fa_cookie_read,
.write = nsim_dev_trap_fa_cookie_write,
.llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
};
static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 62958b238d50..21e2974660e7 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -261,6 +261,7 @@ static const struct file_operations nsim_dev_health_break_fops = {
.open = simple_open,
.write = nsim_dev_health_break_write,
.llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
};
int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
index 6ab023acefd6..02dc3123eb6c 100644
--- a/drivers/net/netdevsim/udp_tunnels.c
+++ b/drivers/net/netdevsim/udp_tunnels.c
@@ -124,6 +124,7 @@ static const struct file_operations nsim_udp_tunnels_info_reset_fops = {
.open = simple_open,
.write = nsim_udp_tunnels_info_reset_write,
.llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
};
int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
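The .owner = THIS_MODULE additions pin the netdevsim module while one of these debugfs files is held open; without the owner reference the module could be unloaded and a later read()/write() would jump into freed code. The fixed shape (example_write is a hypothetical handler):

#include <linux/fs.h>
#include <linux/module.h>

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos);

static const struct file_operations example_fops = {
	.open	= simple_open,
	.write	= example_write,
	.llseek	= generic_file_llseek,
	.owner	= THIS_MODULE,	/* pins the module while the file is open */
};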
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index 6cf9b798b710..10be266e48e8 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -981,7 +981,6 @@ int vsc8584_macsec_init(struct phy_device *phydev)
switch (phydev->phy_id & phydev->drv->phy_id_mask) {
case PHY_ID_VSC856X:
- case PHY_ID_VSC8575:
case PHY_ID_VSC8582:
case PHY_ID_VSC8584:
INIT_LIST_HEAD(&vsc8531->macsec_flows);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index fb1db713b7fb..575580d3ffe0 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -551,6 +551,8 @@ static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
.name = "RTL8201CP Ethernet",
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 1d18c10e8f82..34aa196b7465 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -2389,7 +2389,8 @@ static int sfp_probe(struct platform_device *pdev)
continue;
sfp->gpio_irq[i] = gpiod_to_irq(sfp->gpio[i]);
- if (!sfp->gpio_irq[i]) {
+ if (sfp->gpio_irq[i] < 0) {
+ sfp->gpio_irq[i] = 0;
sfp->need_poll = true;
continue;
}
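gpiod_to_irq() reports failure as a negative errno, so the old !irq test missed every real error and could later hand a negative "IRQ number" to request_irq(). The corrected handling in isolation (a fragment; desc and need_poll come from the surrounding driver state):

int irq = gpiod_to_irq(desc);

if (irq < 0) {
	irq = 0;		/* record "no interrupt available" */
	need_poll = true;	/* fall back to polling the pin */
}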
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index ec97669be5c2..0fc39ac5ca88 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -291,8 +291,10 @@ static int smsc_phy_probe(struct phy_device *phydev)
return ret;
ret = clk_set_rate(priv->refclk, 50 * 1000 * 1000);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(priv->refclk);
return ret;
+ }
return 0;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index be69d272052f..cd06cae76035 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1961,12 +1961,15 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = tun_get(tfile);
ssize_t result;
+ int noblock = 0;
if (!tun)
return -EBADFD;
- result = tun_get_user(tun, tfile, NULL, from,
- file->f_flags & O_NONBLOCK, false);
+ if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
+ noblock = 1;
+
+ result = tun_get_user(tun, tfile, NULL, from, noblock, false);
tun_put(tun);
return result;
@@ -2185,10 +2188,15 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = tun_get(tfile);
ssize_t len = iov_iter_count(to), ret;
+ int noblock = 0;
if (!tun)
return -EBADFD;
- ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
+
+ if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
+ noblock = 1;
+
+ ret = tun_do_read(tun, tfile, to, noblock, NULL);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
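The tun change accounts for non-blocking behaviour being requested per call, via IOCB_NOWAIT in the kiocb (preadv2()/pwritev2() with RWF_NOWAIT, and io_uring), independent of the file's O_NONBLOCK flag, so both must be honoured. As a minimal helper:

#include <linux/fs.h>

static bool io_is_nonblocking(const struct file *file,
			      const struct kiocb *iocb)
{
	return (file->f_flags & O_NONBLOCK) ||
	       (iocb->ki_flags & IOCB_NOWAIT);
}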
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index ca89d8258dd3..c4568a491dc4 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -197,7 +197,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
}
/* enable ethernet mode (?) */
- if (cx82310_enable_ethernet(dev))
+ ret = cx82310_enable_ethernet(dev);
+ if (ret)
goto err;
/* get the MAC address */
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index b09b45382faf..207e59e74935 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,7 +59,7 @@
#define IPHETH_USBINTF_SUBCLASS 253
#define IPHETH_USBINTF_PROTO 1
-#define IPHETH_BUF_SIZE 1516
+#define IPHETH_BUF_SIZE 1514
#define IPHETH_IP_ALIGN 2 /* padding at front of URB */
#define IPHETH_TX_TIMEOUT (5 * HZ)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index a322f51873d0..fc378ff56775 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1070,7 +1070,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9011, 4)},
{QMI_FIXED_INTF(0x05c6, 0x9021, 1)},
{QMI_FIXED_INTF(0x05c6, 0x9022, 2)},
- {QMI_FIXED_INTF(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
{QMI_FIXED_INTF(0x05c6, 0x9026, 3)},
{QMI_FIXED_INTF(0x05c6, 0x902e, 5)},
{QMI_FIXED_INTF(0x05c6, 0x9031, 5)},
@@ -1309,6 +1309,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 60c1aadece89..f2793ffde191 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -608,8 +608,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
-static int vrf_finish_direct(struct net *net, struct sock *sk,
- struct sk_buff *skb)
+static void vrf_finish_direct(struct sk_buff *skb)
{
struct net_device *vrf_dev = skb->dev;
@@ -628,7 +627,8 @@ static int vrf_finish_direct(struct net *net, struct sock *sk,
skb_pull(skb, ETH_HLEN);
}
- return 1;
+ /* reset conntrack info */
+ nf_reset_ct(skb);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -707,15 +707,41 @@ static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
return skb;
}
+static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ vrf_finish_direct(skb);
+
+ return vrf_ip6_local_out(net, sk, skb);
+}
+
static int vrf_output6_direct(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
+ int err = 1;
+
skb->protocol = htons(ETH_P_IPV6);
- return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb->dev,
- vrf_finish_direct,
- !(IPCB(skb)->flags & IPSKB_REROUTED));
+ if (!(IPCB(skb)->flags & IPSKB_REROUTED))
+ err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
+ NULL, skb->dev, vrf_output6_direct_finish);
+
+ if (likely(err == 1))
+ vrf_finish_direct(skb);
+
+ return err;
+}
+
+static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = vrf_output6_direct(net, sk, skb);
+ if (likely(err == 1))
+ err = vrf_ip6_local_out(net, sk, skb);
+
+ return err;
}
static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
@@ -728,18 +754,15 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
- skb, NULL, vrf_dev, vrf_output6_direct);
+ skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
if (likely(err == 1))
err = vrf_output6_direct(net, sk, skb);
- /* reset skb device */
if (likely(err == 1))
- nf_reset_ct(skb);
- else
- skb = NULL;
+ return skb;
- return skb;
+ return NULL;
}
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
@@ -919,15 +942,41 @@ static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
return skb;
}
+static int vrf_output_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ vrf_finish_direct(skb);
+
+ return vrf_ip_local_out(net, sk, skb);
+}
+
static int vrf_output_direct(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
+ int err = 1;
+
skb->protocol = htons(ETH_P_IP);
- return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb->dev,
- vrf_finish_direct,
- !(IPCB(skb)->flags & IPSKB_REROUTED));
+ if (!(IPCB(skb)->flags & IPSKB_REROUTED))
+ err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
+ NULL, skb->dev, vrf_output_direct_finish);
+
+ if (likely(err == 1))
+ vrf_finish_direct(skb);
+
+ return err;
+}
+
+static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = vrf_output_direct(net, sk, skb);
+ if (likely(err == 1))
+ err = vrf_ip_local_out(net, sk, skb);
+
+ return err;
}
static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
@@ -940,18 +989,15 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
- skb, NULL, vrf_dev, vrf_output_direct);
+ skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
if (likely(err == 1))
err = vrf_output_direct(net, sk, skb);
- /* reset skb device */
if (likely(err == 1))
- nf_reset_ct(skb);
- else
- skb = NULL;
+ return skb;
- return skb;
+ return NULL;
}
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index f8aed0696d77..2369ca250cd6 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -889,6 +889,7 @@ static ssize_t cosa_write(struct file *file,
chan->tx_status = 1;
spin_unlock_irqrestore(&cosa->lock, flags);
up(&chan->wsem);
+ kfree(kbuf);
return -ERESTARTSYS;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index d43e0d3f3a12..052413eef059 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012-2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012-2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -128,7 +126,9 @@ enum iwl_sta_flags {
STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
- STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_2M = (8 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_4M = (9 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_MSK = (0xf << STA_FLG_MAX_AGG_SIZE_SHIFT),
STA_FLG_AGG_MPDU_DENS_SHIFT = 23,
STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index a731f28e101a..53b438d709db 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -421,12 +421,14 @@ struct iwl_hs20_roc_res {
* able to run the GO Negotiation. Will not be fragmented and not
* repetitive. Valid only on the P2P Device MAC. Only the duration will
* be taken into account.
+ * @SESSION_PROTECT_CONF_MAX_ID: not used
*/
enum iwl_mvm_session_prot_conf_id {
SESSION_PROTECT_CONF_ASSOC,
SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION,
+ SESSION_PROTECT_CONF_MAX_ID,
}; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
/**
@@ -459,7 +461,7 @@ struct iwl_mvm_session_prot_cmd {
* @mac_id: the mac id for which the session protection started / ended
* @status: 1 means success, 0 means failure
* @start: 1 means the session protection started, 0 means it ended
- * @conf_id: the configuration id of the session that started / eneded
+ * @conf_id: see &enum iwl_mvm_session_prot_conf_id
*
* Note that any session protection will always get two notifications: start
* and end, even if the firmware could not schedule it.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index cb9e8e189a1a..1d48c7d7fffd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -147,6 +147,16 @@
#define CSR_MAC_SHADOW_REG_CTL2 (CSR_BASE + 0x0AC)
#define CSR_MAC_SHADOW_REG_CTL2_RX_WAKE 0xFFFF
+/* LTR control (since IWL_DEVICE_FAMILY_22000) */
+#define CSR_LTR_LONG_VAL_AD (CSR_BASE + 0x0D4)
+#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ 0x80000000
+#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE 0x1c000000
+#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL 0x03ff0000
+#define CSR_LTR_LONG_VAL_AD_SNOOP_REQ 0x00008000
+#define CSR_LTR_LONG_VAL_AD_SNOOP_SCALE 0x00001c00
+#define CSR_LTR_LONG_VAL_AD_SNOOP_VAL 0x000003ff
+#define CSR_LTR_LONG_VAL_AD_SCALE_USEC 2
+
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 688c1125e67b..b627e7da7ac9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -3080,7 +3080,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
/* this would be a mac80211 bug ... but don't crash */
if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
- return -EINVAL;
+ return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL;
/*
* If we are in a STA removal flow and in DQA mode:
@@ -3127,6 +3127,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
goto out_unlock;
}
+ if (vif->type == NL80211_IFTYPE_STATION)
+ vif->bss_conf.he_support = sta->he_cap.has_he;
+
if (sta->tdls &&
(vif->p2p ||
iwl_mvm_tdls_sta_count(mvm, NULL) ==
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 017537944fd0..799d8219463c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -196,6 +196,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mpdu_dens = sta->ht_cap.ampdu_density;
}
+
if (sta->vht_cap.vht_supported) {
agg_size = sta->vht_cap.cap &
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
@@ -205,6 +206,23 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
agg_size = sta->ht_cap.ampdu_factor;
}
+ /* D6.0 10.12.2 A-MPDU length limit rules
+ * A STA indicates the maximum length of the A-MPDU pre-EOF padding
+ * that it can receive in an HE PPDU in the Maximum A-MPDU Length
+ * Exponent field in its HT Capabilities, VHT Capabilities,
+ * and HE 6 GHz Band Capabilities elements (if present) and the
+ * Maximum A-MPDU Length Exponent Extension field in its HE
+ * Capabilities element
+ */
+ if (sta->he_cap.has_he)
+ agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
+
+ /* Limit to max A-MPDU supported by FW */
+ if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
+ agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
+ STA_FLG_MAX_AGG_SIZE_SHIFT);
+
add_sta_cmd.station_flags |=
cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
add_sta_cmd.station_flags |=
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 7fce79c1c114..1db6d8d38822 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -641,11 +641,32 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
}
}
+static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif)
+{
+ struct iwl_mvm_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .conf_id = cpu_to_le32(mvmvif->time_event_data.id),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
+}
+
static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data,
u32 *uid)
{
u32 id;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
/*
* It is possible that by the time we got to this point the time
@@ -663,14 +684,29 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_te_clear_data(mvm, te_data);
spin_unlock_bh(&mvm->time_event_lock);
- /*
- * It is possible that by the time we try to remove it, the time event
- * has already ended and removed. In such a case there is no need to
- * send a removal command.
+ /* When session protection is supported, the te_data->id field
+ * is reused to save session protection's configuration.
*/
- if (id == TE_MAX) {
- IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+ if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
+ /* Session protection is still ongoing. Cancel it */
+ iwl_mvm_cancel_session_protection(mvm, mvmvif);
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+ iwl_mvm_roc_finished(mvm);
+ }
+ }
return false;
+ } else {
+ /* It is possible that by the time we try to remove it, the
+ * time event has already ended and removed. In such a case
+ * there is no need to send a removal command.
+ */
+ if (id == TE_MAX) {
+ IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
+ return false;
+ }
}
return true;
@@ -771,6 +807,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
struct ieee80211_vif *vif;
+ struct iwl_mvm_vif *mvmvif;
rcu_read_lock();
vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
@@ -779,9 +816,10 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (!vif)
goto out_unlock;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
/* The vif is not a P2P_DEVICE, maintain its time_event_data */
if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data =
&mvmvif->time_event_data;
@@ -816,10 +854,14 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
/* End TE, notify mac80211 */
+ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
ieee80211_remain_on_channel_expired(mvm->hw);
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
} else if (le32_to_cpu(notif->start)) {
+ if (WARN_ON(mvmvif->time_event_data.id !=
+ le32_to_cpu(notif->conf_id)))
+ goto out_unlock;
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw); /* Start TE */
}
@@ -845,20 +887,24 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ /* The time_event_data.id field is reused to save session
+ * protection's configuration.
+ */
switch (type) {
case IEEE80211_ROC_TYPE_NORMAL:
- cmd.conf_id =
- cpu_to_le32(SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV);
+ mvmvif->time_event_data.id =
+ SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV;
break;
case IEEE80211_ROC_TYPE_MGMT_TX:
- cmd.conf_id =
- cpu_to_le32(SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION);
+ mvmvif->time_event_data.id =
+ SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION;
break;
default:
WARN_ONCE(1, "Got an invalid ROC type\n");
return -EINVAL;
}
+ cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
MAC_CONF_GROUP, 0),
0, sizeof(cmd), &cmd);
@@ -960,25 +1006,6 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}
-static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif)
-{
- struct iwl_mvm_session_prot_cmd cmd = {
- .id_and_color =
- cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
- mvmvif->color)),
- .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
- };
- int ret;
-
- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
- 0, sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(mvm,
- "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
-}
-
void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif;
@@ -988,10 +1015,13 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
mvmvif = iwl_mvm_vif_from_mac80211(vif);
- iwl_mvm_cancel_session_protection(mvm, mvmvif);
-
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ iwl_mvm_cancel_session_protection(mvm, mvmvif);
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+ } else {
+ iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
+ &mvmvif->time_event_data);
+ }
iwl_mvm_roc_finished(mvm);
@@ -1126,10 +1156,15 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color)),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
- .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
};
+ /* The time_event_data.id field is reused to save session
+ * protection's configuration.
+ */
+ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_ASSOC;
+ cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
+
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->time_event_lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index a0352fa873d9..5512e3c630c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -252,6 +252,26 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
CSR_AUTO_FUNC_BOOT_ENA);
+
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
+ /*
+ * The firmware initializes this again later (to a smaller
+ * value), but for the boot process initialize the LTR to
+ * ~250 usec.
+ */
+ u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+ u32_encode_bits(250,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+ CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+ u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
+
+ iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val);
+ }
+
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
else
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index d2e69ad53b27..2fffbbc8462f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2156,18 +2156,36 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
void *buf, int dwords)
{
unsigned long flags;
- int offs, ret = 0;
+ int offs = 0;
u32 *vals = buf;
- if (iwl_trans_grab_nic_access(trans, &flags)) {
- iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
- for (offs = 0; offs < dwords; offs++)
- vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
- iwl_trans_release_nic_access(trans, &flags);
- } else {
- ret = -EBUSY;
+ while (offs < dwords) {
+ /* limit the time we spin here under lock to 1/2s */
+ ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
+
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
+ iwl_write32(trans, HBUS_TARG_MEM_RADDR,
+ addr + 4 * offs);
+
+ while (offs < dwords) {
+ vals[offs] = iwl_read32(trans,
+ HBUS_TARG_MEM_RDAT);
+ offs++;
+
+ /* calling ktime_get is expensive so
+ * do it only once every 128 reads
+ */
+ if (offs % 128 == 0 && ktime_after(ktime_get(),
+ timeout))
+ break;
+ }
+ iwl_trans_release_nic_access(trans, &flags);
+ } else {
+ return -EBUSY;
+ }
}
- return ret;
+
+ return 0;
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
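The read_mem rework bounds how long the NIC-access lock is held: read in bursts, consult the clock only every 128 words to amortize the cost of ktime_get(), and when the deadline passes, drop the lock and loop to re-acquire it for the remainder. The control skeleton, with the register accesses replaced by hypothetical helpers:

#include <linux/ktime.h>

int offs = 0;

while (offs < dwords) {
	/* hold the lock for at most ~500 ms per burst */
	ktime_t deadline = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);

	if (!grab_nic_lock())			/* hypothetical */
		return -EBUSY;
	while (offs < dwords) {
		vals[offs] = read_one_word(offs);	/* hypothetical */
		offs++;
		if (offs % 128 == 0 && ktime_after(ktime_get(), deadline))
			break;			/* yield to other users */
	}
	release_nic_lock();			/* hypothetical */
}
return 0;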
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 042015bc8055..b2fd87834f23 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -1482,7 +1482,7 @@ static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev,
int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
u32 *buffer)
{
- if (!rtwdev->chip->fw_fifo_addr) {
+ if (!rtwdev->chip->fw_fifo_addr[0]) {
rtw_dbg(rtwdev, RTW_DBG_FW, "chip not support dump fw fifo\n");
return -ENOTSUPP;
}