Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/Kconfig4
-rw-r--r--drivers/bluetooth/btbcm.c139
-rw-r--r--drivers/bluetooth/btbcm.h10
-rw-r--r--drivers/bluetooth/btqca.c18
-rw-r--r--drivers/bluetooth/btqca.h3
-rw-r--r--drivers/bluetooth/btrtl.c10
-rw-r--r--drivers/bluetooth/btusb.c205
-rw-r--r--drivers/bluetooth/hci_bcm.c35
-rw-r--r--drivers/bluetooth/hci_h5.c2
-rw-r--r--drivers/bluetooth/hci_qca.c57
-rw-r--r--drivers/bluetooth/hci_serdev.c4
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c89
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h1
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c3
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile28
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c17
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c11
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c10
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c4
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c5
-rw-r--r--drivers/infiniband/hw/mlx5/main.c96
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c3
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c79
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h46
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/qp.c)304
-rw-r--r--drivers/infiniband/hw/mlx5/srq_cmd.c2
-rw-r--r--drivers/macintosh/mac_hid.c3
-rw-r--r--drivers/media/rc/bpf-lirc.c4
-rw-r--r--drivers/net/Kconfig12
-rw-r--r--drivers/net/appletalk/Kconfig8
-rw-r--r--drivers/net/arcnet/Kconfig6
-rw-r--r--drivers/net/bonding/bond_alb.c46
-rw-r--r--drivers/net/bonding/bond_main.c300
-rw-r--r--drivers/net/bonding/bonding_priv.h2
-rw-r--r--drivers/net/caif/Kconfig2
-rw-r--r--drivers/net/dsa/b53/b53_common.c81
-rw-r--r--drivers/net/dsa/b53/b53_priv.h8
-rw-r--r--drivers/net/dsa/b53/b53_srab.c2
-rw-r--r--drivers/net/dsa/mt7530.c24
-rw-r--r--drivers/net/dsa/mt7530.h6
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c16
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c55
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h7
-rw-r--r--drivers/net/dsa/ocelot/felix.c84
-rw-r--r--drivers/net/dsa/ocelot/felix.h6
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c192
-rw-r--r--drivers/net/dsa/sja1105/Kconfig9
-rw-r--r--drivers/net/dsa/sja1105/Makefile4
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h91
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c58
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c84
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c144
-rw-r--r--drivers/net/dsa/sja1105/sja1105_flower.c215
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c1131
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h13
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c10
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c264
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.h81
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.c127
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.h36
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.c782
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.h72
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-platform.c10
-rw-r--r--drivers/net/ethernet/3com/3c509.c1
-rw-r--r--drivers/net/ethernet/3com/3c515.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c4
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c1
-rw-r--r--drivers/net/ethernet/agere/et131x.c4
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h8
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c39
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h47
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c66
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c14
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h7
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/7990.h2
-rw-r--r--drivers/net/ethernet/amd/atarilance.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h40
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h22
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c70
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c39
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c20
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c97
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h42
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c18
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c60
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h26
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c35
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c684
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h137
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c208
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h91
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h328
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c131
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h606
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c320
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c6
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c43
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c100
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h24
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c261
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h216
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c1
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c790
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h23
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c96
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h12
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h32
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c144
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig4
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c10
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c2
-rw-r--r--drivers/net/ethernet/dnet.c3
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c9
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c175
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h15
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c34
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h86
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h159
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c50
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c1103
-rw-r--r--drivers/net/ethernet/freescale/fec.h4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c95
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c160
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h31
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c88
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h44
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c195
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c1649
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h34
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c79
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h87
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c368
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h26
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h87
-rw-r--r--drivers/net/ethernet/huawei/hinic/Makefile2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c498
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c47
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c205
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h90
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c98
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c78
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h26
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c53
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.h26
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c1210
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h154
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c17
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h12
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c140
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c207
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h159
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c15
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c1294
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.h109
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c17
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c54
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h400
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c33
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c576
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h12
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c157
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c34
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c5
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c36
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c52
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c161
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c136
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c154
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c150
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c463
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h181
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c220
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c305
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c378
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c621
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h54
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c5
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c12
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c5
-rw-r--r--drivers/net/ethernet/mscc/Makefile2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c212
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h3
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.c113
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.h5
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c27
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c29
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c324
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.h41
-rw-r--r--drivers/net/ethernet/mscc/ocelot_regs.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_tc.c6
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c12
-rw-r--r--drivers/net/ethernet/neterion/Kconfig4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c16
-rw-r--r--drivers/net/ethernet/ni/nixge.c3
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c14
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h17
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c20
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h1089
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c158
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h28
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c136
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.h6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c26
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c42
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c40
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c43
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c253
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h17
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c24
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c232
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c5
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.h5
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c3
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c618
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c214
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c27
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c25
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h12
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c82
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h17
-rw-r--r--drivers/net/ethernet/sfc/mcdi_functions.c8
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c7
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h10
-rw-r--r--drivers/net/ethernet/sfc/nic.h11
-rw-r--r--drivers/net/ethernet/sfc/ptp.c7
-rw-r--r--drivers/net/ethernet/sfc/rx.c1
-rw-r--r--drivers/net/ethernet/sfc/siena.c8
-rw-r--r--drivers/net/ethernet/smsc/Kconfig4
-rw-r--r--drivers/net/ethernet/socionext/netsec.c30
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c160
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c146
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c74
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c67
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c5
-rw-r--r--drivers/net/ethernet/sun/cassini.c14
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c12
-rw-r--r--drivers/net/ethernet/ti/Kconfig25
-rw-r--r--drivers/net/ethernet/ti/Makefile3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c36
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c205
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h13
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c626
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.h29
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c1086
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h74
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c25
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c17
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h2
-rw-r--r--drivers/net/ethernet/ti/cpts.c422
-rw-r--r--drivers/net/ethernet/ti/cpts.h27
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c2
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c3
-rw-r--r--drivers/net/ethernet/ti/tlan.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/via/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c8
-rw-r--r--drivers/net/fddi/Kconfig2
-rw-r--r--drivers/net/hamradio/Kconfig14
-rw-r--r--drivers/net/hamradio/bpqether.c20
-rw-r--r--drivers/net/hamradio/scc.c2
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c1
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/ipa/gsi.c114
-rw-r--r--drivers/net/ipa/gsi.h15
-rw-r--r--drivers/net/ipa/ipa.h10
-rw-r--r--drivers/net/ipa/ipa_cmd.c59
-rw-r--r--drivers/net/ipa/ipa_cmd.h11
-rw-r--r--drivers/net/ipa/ipa_data-sc7180.c14
-rw-r--r--drivers/net/ipa/ipa_data-sdm845.c15
-rw-r--r--drivers/net/ipa/ipa_data.h29
-rw-r--r--drivers/net/ipa/ipa_endpoint.c174
-rw-r--r--drivers/net/ipa/ipa_endpoint.h2
-rw-r--r--drivers/net/ipa/ipa_main.c4
-rw-r--r--drivers/net/ipa/ipa_mem.c210
-rw-r--r--drivers/net/ipa/ipa_mem.h3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c2
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/macvlan.c10
-rw-r--r--drivers/net/phy/Kconfig18
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/at803x.c280
-rw-r--r--drivers/net/phy/bcm-phy-lib.c339
-rw-r--r--drivers/net/phy/bcm-phy-lib.h19
-rw-r--r--drivers/net/phy/bcm54140.c860
-rw-r--r--drivers/net/phy/broadcom.c64
-rw-r--r--drivers/net/phy/cortina.c1
-rw-r--r--drivers/net/phy/dp83867.c4
-rw-r--r--drivers/net/phy/marvell.c201
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/mdio-bcm-iproc.c4
-rw-r--r--drivers/net/phy/mdio-ipq4019.c160
-rw-r--r--drivers/net/phy/mdio-moxart.c1
-rw-r--r--drivers/net/phy/mdio_bus.c25
-rw-r--r--drivers/net/phy/micrel.c128
-rw-r--r--drivers/net/phy/mscc/mscc.h1
-rw-r--r--drivers/net/phy/mscc/mscc_main.c102
-rw-r--r--drivers/net/phy/nxp-tja11xx.c386
-rw-r--r--drivers/net/phy/phy-c45.c1
-rw-r--r--drivers/net/phy/phy.c110
-rw-r--r--drivers/net/phy/phy_device.c248
-rw-r--r--drivers/net/phy/phylink.c46
-rw-r--r--drivers/net/phy/realtek.c15
-rw-r--r--drivers/net/phy/teranetics.c1
-rw-r--r--drivers/net/plip/Kconfig2
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/rionet.c3
-rw-r--r--drivers/net/team/team.c1
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/ax88179_178a.c79
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c22
-rw-r--r--drivers/net/usb/sierra_net.c5
-rw-r--r--drivers/net/veth.c28
-rw-r--r--drivers/net/virtio_net.c15
-rw-r--r--drivers/net/vrf.c1
-rw-r--r--drivers/net/wan/Kconfig4
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h36
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h20
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c399
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h40
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h24
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c51
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c326
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c71
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c61
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c191
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h19
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c186
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h30
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c127
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h110
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c52
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h19
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h15
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c9
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h22
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c297
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c49
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c69
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c95
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c35
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c170
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h88
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c49
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c21
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/pio.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/xmit.c13
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c115
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Kconfig4
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/soc.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c139
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c8
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c22
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/spectrum_cs.c3
-rw-r--r--drivers/net/wireless/intersil/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h2
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.h2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c39
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c2
-rw-r--r--drivers/net/wireless/ray_cs.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h22
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.c26
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h32
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c407
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c40
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c65
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h60
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c39
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c60
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h97
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c1137
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h144
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d_table.c1196
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d_table.h15
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c23
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c152
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h28
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c14666
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c2
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c4
-rw-r--r--drivers/of/of_mdio.c73
-rw-r--r--drivers/parport/procfs.c39
-rw-r--r--drivers/power/supply/test_power.c2
-rw-r--r--drivers/ptp/ptp_chardev.c1
-rw-r--r--drivers/ptp/ptp_clock.c3
-rw-r--r--drivers/ptp/ptp_clockmatrix.c94
-rw-r--r--drivers/ptp/ptp_clockmatrix.h8
-rw-r--r--drivers/ptp/ptp_idt82p33.c6
-rw-r--r--drivers/ptp/ptp_ines.c8
-rw-r--r--drivers/ptp/ptp_kvm.c2
-rw-r--r--drivers/s390/net/Kconfig9
-rw-r--r--drivers/s390/net/qeth_core.h49
-rw-r--r--drivers/s390/net/qeth_core_main.c465
-rw-r--r--drivers/s390/net/qeth_core_mpc.h25
-rw-r--r--drivers/s390/net/qeth_core_sys.c15
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c19
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c1
-rw-r--r--drivers/ssb/scan.c6
-rw-r--r--drivers/ssb/sprom.c12
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/README2
-rw-r--r--drivers/vhost/net.c1
710 files changed, 51820 insertions, 9527 deletions
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 8c37294f1d1e..cfb0d16b60ad 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -306,7 +306,7 @@ config ATM_IA
for more info about the cards. Say Y (or M to compile as a module
named iphase) here if you have one of these cards.
- See the file <file:Documentation/networking/iphase.txt> for further
+ See the file <file:Documentation/networking/iphase.rst> for further
details.
config ATM_IA_DEBUG
@@ -336,7 +336,7 @@ config ATM_FORE200E
on PCI and SBUS hosts. Say Y (or M to compile as a module
named fore_200e) here if you have one of these ATM adapters.
- See the file <file:Documentation/networking/fore200e.txt> for
+ See the file <file:Documentation/networking/fore200e.rst> for
further details.
config ATM_FORE200E_USE_TASKLET
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1f498f358f60..df7a8a22e53c 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -27,6 +27,11 @@
#define BDADDR_BCM4345C5 (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0xc5, 0x45, 0x43}})
#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
+#define BCM_FW_NAME_LEN 64
+#define BCM_FW_NAME_COUNT_MAX 2
+/* For kmalloc-ing the fw-name array instead of putting it on the stack */
+typedef char bcm_fw_name[BCM_FW_NAME_LEN];
+
int btbcm_check_bdaddr(struct hci_dev *hdev)
{
struct hci_rp_read_bd_addr *bda;
@@ -358,6 +363,13 @@ static int btbcm_read_info(struct hci_dev *hdev)
bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]);
kfree_skb(skb);
+ return 0;
+}
+
+static int btbcm_print_local_name(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
/* Read Local Name */
skb = btbcm_read_local_name(hdev);
if (IS_ERR(skb))
@@ -380,6 +392,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4204, "BCM2076B1" }, /* 002.002.004 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
+ { 0x4606, "BCM4324B5" }, /* 002.006.006 */
{ 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
{ 0x2122, "BCM4343A0" }, /* 001.001.034 */
@@ -395,6 +408,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
};
static const struct bcm_subver_table bcm_usb_subver_table[] = {
+ { 0x2105, "BCM20703A1" }, /* 001.001.005 */
{ 0x210b, "BCM43142A0" }, /* 001.001.011 */
{ 0x2112, "BCM4314A0" }, /* 001.001.018 */
{ 0x2118, "BCM20702A0" }, /* 001.001.024 */
@@ -408,14 +422,17 @@ static const struct bcm_subver_table bcm_usb_subver_table[] = {
{ }
};
-int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
- bool reinit)
+int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done)
{
u16 subver, rev, pid, vid;
- const char *hw_name = "BCM";
struct sk_buff *skb;
struct hci_rp_read_local_version *ver;
const struct bcm_subver_table *bcm_subver_table;
+ const char *hw_name = NULL;
+ char postfix[16] = "";
+ int fw_name_count = 0;
+ bcm_fw_name *fw_name;
+ const struct firmware *fw;
int i, err;
/* Reset */
@@ -434,15 +451,14 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
kfree_skb(skb);
/* Read controller information */
- if (!reinit) {
+ if (!(*fw_load_done)) {
err = btbcm_read_info(hdev);
if (err)
return err;
}
-
- /* Upper nibble of rev should be between 0 and 3? */
- if (((rev & 0xf000) >> 12) > 3)
- return 0;
+ err = btbcm_print_local_name(hdev);
+ if (err)
+ return err;
bcm_subver_table = (hdev->bus == HCI_USB) ? bcm_usb_subver_table :
bcm_uart_subver_table;
@@ -454,6 +470,13 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
}
}
+ bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
+ hw_name ? hw_name : "BCM", (subver & 0xe000) >> 13,
+ (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+
+ if (*fw_load_done)
+ return 0;
+
if (hdev->bus == HCI_USB) {
/* Read USB Product Info */
skb = btbcm_read_usb_product(hdev);
@@ -464,85 +487,81 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
pid = get_unaligned_le16(skb->data + 3);
kfree_skb(skb);
- snprintf(fw_name, len, "brcm/%s-%4.4x-%4.4x.hcd",
- hw_name, vid, pid);
- } else {
- snprintf(fw_name, len, "brcm/%s.hcd", hw_name);
+ snprintf(postfix, sizeof(postfix), "-%4.4x-%4.4x", vid, pid);
}
- bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
- hw_name, (subver & 0xe000) >> 13,
- (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+ fw_name = kmalloc(BCM_FW_NAME_COUNT_MAX * BCM_FW_NAME_LEN, GFP_KERNEL);
+ if (!fw_name)
+ return -ENOMEM;
+
+ if (hw_name) {
+ snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN,
+ "brcm/%s%s.hcd", hw_name, postfix);
+ fw_name_count++;
+ }
+
+ snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN,
+ "brcm/BCM%s.hcd", postfix);
+ fw_name_count++;
+
+ for (i = 0; i < fw_name_count; i++) {
+ err = firmware_request_nowarn(&fw, fw_name[i], &hdev->dev);
+ if (err == 0) {
+ bt_dev_info(hdev, "%s '%s' Patch",
+ hw_name ? hw_name : "BCM", fw_name[i]);
+ *fw_load_done = true;
+ break;
+ }
+ }
+
+ if (*fw_load_done) {
+ err = btbcm_patchram(hdev, fw);
+ if (err)
+ bt_dev_info(hdev, "BCM: Patch failed (%d)", err);
+
+ release_firmware(fw);
+ } else {
+ bt_dev_err(hdev, "BCM: firmware Patch file not found, tried:");
+ for (i = 0; i < fw_name_count; i++)
+ bt_dev_err(hdev, "BCM: '%s'", fw_name[i]);
+ }
+ kfree(fw_name);
return 0;
}
EXPORT_SYMBOL_GPL(btbcm_initialize);
-int btbcm_finalize(struct hci_dev *hdev)
+int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done)
{
- char fw_name[64];
int err;
- /* Re-initialize */
- err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), true);
- if (err)
- return err;
+ /* Re-initialize if necessary */
+ if (*fw_load_done) {
+ err = btbcm_initialize(hdev, fw_load_done);
+ if (err)
+ return err;
+ }
btbcm_check_bdaddr(hdev);
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- /* Some devices ship with the controller default address.
- * Allow the bootloader to set a valid address through the
- * device tree.
- */
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
-
return 0;
}
EXPORT_SYMBOL_GPL(btbcm_finalize);
int btbcm_setup_patchram(struct hci_dev *hdev)
{
- char fw_name[64];
- const struct firmware *fw;
- struct sk_buff *skb;
+ bool fw_load_done = false;
int err;
/* Initialize */
- err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), false);
+ err = btbcm_initialize(hdev, &fw_load_done);
if (err)
return err;
- err = request_firmware(&fw, fw_name, &hdev->dev);
- if (err < 0) {
- bt_dev_info(hdev, "BCM: Patch %s not found", fw_name);
- goto done;
- }
-
- btbcm_patchram(hdev, fw);
-
- release_firmware(fw);
-
- /* Re-initialize */
- err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), true);
- if (err)
- return err;
-
- /* Read Local Name */
- skb = btbcm_read_local_name(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
- kfree_skb(skb);
-
-done:
- btbcm_check_bdaddr(hdev);
-
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
-
- return 0;
+ /* Re-initialize after loading Patch */
+ return btbcm_finalize(hdev, &fw_load_done);
}
EXPORT_SYMBOL_GPL(btbcm_setup_patchram);
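The net effect of the btbcm.c rework is that firmware-name construction and the firmware request now live inside btbcm_initialize() itself; callers only carry a bool recording whether a patch was actually downloaded. A minimal caller sketch of the new two-step flow, mirroring what btbcm_setup_patchram() above now does (the function name example_bcm_setup is illustrative and not part of the patch):

static int example_bcm_setup(struct hci_dev *hdev)
{
	bool fw_load_done = false;	/* set by btbcm_initialize() when a patch is loaded */
	int err;

	/* Resets the controller, identifies the chip, builds the candidate
	 * "brcm/<name><postfix>.hcd" names and downloads the first patch file
	 * that firmware_request_nowarn() can find, updating fw_load_done.
	 */
	err = btbcm_initialize(hdev, &fw_load_done);
	if (err)
		return err;

	/* Re-initializes only if a patch was loaded, then checks the bdaddr
	 * and sets the usual BCM quirks.
	 */
	return btbcm_finalize(hdev, &fw_load_done);
}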
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index 014ef847a486..8bf01565fdfc 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -62,9 +62,8 @@ int btbcm_write_pcm_int_params(struct hci_dev *hdev,
int btbcm_setup_patchram(struct hci_dev *hdev);
int btbcm_setup_apple(struct hci_dev *hdev);
-int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
- bool reinit);
-int btbcm_finalize(struct hci_dev *hdev);
+int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done);
+int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done);
#else
@@ -105,13 +104,12 @@ static inline int btbcm_setup_apple(struct hci_dev *hdev)
return 0;
}
-static inline int btbcm_initialize(struct hci_dev *hdev, char *fw_name,
- size_t len, bool reinit)
+static inline int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done)
{
return 0;
}
-static inline int btbcm_finalize(struct hci_dev *hdev)
+static inline int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done)
{
return 0;
}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index a16845c0751d..3ea866d44568 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -32,7 +32,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
* VSE event. WCN3991 sends version command response as a payload to
* command complete event.
*/
- if (soc_type == QCA_WCN3991) {
+ if (soc_type >= QCA_WCN3991) {
event_type = 0;
rlen += 1;
rtype = EDL_PATCH_VER_REQ_CMD;
@@ -69,7 +69,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
goto out;
}
- if (soc_type == QCA_WCN3991)
+ if (soc_type >= QCA_WCN3991)
memmove(&edl->data, &edl->data[1], sizeof(*ver));
ver = (struct qca_btsoc_version *)(edl->data);
@@ -217,7 +217,7 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
tlv_nvm->data[0] |= 0x80;
/* UART Baud Rate */
- if (soc_type == QCA_WCN3991)
+ if (soc_type >= QCA_WCN3991)
tlv_nvm->data[1] = nvm_baud_rate;
else
tlv_nvm->data[2] = nvm_baud_rate;
@@ -268,7 +268,7 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
* VSE event. WCN3991 sends version command response as a payload to
* command complete event.
*/
- if (soc_type == QCA_WCN3991) {
+ if (soc_type >= QCA_WCN3991) {
event_type = 0;
rlen = sizeof(*edl);
rtype = EDL_PATCH_TLV_REQ_CMD;
@@ -301,7 +301,7 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
err = -EIO;
}
- if (soc_type == QCA_WCN3991)
+ if (soc_type >= QCA_WCN3991)
goto out;
tlv_resp = (struct tlv_seg_resp *)(edl->data);
@@ -442,6 +442,11 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
(soc_ver & 0x0000000f);
snprintf(config.fwname, sizeof(config.fwname),
"qca/crbtfw%02x.tlv", rom_ver);
+ } else if (soc_type == QCA_QCA6390) {
+ rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
+ (soc_ver & 0x0000000f);
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htbtfw%02x.tlv", rom_ver);
} else {
snprintf(config.fwname, sizeof(config.fwname),
"qca/rampatch_%08x.bin", soc_ver);
@@ -464,6 +469,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
else if (qca_is_wcn399x(soc_type))
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02x.bin", rom_ver);
+ else if (soc_type == QCA_QCA6390)
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htnv%02x.bin", rom_ver);
else
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index e16a4d650597..6e1e62dd4b95 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -125,8 +125,9 @@ enum qca_btsoc_type {
QCA_AR3002,
QCA_ROME,
QCA_WCN3990,
- QCA_WCN3991,
QCA_WCN3998,
+ QCA_WCN3991,
+ QCA_QCA6390,
};
#if IS_ENABLED(CONFIG_BT_QCA)
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 67f4bc21e7c5..3a9afc905f24 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -130,12 +130,19 @@ static const struct id_table ic_id_table[] = {
.cfg_name = "rtl_bt/rtl8821c_config" },
/* 8761A */
- { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0,
+ { IC_INFO(RTL_ROM_LMP_8761A, 0xa),
.config_needed = false,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8761a_fw.bin",
.cfg_name = "rtl_bt/rtl8761a_config" },
+ /* 8761B */
+ { IC_INFO(RTL_ROM_LMP_8761A, 0xb),
+ .config_needed = false,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8761b_fw.bin",
+ .cfg_name = "rtl_bt/rtl8761b_config" },
+
/* 8822C with UART interface */
{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
IC_MATCH_FL_HCIBUS,
@@ -267,6 +274,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8723B, 9 }, /* 8723D */
{ RTL_ROM_LMP_8821A, 10 }, /* 8821C */
{ RTL_ROM_LMP_8822B, 13 }, /* 8822C */
+ { RTL_ROM_LMP_8761A, 14 }, /* 8761B */
};
min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3bdec42c9612..5f022e9cf667 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -58,6 +58,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_CW6622 0x100000
#define BTUSB_MEDIATEK 0x200000
#define BTUSB_WIDEBAND_SPEECH 0x400000
+#define BTUSB_VALID_LE_STATES 0x800000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -335,11 +336,14 @@ static const struct usb_device_id blacklist_table[] = {
/* Intel Bluetooth devices */
{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW |
- BTUSB_WIDEBAND_SPEECH },
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH},
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@@ -348,7 +352,8 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW |
- BTUSB_WIDEBAND_SPEECH },
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
@@ -492,6 +497,8 @@ struct btusb_data {
__u8 cmdreq;
unsigned int sco_num;
+ unsigned int air_mode;
+ bool usb_alt6_packet_flow;
int isoc_altsetting;
int suspend_count;
@@ -983,6 +990,42 @@ static void btusb_isoc_complete(struct urb *urb)
}
}
+static inline void __fill_isoc_descriptor_msbc(struct urb *urb, int len,
+ int mtu, struct btusb_data *data)
+{
+ int i, offset = 0;
+ unsigned int interval;
+
+ BT_DBG("len %d mtu %d", len, mtu);
+
+ /* For mSBC ALT 6 setting the host will send the packet at continuous
+ * flow. As per core spec 5, vol 4, part B, table 2.1. For ALT setting
+ * 6 the HCI PACKET INTERVAL should be 7.5ms for every usb packets.
+ * To maintain the rate we send 63bytes of usb packets alternatively for
+ * 7ms and 8ms to maintain the rate as 7.5ms.
+ */
+ if (data->usb_alt6_packet_flow) {
+ interval = 7;
+ data->usb_alt6_packet_flow = false;
+ } else {
+ interval = 6;
+ data->usb_alt6_packet_flow = true;
+ }
+
+ for (i = 0; i < interval; i++) {
+ urb->iso_frame_desc[i].offset = offset;
+ urb->iso_frame_desc[i].length = offset;
+ }
+
+ if (len && i < BTUSB_MAX_ISOC_FRAMES) {
+ urb->iso_frame_desc[i].offset = offset;
+ urb->iso_frame_desc[i].length = len;
+ i++;
+ }
+
+ urb->number_of_packets = i;
+}
+
static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
{
int i, offset = 0;
@@ -1386,9 +1429,13 @@ static struct urb *alloc_isoc_urb(struct hci_dev *hdev, struct sk_buff *skb)
urb->transfer_flags = URB_ISO_ASAP;
- __fill_isoc_descriptor(urb, skb->len,
- le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
-
+ if (data->isoc_altsetting == 6)
+ __fill_isoc_descriptor_msbc(urb, skb->len,
+ le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize),
+ data);
+ else
+ __fill_isoc_descriptor(urb, skb->len,
+ le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
skb->dev = (void *)hdev;
return urb;
@@ -1484,6 +1531,7 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
if (hci_conn_num(hdev, SCO_LINK) != data->sco_num) {
data->sco_num = hci_conn_num(hdev, SCO_LINK);
+ data->air_mode = evt;
schedule_work(&data->work);
}
}
@@ -1531,11 +1579,70 @@ static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
return 0;
}
+static int btusb_switch_alt_setting(struct hci_dev *hdev, int new_alts)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ int err;
+
+ if (data->isoc_altsetting != new_alts) {
+ unsigned long flags;
+
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ usb_kill_anchored_urbs(&data->isoc_anchor);
+
+ /* When isochronous alternate setting needs to be
+ * changed, because SCO connection has been added
+ * or removed, a packet fragment may be left in the
+ * reassembling state. This could lead to wrongly
+ * assembled fragments.
+ *
+ * Clear outstanding fragment when selecting a new
+ * alternate setting.
+ */
+ spin_lock_irqsave(&data->rxlock, flags);
+ kfree_skb(data->sco_skb);
+ data->sco_skb = NULL;
+ spin_unlock_irqrestore(&data->rxlock, flags);
+
+ err = __set_isoc_interface(hdev, new_alts);
+ if (err < 0)
+ return err;
+ }
+
+ if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
+ if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0)
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ else
+ btusb_submit_isoc_urb(hdev, GFP_KERNEL);
+ }
+
+ return 0;
+}
+
+static struct usb_host_interface *btusb_find_altsetting(struct btusb_data *data,
+ int alt)
+{
+ struct usb_interface *intf = data->isoc;
+ int i;
+
+ BT_DBG("Looking for Alt no :%d", alt);
+
+ if (!intf)
+ return NULL;
+
+ for (i = 0; i < intf->num_altsetting; i++) {
+ if (intf->altsetting[i].desc.bAlternateSetting == alt)
+ return &intf->altsetting[i];
+ }
+
+ return NULL;
+}
+
static void btusb_work(struct work_struct *work)
{
struct btusb_data *data = container_of(work, struct btusb_data, work);
struct hci_dev *hdev = data->hdev;
- int new_alts;
+ int new_alts = 0;
int err;
if (data->sco_num > 0) {
@@ -1550,44 +1657,27 @@ static void btusb_work(struct work_struct *work)
set_bit(BTUSB_DID_ISO_RESUME, &data->flags);
}
- if (hdev->voice_setting & 0x0020) {
- static const int alts[3] = { 2, 4, 5 };
-
- new_alts = alts[data->sco_num - 1];
- } else {
- new_alts = data->sco_num;
- }
-
- if (data->isoc_altsetting != new_alts) {
- unsigned long flags;
+ if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_CVSD) {
+ if (hdev->voice_setting & 0x0020) {
+ static const int alts[3] = { 2, 4, 5 };
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- usb_kill_anchored_urbs(&data->isoc_anchor);
-
- /* When isochronous alternate setting needs to be
- * changed, because SCO connection has been added
- * or removed, a packet fragment may be left in the
- * reassembling state. This could lead to wrongly
- * assembled fragments.
- *
- * Clear outstanding fragment when selecting a new
- * alternate setting.
- */
- spin_lock_irqsave(&data->rxlock, flags);
- kfree_skb(data->sco_skb);
- data->sco_skb = NULL;
- spin_unlock_irqrestore(&data->rxlock, flags);
+ new_alts = alts[data->sco_num - 1];
+ } else {
+ new_alts = data->sco_num;
+ }
+ } else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) {
- if (__set_isoc_interface(hdev, new_alts) < 0)
- return;
- }
+ data->usb_alt6_packet_flow = true;
- if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
- if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0)
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ /* Check if Alt 6 is supported for Transparent audio */
+ if (btusb_find_altsetting(data, 6))
+ new_alts = 6;
else
- btusb_submit_isoc_urb(hdev, GFP_KERNEL);
+ bt_dev_err(hdev, "Device does not support ALT setting 6");
}
+
+ if (btusb_switch_alt_setting(hdev, new_alts) < 0)
+ bt_dev_err(hdev, "set USB alt:(%d) failed!", new_alts);
} else {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -2252,7 +2342,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
if (ver.fw_variant == 0x23) {
clear_bit(BTUSB_BOOTLOADER, &data->flags);
btintel_check_bdaddr(hdev);
- return 0;
+ goto finish;
}
/* If the device is not in bootloader mode, then the only possible
@@ -2452,6 +2542,23 @@ done:
*/
btintel_load_ddc_config(hdev, fwname);
+ /* Read the Intel version information after loading the FW */
+ err = btintel_read_version(hdev, &ver);
+ if (err)
+ return err;
+
+ btintel_version_info(hdev, &ver);
+
+finish:
+ /* All Intel controllers that support the Microsoft vendor
+ * extension use 0xFC1E for VsMsftOpCode.
+ */
+ switch (ver.hw_variant) {
+ case 0x12: /* ThP */
+ hci_set_msft_opcode(hdev, 0xFC1E);
+ break;
+ }
+
/* Set the event mask for Intel specific vendor events. This enables
* a few extra events that are useful during general operation. It
* does not enable any debugging related events.
@@ -2461,13 +2568,6 @@ done:
*/
btintel_set_event_mask(hdev, false);
- /* Read the Intel version information after loading the FW */
- err = btintel_read_version(hdev, &ver);
- if (err)
- return err;
-
- btintel_version_info(hdev, &ver);
-
return 0;
}
@@ -3600,6 +3700,13 @@ static void btusb_check_needs_reset_resume(struct usb_interface *intf)
interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
+static bool btusb_prevent_wake(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+
+ return !device_may_wakeup(&data->udev->dev);
+}
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -3733,6 +3840,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
+ hdev->prevent_wake = btusb_prevent_wake;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -3877,6 +3985,9 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ if (id->driver_info & BTUSB_VALID_LE_STATES)
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
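The btusb_work() hunk above picks the isochronous alternate setting from the air mode recorded by btusb_notify(). A minimal sketch of that selection as a standalone helper (illustrative only, not part of the patch; the HCI_NOTIFY_* constants, the voice_setting bit and the alt table are taken from the hunk):

static int pick_isoc_alt_setting(unsigned int air_mode, int sco_num,
                                 __u16 voice_setting, bool have_alt6)
{
        if (air_mode == HCI_NOTIFY_ENABLE_SCO_CVSD) {
                /* CVSD: the alt setting follows the number of SCO links,
                 * or the { 2, 4, 5 } table when bit 0x0020 of the voice
                 * setting is set, exactly as in btusb_work().
                 */
                static const int alts[3] = { 2, 4, 5 };

                return (voice_setting & 0x0020) ? alts[sco_num - 1] : sco_num;
        }

        if (air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP)
                /* Transparent (mSBC): alt 6 when the interface offers it */
                return have_alt6 ? 6 : 0;

        return 0;
}

In the patch the chosen value then goes through btusb_switch_alt_setting(), which stops the running isoc URBs and drops any half-reassembled SCO fragment before actually switching the interface.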
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index b236cb11c0dc..8ea5ca8d71d6 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -118,6 +118,7 @@ struct bcm_device {
u32 oper_speed;
int irq;
bool irq_active_low;
+ bool irq_acquired;
#ifdef CONFIG_PM
struct hci_uart *hu;
@@ -333,6 +334,8 @@ static int bcm_request_irq(struct bcm_data *bcm)
goto unlock;
}
+ bdev->irq_acquired = true;
+
device_init_wakeup(bdev->dev, true);
pm_runtime_set_autosuspend_delay(bdev->dev,
@@ -514,7 +517,7 @@ static int bcm_close(struct hci_uart *hu)
}
if (bdev) {
- if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
+ if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) {
devm_free_irq(bdev->dev, bdev->irq, bdev);
device_init_wakeup(bdev->dev, false);
pm_runtime_disable(bdev->dev);
@@ -550,8 +553,7 @@ static int bcm_flush(struct hci_uart *hu)
static int bcm_setup(struct hci_uart *hu)
{
struct bcm_data *bcm = hu->priv;
- char fw_name[64];
- const struct firmware *fw;
+ bool fw_load_done = false;
unsigned int speed;
int err;
@@ -560,21 +562,12 @@ static int bcm_setup(struct hci_uart *hu)
hu->hdev->set_diag = bcm_set_diag;
hu->hdev->set_bdaddr = btbcm_set_bdaddr;
- err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name), false);
+ err = btbcm_initialize(hu->hdev, &fw_load_done);
if (err)
return err;
- err = request_firmware(&fw, fw_name, &hu->hdev->dev);
- if (err < 0) {
- bt_dev_info(hu->hdev, "BCM: Patch %s not found", fw_name);
+ if (!fw_load_done)
return 0;
- }
-
- err = btbcm_patchram(hu->hdev, fw);
- if (err) {
- bt_dev_info(hu->hdev, "BCM: Patch failed (%d)", err);
- goto finalize;
- }
/* Init speed if any */
if (hu->init_speed)
@@ -613,13 +606,16 @@ static int bcm_setup(struct hci_uart *hu)
btbcm_write_pcm_int_params(hu->hdev, &params);
}
-finalize:
- release_firmware(fw);
-
- err = btbcm_finalize(hu->hdev);
+ err = btbcm_finalize(hu->hdev, &fw_load_done);
if (err)
return err;
+ /* Some devices ship with the controller default address.
+ * Allow the bootloader to set a valid address through the
+ * device tree.
+ */
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hu->hdev->quirks);
+
if (!bcm_request_irq(bcm))
err = bcm_setup_sleep(hu);
@@ -1153,7 +1149,8 @@ static int bcm_of_probe(struct bcm_device *bdev)
device_property_read_u8_array(bdev->dev, "brcm,bt-pcm-int-params",
bdev->pcm_int_params, 5);
bdev->irq = of_irq_get_byname(bdev->dev->of_node, "host-wakeup");
-
+ bdev->irq_active_low = irq_get_trigger_type(bdev->irq)
+ & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW);
return 0;
}
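The bcm_of_probe() hunk above derives the wake IRQ polarity from the firmware-described trigger type. A small illustrative restatement (not part of the patch) of that one-liner:

static bool host_wakeup_is_active_low(int irq)
{
        /* A falling-edge or level-low trigger means the host-wakeup
         * line is active low, matching bdev->irq_active_low above.
         */
        return irq_get_trigger_type(irq) &
               (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW);
}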
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 106c110efe56..e60b2e0773db 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -1018,6 +1018,8 @@ static const struct of_device_id rtl_bluetooth_of_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
{ .compatible = "realtek,rtl8822cs-bt",
.data = (const void *)&rtl_vnd },
+ { .compatible = "realtek,rtl8723bs-bt",
+ .data = (const void *)&rtl_vnd },
#endif
{ },
};
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 439392b1c043..b3fd07a6f812 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -26,6 +26,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
@@ -596,10 +597,12 @@ static int qca_open(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qca_is_wcn399x(qcadev->btsoc_type)) {
+
+ if (qca_is_wcn399x(qcadev->btsoc_type))
hu->init_speed = qcadev->init_speed;
+
+ if (qcadev->oper_speed)
hu->oper_speed = qcadev->oper_speed;
- }
}
timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
@@ -1596,7 +1599,7 @@ static int qca_setup(struct hci_uart *hu)
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
bt_dev_info(hdev, "setting up %s",
- qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME");
+ qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME/QCA6390");
retry:
ret = qca_power_on(hdev);
@@ -1665,10 +1668,10 @@ retry:
}
/* Setup bdaddr */
- if (qca_is_wcn399x(soc_type))
- hu->hdev->set_bdaddr = qca_set_bdaddr;
- else
+ if (soc_type == QCA_ROME)
hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
+ else
+ hu->hdev->set_bdaddr = qca_set_bdaddr;
return ret;
}
@@ -1721,6 +1724,11 @@ static const struct qca_vreg_data qca_soc_data_wcn3998 = {
.num_vregs = 4,
};
+static const struct qca_vreg_data qca_soc_data_qca6390 = {
+ .soc_type = QCA_QCA6390,
+ .num_vregs = 0,
+};
+
static void qca_power_shutdown(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
@@ -1764,7 +1772,7 @@ static int qca_power_off(struct hci_dev *hdev)
enum qca_btsoc_type soc_type = qca_soc_type(hu);
/* Stop sending shutdown command if soc crashes. */
- if (qca_is_wcn399x(soc_type)
+ if (soc_type != QCA_ROME
&& qca->memdump_state == QCA_MEMDUMP_IDLE) {
qca_send_pre_shutdown_cmd(hdev);
usleep_range(8000, 10000);
@@ -1865,6 +1873,11 @@ static int qca_serdev_probe(struct serdev_device *serdev)
serdev_device_set_drvdata(serdev, qcadev);
device_property_read_string(&serdev->dev, "firmware-name",
&qcadev->firmware_name);
+ device_property_read_u32(&serdev->dev, "max-speed",
+ &qcadev->oper_speed);
+ if (!qcadev->oper_speed)
+ BT_DBG("UART will pick default operating speed");
+
if (data && qca_is_wcn399x(data->soc_type)) {
qcadev->btsoc_type = data->soc_type;
qcadev->bt_power = devm_kzalloc(&serdev->dev,
@@ -1889,18 +1902,17 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return PTR_ERR(qcadev->susclk);
}
- device_property_read_u32(&serdev->dev, "max-speed",
- &qcadev->oper_speed);
- if (!qcadev->oper_speed)
- BT_DBG("UART will pick default operating speed");
-
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("wcn3990 serdev registration failed");
return err;
}
} else {
- qcadev->btsoc_type = QCA_ROME;
+ if (data)
+ qcadev->btsoc_type = data->soc_type;
+ else
+ qcadev->btsoc_type = QCA_ROME;
+
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (!qcadev->bt_en) {
@@ -2044,21 +2056,38 @@ static int __maybe_unused qca_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
+#ifdef CONFIG_OF
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
+ { .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
+ { .compatible = "qcom,qca9377-bt" },
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
+ { "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { "DLB26390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, qca_bluetooth_acpi_match);
+#endif
+
static struct serdev_device_driver qca_serdev_driver = {
.probe = qca_serdev_probe,
.remove = qca_serdev_remove,
.driver = {
.name = "hci_uart_qca",
- .of_match_table = qca_bluetooth_of_match,
+ .of_match_table = of_match_ptr(qca_bluetooth_of_match),
+ .acpi_match_table = ACPI_PTR(qca_bluetooth_acpi_match),
.pm = &qca_pm_ops,
},
};
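With both an OF and an ACPI match table registered above, the probe path can resolve the per-SoC data without caring which firmware interface matched. A hedged sketch, assuming qca_serdev_probe() obtains it through the generic device_get_match_data() helper (the exact call is not visible in this diff) and that entries without .data fall back to ROME as in the probe hunk earlier:

static enum qca_btsoc_type qca_soc_type_from_fwnode(struct serdev_device *serdev)
{
        const struct qca_vreg_data *data = device_get_match_data(&serdev->dev);

        /* Table entries such as "qcom,qca6174-bt" carry no data and
         * default to QCA_ROME, as in the qca_serdev_probe() hunk above.
         */
        return data ? data->soc_type : QCA_ROME;
}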
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 4652896d4990..599855e4c57c 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -21,8 +21,6 @@
#include "hci_uart.h"
-static struct serdev_device_ops hci_serdev_client_ops;
-
static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type)
{
struct hci_dev *hdev = hu->hdev;
@@ -260,7 +258,7 @@ static int hci_uart_receive_buf(struct serdev_device *serdev, const u8 *data,
return count;
}
-static struct serdev_device_ops hci_serdev_client_ops = {
+static const struct serdev_device_ops hci_serdev_client_ops = {
.receive_buf = hci_uart_receive_buf,
.write_wakeup = hci_uart_write_wakeup,
};
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index faca0f346fff..e3bbe108eb54 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3631,7 +3631,7 @@ static void cdrom_update_settings(void)
}
static int cdrom_sysctl_handler(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0d10e31fd342..1e0db78b83ba 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2057,7 +2057,7 @@ static char sysctl_bootid[16];
* sysctl system call, as 16 bytes of binary data.
*/
static int proc_do_uuid(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid;
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index c29b80dd30d8..b8c1c4dd3ef0 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1054,8 +1054,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
u32 temp = be32_to_cpu(*--b);
temp = ~temp;
- c = (u64)temp + 1; // No of block can processed withou overflow
- if ((bytes / AES_BLOCK_SIZE) > c)
+ c = (u64)temp + 1; // Number of blocks that can be processed without overflow
+ if ((bytes / AES_BLOCK_SIZE) >= c)
bytes = c * AES_BLOCK_SIZE;
return bytes;
}
@@ -1077,7 +1077,14 @@ static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
keylen = ablkctx->enckey_len / 2;
key = ablkctx->key + keylen;
- ret = aes_expandkey(&aes, key, keylen);
+ /* For a 192 bit key, remove the padded zeroes which were
+ * added in chcr_aes_xts_setkey
+ */
+ if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
+ == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
+ ret = aes_expandkey(&aes, key, keylen - 8);
+ else
+ ret = aes_expandkey(&aes, key, keylen);
if (ret)
return ret;
aes_encrypt(&aes, iv, iv);
@@ -1158,15 +1165,16 @@ static int chcr_final_cipher_iv(struct skcipher_request *req,
static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err)
{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chcr_context *ctx = c_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
- struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
- struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
- struct cipher_wr_param wrparam;
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct chcr_dev *dev = c_ctx(tfm)->dev;
+ struct chcr_context *ctx = c_ctx(tfm);
+ struct adapter *adap = padap(ctx->dev);
+ struct cipher_wr_param wrparam;
+ struct sk_buff *skb;
int bytes;
if (err)
@@ -1197,6 +1205,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
if (unlikely(bytes == 0)) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
+ memcpy(req->iv, reqctx->init_iv, IV);
+ atomic_inc(&adap->chcr_stats.fallback);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
req->src,
@@ -1248,20 +1258,28 @@ static int process_cipher(struct skcipher_request *req,
struct sk_buff **skb,
unsigned short op_type)
{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
- struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+ struct adapter *adap = padap(c_ctx(tfm)->dev);
struct cipher_wr_param wrparam;
int bytes, err = -EINVAL;
+ int subtype;
reqctx->processed = 0;
reqctx->partial_req = 0;
if (!req->iv)
goto error;
+ subtype = get_cryptoalg_subtype(tfm);
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
(req->cryptlen == 0) ||
(req->cryptlen % crypto_skcipher_blocksize(tfm))) {
+ if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
+ goto fallback;
+ else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
+ subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+ goto fallback;
pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
ablkctx->enckey_len, req->cryptlen, ivsize);
goto error;
@@ -1302,12 +1320,10 @@ static int process_cipher(struct skcipher_request *req,
} else {
bytes = req->cryptlen;
}
- if (get_cryptoalg_subtype(tfm) ==
- CRYPTO_ALG_SUB_TYPE_CTR) {
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->iv, bytes);
}
- if (get_cryptoalg_subtype(tfm) ==
- CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
CTR_RFC3686_IV_SIZE);
@@ -1315,20 +1331,25 @@ static int process_cipher(struct skcipher_request *req,
/* initialize counter portion of counter block */
*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ memcpy(reqctx->init_iv, reqctx->iv, IV);
} else {
memcpy(reqctx->iv, req->iv, IV);
+ memcpy(reqctx->init_iv, req->iv, IV);
}
if (unlikely(bytes == 0)) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
+fallback: atomic_inc(&adap->chcr_stats.fallback);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
req->src,
req->dst,
req->cryptlen,
- reqctx->iv,
+ subtype ==
+ CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
+ reqctx->iv : req->iv,
op_type);
goto error;
}
@@ -1984,7 +2005,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
req_ctx->data_len += params.bfr_len + params.sg_len;
if (req->nbytes == 0) {
- create_last_hash_block(req_ctx->reqbfr, bs, 0);
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.more = 1;
params.bfr_len = bs;
}
@@ -2250,12 +2271,28 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
ablkctx->enckey_len = key_len;
get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
- ablkctx->key_ctx_hdr =
+ /* Both XTS keys must be aligned to a 16 byte boundary by
+ * padding with zeros, so each 24 byte key gets 8 bytes of
+ * zero padding.
+ */
+ if (key_len == 48) {
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
+ + 16) >> 4;
+ memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
+ memset(ablkctx->key + 24, 0, 8);
+ memset(ablkctx->key + 56, 0, 8);
+ ablkctx->enckey_len = 64;
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ } else {
+ ablkctx->key_ctx_hdr =
FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
CHCR_KEYCTX_NO_KEY, 1,
0, context_size);
+ }
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
return 0;
badkey_err:
@@ -2556,7 +2593,7 @@ int chcr_aead_dma_map(struct device *dev,
int dst_size;
dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
+ 0 : authsize);
if (!req->cryptlen || !dst_size)
return 0;
reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
@@ -2603,15 +2640,16 @@ void chcr_aead_dma_unmap(struct device *dev,
int dst_size;
dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
+ 0 : authsize);
if (!req->cryptlen || !dst_size)
return;
dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
DMA_BIDIRECTIONAL);
if (req->src == req->dst) {
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src,
+ sg_nents_for_len(req->src, dst_size),
+ DMA_BIDIRECTIONAL);
} else {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
@@ -2910,7 +2948,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
unsigned int ccm_xtra;
- unsigned char tag_offset = 0, auth_offset = 0;
+ unsigned int tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
@@ -3702,6 +3740,13 @@ static int chcr_aead_op(struct aead_request *req,
return -ENOSPC;
}
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+ crypto_ipsec_check_assoclen(req->assoclen) != 0) {
+ pr_err("RFC4106: Invalid value of assoclen %d\n",
+ req->assoclen);
+ return -EINVAL;
+ }
+
/* Form a WR from req */
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
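The chcr_aes_xts_setkey() hunk above pads a 48 byte (2 x 24 byte) XTS key so that both halves start on a 16 byte boundary. A standalone illustration of the resulting layout (plain C, not driver code):

#include <string.h>

/* Pad a 2 x 24 byte XTS-192 key into the 64 byte layout set up above:
 * key1 at offset 0, key2 at offset 32, the two 8 byte gaps zeroed.
 */
static void pad_xts192_key(unsigned char dst[64], const unsigned char src[48])
{
        memset(dst, 0, 64);
        memcpy(dst, src, 24);           /* key1: bytes  0..23 */
        memcpy(dst + 32, src + 24, 24); /* key2: bytes 32..55 */
        /* bytes 24..31 and 56..63 stay zero */
}

With enckey_len forced to 64, chcr_update_tweak() then finds key2 at key + enckey_len / 2 and strips the 8 padding bytes by expanding only keylen - 8 bytes, which is what the CHCR_KEYCTX_CIPHER_KEY_SIZE_192 branch above does.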
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 542bebae001f..b3fdbdc25acb 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -302,6 +302,7 @@ struct chcr_skcipher_req_ctx {
unsigned int op;
u16 imm;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN];
u16 txqidx;
u16 rxqidx;
};
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 9fd3b9d1ec2f..d25689837b26 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -294,9 +294,6 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
return false;
}
- /* Inline single pdu */
- if (skb_shinfo(skb)->gso_size)
- return false;
return true;
}
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 2a334800f109..228be05fbaf8 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,11 +1,25 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
+obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
+
+mlx5_ib-y := ah.o \
+ cmd.o \
+ cong.o \
+ cq.o \
+ doorbell.o \
+ gsi.o \
+ ib_virt.o \
+ mad.o \
+ main.o \
+ mem.o \
+ mr.o \
+ qp.o \
+ qpc.o \
+ restrack.o \
+ srq.o \
+ srq_cmd.o
-mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \
- srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \
- cong.o restrack.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
-mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
-mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += flow.o
-mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += qos.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o \
+ flow.o \
+ qos.o
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 4c26492ab8a3..a2fcbc49131e 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -327,23 +327,6 @@ int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
- u16 uid)
-{
- u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
- int err;
-
- MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- MLX5_SET(alloc_q_counter_in, in, uid, uid);
-
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *counter_id = MLX5_GET(alloc_q_counter_out, out,
- counter_set_id);
- return err;
-}
-
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port)
{
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 945ebce73613..43079b18d9b4 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -61,8 +61,6 @@ int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
u32 qpn, u16 uid);
int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
-int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
- u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 146ba2966744..0c18cb6a2f14 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -36,6 +36,7 @@
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "srq.h"
+#include "qp.h"
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
@@ -201,7 +202,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
case MLX5_CQE_RESP_WR_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->imm_inval_pkey;
+ wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND:
wc->opcode = IB_WC_RECV;
@@ -213,12 +214,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
case MLX5_CQE_RESP_SEND_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->imm_inval_pkey;
+ wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
- wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
break;
}
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
@@ -226,7 +227,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
wc->wc_flags |= g ? IB_WC_GRH : 0;
if (unlikely(is_qp1(qp->ibqp.qp_type))) {
- u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
+ u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
&wc->pkey_index);
@@ -484,7 +485,7 @@ repoll:
* because CQs will be locked while QPs are removed
* from the table.
*/
- mqp = __mlx5_qp_lookup(dev->mdev, qpn);
+ mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
*cur_qp = to_mibqp(mqp);
}
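The cq.c hunk above replaces __mlx5_qp_lookup() with a direct lookup in the QP table that now lives in struct mlx5_ib_dev (see the mlx5_ib.h and qpc.c changes later in this diff). A hedged sketch of that lookup wrapped as a helper (the wrapper is illustrative; the patch open-codes it):

static struct mlx5_core_qp *mlx5_ib_lookup_qp(struct mlx5_ib_dev *dev, u32 qpn)
{
        /* dev->qp_table.tree is populated when QPs are created
         * (create_resource_common() in qpc.c); the CQ poll path can use
         * it directly because QPs stay in the tree while CQs are locked,
         * as the surrounding comment notes.
         */
        return radix_tree_lookup(&dev->qp_table.tree, qpn);
}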
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 46e1ab771f10..35b98c2d64d5 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -14,6 +14,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
+#include "qp.h"
#include <linux/xarray.h>
#define UVERBS_MODULE_NAME mlx5_ib
@@ -1356,7 +1357,7 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
}
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
- ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
+ ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
else
@@ -1450,9 +1451,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (opcode == MLX5_CMD_OP_CREATE_DCT) {
obj->flags |= DEVX_OBJ_FLAGS_DCT;
- err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
- cmd_in, cmd_in_len,
- cmd_out, cmd_out_len);
+ err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
+ cmd_in_len, cmd_out, cmd_out_len);
} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
obj->flags |= DEVX_OBJ_FLAGS_CQ;
obj->core_cq.comp = devx_cq_comp;
@@ -1499,7 +1499,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
obj_destroy:
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
- mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
+ mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
else
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index 862b7bf3e646..69cb7e6e8955 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -427,7 +427,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
num_actions = uverbs_attr_ptr_get_array_size(
attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
- MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto));
+ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto));
if (num_actions < 0)
return num_actions;
@@ -648,7 +648,7 @@ DECLARE_UVERBS_NAMED_METHOD(
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
- set_action_in_add_action_in_auto)),
+ set_add_copy_action_in_auto)),
UA_MANDATORY,
UA_ALLOC_AND_COPY),
UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index b61165359954..46b2d370fb3f 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -134,7 +134,7 @@ int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
if (!out)
return -ENOMEM;
- err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz);
+ err = mlx5_core_query_vport_counter(mdev, true, vf, port, out);
if (err)
goto ex;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 14e0c17de6a9..454ce5de2de7 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
@@ -188,8 +187,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
goto done;
}
- err = mlx5_core_query_vport_counter(mdev, 0, 0,
- mdev_port_num, out_cnt, sz);
+ err = mlx5_core_query_vport_counter(mdev, 0, 0, mdev_port_num,
+ out_cnt);
if (!err)
pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
} else {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6679756506e6..65e0e24d463b 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -59,6 +59,7 @@
#include "ib_rep.h"
#include "cmd.h"
#include "srq.h"
+#include "qp.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
@@ -2443,7 +2444,7 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
act_size = roundup_pow_of_two(act_size);
dm->size = act_size;
- err = mlx5_dm_sw_icm_alloc(dev, type, act_size,
+ err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment,
to_mucontext(ctx)->devx_uid, &dm->dev_addr,
&dm->icm_dm.obj_id);
if (err)
@@ -4632,8 +4633,7 @@ static void delay_drop_handler(struct work_struct *work)
atomic_inc(&delay_drop->events_cnt);
mutex_lock(&delay_drop->lock);
- err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
- delay_drop->timeout);
+ err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
if (err) {
mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
delay_drop->timeout);
@@ -5439,15 +5439,21 @@ static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
int num_cnt_ports;
int i;
num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+
for (i = 0; i < num_cnt_ports; i++) {
- if (dev->port[i].cnts.set_id_valid)
- mlx5_core_dealloc_q_counter(dev->mdev,
- dev->port[i].cnts.set_id);
+ if (dev->port[i].cnts.set_id) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ dev->port[i].cnts.set_id);
+ mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
+ }
kfree(dev->port[i].cnts.names);
kfree(dev->port[i].cnts.offsets);
}
@@ -5556,11 +5562,14 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
int num_cnt_ports;
int err = 0;
int i;
bool is_shared;
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
@@ -5572,17 +5581,19 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
dev->port[i].cnts.offsets);
- err = mlx5_cmd_alloc_q_counter(dev->mdev,
- &dev->port[i].cnts.set_id,
- is_shared ?
- MLX5_SHARED_RESOURCE_UID : 0);
+ MLX5_SET(alloc_q_counter_in, in, uid,
+ is_shared ? MLX5_SHARED_RESOURCE_UID : 0);
+
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
if (err) {
mlx5_ib_warn(dev,
"couldn't allocate queue counter for port %d, err %d\n",
i + 1, err);
goto err_alloc;
}
- dev->port[i].cnts.set_id_valid = true;
+
+ dev->port[i].cnts.set_id =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}
return 0;
@@ -5638,27 +5649,23 @@ static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
struct rdma_hw_stats *stats,
u16 set_id)
{
- int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
- void *out;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
__be32 val;
int ret, i;
- out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen);
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, counter_set_id, set_id);
+ ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out);
if (ret)
- goto free;
+ return ret;
for (i = 0; i < cnts->num_q_counters; i++) {
- val = *(__be32 *)(out + cnts->offsets[i]);
+ val = *(__be32 *)((void *)out + cnts->offsets[i]);
stats->value[i] = (u64)be32_to_cpu(val);
}
-free:
- kvfree(out);
- return ret;
+ return 0;
}
static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
@@ -5765,20 +5772,38 @@ static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
counter->stats, counter->id);
}
+static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
+
+ if (!counter->id)
+ return 0;
+
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
+}
+
static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
- u16 cnt_set_id = 0;
int err;
if (!counter->id) {
- err = mlx5_cmd_alloc_q_counter(dev->mdev,
- &cnt_set_id,
- MLX5_SHARED_RESOURCE_UID);
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
+
+ MLX5_SET(alloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ MLX5_SET(alloc_q_counter_in, in, uid, MLX5_SHARED_RESOURCE_UID);
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
if (err)
return err;
- counter->id = cnt_set_id;
+ counter->id =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}
err = mlx5_ib_qp_set_counter(qp, counter);
@@ -5788,7 +5813,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return 0;
fail_set_counter:
- mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id);
+ mlx5_ib_counter_dealloc(counter);
counter->id = 0;
return err;
@@ -5799,13 +5824,6 @@ static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
return mlx5_ib_qp_set_counter(qp, NULL);
}
-static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
-{
- struct mlx5_ib_dev *dev = to_mdev(counter->device);
-
- return mlx5_core_dealloc_q_counter(dev->mdev, counter->id);
-}
-
static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params)
@@ -7175,6 +7193,9 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
mlx5_ib_stage_roce_init,
mlx5_ib_stage_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_QP,
+ mlx5_init_qp_table,
+ mlx5_cleanup_qp_table),
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
mlx5_init_srq_table,
mlx5_cleanup_srq_table),
@@ -7232,6 +7253,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
mlx5_ib_stage_raw_eth_roce_init,
mlx5_ib_stage_raw_eth_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_QP,
+ mlx5_init_qp_table,
+ mlx5_cleanup_qp_table),
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
mlx5_init_srq_table,
mlx5_cleanup_srq_table),
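The counter hunks above drop the mlx5_cmd_alloc_q_counter() wrapper (removed from cmd.c earlier in this diff) and issue the command directly. A minimal sketch of the resulting allocation pattern, using the same MLX5_SET()/MLX5_GET()/mlx5_cmd_exec_inout() calls as mlx5_ib_alloc_counters(); only the wrapper function itself is illustrative:

static int q_counter_alloc(struct mlx5_core_dev *mdev, u16 uid, u16 *set_id)
{
        u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
        u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
        int err;

        MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
        MLX5_SET(alloc_q_counter_in, in, uid, uid);

        err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
        if (err)
                return err;

        *set_id = MLX5_GET(alloc_q_counter_out, out, counter_set_id);
        return 0;
}

Deallocation mirrors it with dealloc_q_counter_in, MLX5_CMD_OP_DEALLOC_Q_COUNTER and mlx5_cmd_exec_in(), as in mlx5_ib_counter_dealloc() above.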
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a4e522385de0..aaabb8a98eed 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -780,7 +780,6 @@ struct mlx5_ib_counters {
u32 num_cong_counters;
u32 num_ext_ppcnt_counters;
u16 set_id;
- bool set_id_valid;
};
struct mlx5_ib_multiport_info;
@@ -870,6 +869,7 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_CAPS,
MLX5_IB_STAGE_NON_DEFAULT_CB,
MLX5_IB_STAGE_ROCE,
+ MLX5_IB_STAGE_QP,
MLX5_IB_STAGE_SRQ,
MLX5_IB_STAGE_DEVICE_RESOURCES,
MLX5_IB_STAGE_DEVICE_NOTIFIER,
@@ -1065,6 +1065,7 @@ struct mlx5_ib_dev {
struct mlx5_dm dm;
u16 devx_whitelist_uid;
struct mlx5_srq_table srq_table;
+ struct mlx5_qp_table qp_table;
struct mlx5_async_ctx async_ctx;
struct mlx5_devx_event_table devx_event_table;
struct mlx5_var_table var_table;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3de7606d4a1a..16af1105cfcf 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -36,6 +36,7 @@
#include "mlx5_ib.h"
#include "cmd.h"
+#include "qp.h"
#include <linux/mlx5/eq.h>
@@ -1219,7 +1220,7 @@ static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
case MLX5_WQE_PF_TYPE_RESP:
case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
- common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
+ common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);
break;
default:
break;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2210759843ba..d93eec5d3277 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -39,6 +39,7 @@
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
+#include "qp.h"
/* not supported currently */
static int wq_signature;
@@ -1254,7 +1255,7 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq, u32 tdn,
struct ib_pd *pd)
{
- u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
@@ -1262,7 +1263,7 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
if (qp->flags & MLX5_IB_QP_UNDERLAY)
MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
- return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
+ return mlx5_core_create_tis(dev->mdev, in, &sq->tisn);
}
static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
@@ -1336,7 +1337,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);
- err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);
+ err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp);
kvfree(in);
@@ -1356,7 +1357,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq)
{
destroy_flow_rule_vport_sq(sq);
- mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
+ mlx5_core_destroy_sq_tracked(dev, &sq->base.mqp);
ib_umem_release(sq->ubuffer.umem);
}
@@ -1426,7 +1427,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
memcpy(pas, qp_pas, rq_pas_size);
- err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);
+ err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp);
kvfree(in);
@@ -1436,7 +1437,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq)
{
- mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
+ mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
}
static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
@@ -1459,9 +1460,8 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, u32 tdn,
- u32 *qp_flags_en,
- struct ib_pd *pd,
- u32 *out, int outlen)
+ u32 *qp_flags_en, struct ib_pd *pd,
+ u32 *out)
{
u8 lb_flag = 0;
u32 *in;
@@ -1494,9 +1494,8 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
}
MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
-
- err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
-
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
rq->tirn = MLX5_GET(create_tir_out, out, tirn);
if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
err = mlx5_ib_enable_lb(dev, false, true);
@@ -1556,9 +1555,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
goto err_destroy_sq;
- err = create_raw_packet_qp_tir(
- dev, rq, tdn, &qp->flags_en, pd, out,
- MLX5_ST_SZ_BYTES(create_tir_out));
+ err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd,
+ out);
if (err)
goto err_destroy_rq;
@@ -1853,7 +1851,8 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
create_tir:
- err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
@@ -2347,7 +2346,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
&resp);
} else {
- err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
+ err = mlx5_core_create_qp(dev, &base->mqp, in, inlen);
}
if (err) {
@@ -2513,8 +2512,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp->state != IB_QPS_RESET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
!(qp->flags & MLX5_IB_QP_UNDERLAY)) {
- err = mlx5_core_qp_modify(dev->mdev,
- MLX5_CMD_OP_2RST_QP, 0,
+ err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp);
} else {
struct mlx5_modify_raw_qp_param raw_qp_param = {
@@ -2555,7 +2553,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
qp->flags & MLX5_IB_QP_UNDERLAY) {
destroy_raw_packet_qp(dev, qp);
} else {
- err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
+ err = mlx5_core_destroy_qp(dev, &base->mqp);
if (err)
mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
base->mqp.qpn);
@@ -2818,7 +2816,7 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
if (mqp->state == IB_QPS_RTR) {
int err;
- err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
+ err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
if (err) {
mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
return err;
@@ -2933,7 +2931,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
- err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
+ err = mlx5_core_modify_tis(dev, sq->tisn, in);
kvfree(in);
@@ -2960,7 +2958,7 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
- err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
+ err = mlx5_core_modify_tis(dev, sq->tisn, in);
kvfree(in);
@@ -3240,7 +3238,7 @@ static int modify_raw_packet_qp_rq(
"RAW PACKET QP counters are not supported on current FW\n");
}
- err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
+ err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in);
if (err)
goto out;
@@ -3303,7 +3301,7 @@ static int modify_raw_packet_qp_sq(
MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
}
- err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
+ err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in);
if (err) {
/* Remove new rate from table if failed */
if (new_rate_added)
@@ -3462,10 +3460,9 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
base = &mqp->trans_qp.base;
context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24);
- return mlx5_core_qp_modify(dev->mdev,
- MLX5_CMD_OP_RTS2RTS_QP,
- MLX5_QP_OPTPAR_COUNTER_SET_ID,
- &context, &base->mqp);
+ return mlx5_core_qp_modify(dev, MLX5_CMD_OP_RTS2RTS_QP,
+ MLX5_QP_OPTPAR_COUNTER_SET_ID, &context,
+ &base->mqp);
}
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
@@ -3752,8 +3749,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
} else {
- err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
- &base->mqp);
+ err = mlx5_core_qp_modify(dev, op, optpar, context, &base->mqp);
}
if (err)
@@ -3927,7 +3923,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
- err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
+ err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
MLX5_ST_SZ_BYTES(create_dct_in), out,
sizeof(out));
if (err)
@@ -3935,7 +3931,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
resp.dctn = qp->dct.mdct.mqp.qpn;
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err) {
- mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
+ mlx5_core_destroy_dct(dev, &qp->dct.mdct);
return err;
}
} else {
@@ -5699,8 +5695,7 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!outb)
return -ENOMEM;
- err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
- outlen);
+ err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen);
if (err)
goto out;
@@ -5778,7 +5773,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
if (!out)
return -ENOMEM;
- err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
+ err = mlx5_core_dct_query(dev, dct, out, outlen);
if (err)
goto out;
@@ -5964,7 +5959,7 @@ static int set_delay_drop(struct mlx5_ib_dev *dev)
if (dev->delay_drop.activate)
goto out;
- err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
+ err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout);
if (err)
goto out;
@@ -6070,13 +6065,13 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
}
rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
- err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
+ err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
err = set_delay_drop(dev);
if (err) {
mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
err);
- mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
} else {
rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
}
@@ -6258,7 +6253,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
return &rwq->ibwq;
err_copy:
- mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
err_user_rq:
destroy_user_rq(dev, pd, rwq, udata);
err:
@@ -6271,7 +6266,7 @@ void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
struct mlx5_ib_dev *dev = to_mdev(wq->device);
struct mlx5_ib_rwq *rwq = to_mrwq(wq);
- mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
destroy_user_rq(dev, wq->pd, rwq, udata);
kfree(rwq);
}
@@ -6449,7 +6444,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
"Receive WQ counters are not supported on current FW\n");
}
- err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
+ err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
if (!err)
rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
new file mode 100644
index 000000000000..ad9d76e3e18a
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_QP_H
+#define _MLX5_IB_QP_H
+
+#include "mlx5_ib.h"
+
+int mlx5_init_qp_table(struct mlx5_ib_dev *dev);
+void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev);
+
+int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *qp,
+ u32 *in, int inlen, u32 *out, int outlen);
+int mlx5_core_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *in, int inlen);
+int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
+ void *qpc, struct mlx5_core_qp *qp);
+int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp);
+int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct);
+int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *out, int outlen);
+int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen);
+
+int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
+
+void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *rq);
+int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq);
+void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *sq);
+
+int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq);
+
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type);
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
+
+int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn);
+int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
+#endif /* _MLX5_IB_QP_H */
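A hedged usage sketch of the relocated QP command API declared above, mirroring the call-site updates in qp.c (a struct mlx5_ib_dev * is now passed instead of dev->mdev; the wrapper function itself is illustrative only):

static int example_qp_lifecycle(struct mlx5_ib_dev *dev,
                                struct mlx5_core_qp *qp, u32 *in, int inlen)
{
        int err;

        err = mlx5_core_create_qp(dev, qp, in, inlen);
        if (err)
                return err;

        /* Move the QP to RESET before tearing it down, as
         * destroy_qp_common() does in the qp.c hunk above.
         */
        err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0, NULL, qp);
        if (err)
                mlx5_ib_warn(dev, "failed to modify QP 0x%x to RST\n", qp->qpn);

        return mlx5_core_destroy_qp(dev, qp);
}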
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/infiniband/hw/mlx5/qpc.c
index c3aea4cc2fff..ea62735042f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -1,46 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/gfp.h>
-#include <linux/export.h>
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/transobj.h>
+#include "mlx5_ib.h"
+#include "qp.h"
-#include "mlx5_core.h"
-#include "lib/eq.h"
-
-static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct);
static struct mlx5_core_rsc_common *
@@ -124,11 +93,9 @@ static int rsc_event_notifier(struct notifier_block *nb,
{
struct mlx5_core_rsc_common *common;
struct mlx5_qp_table *table;
- struct mlx5_core_dev *dev;
struct mlx5_core_dct *dct;
u8 event_type = (u8)type;
struct mlx5_core_qp *qp;
- struct mlx5_priv *priv;
struct mlx5_eqe *eqe;
u32 rsn;
@@ -155,22 +122,12 @@ static int rsc_event_notifier(struct notifier_block *nb,
}
table = container_of(nb, struct mlx5_qp_table, nb);
- priv = container_of(table, struct mlx5_priv, qp_table);
- dev = container_of(priv, struct mlx5_core_dev, priv);
-
- mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn);
-
common = mlx5_get_rsc(table, rsn);
- if (!common) {
- mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
+ if (!common)
return NOTIFY_OK;
- }
- if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
- mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
- event_type, rsn);
+ if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
goto out;
- }
switch (common->res) {
case MLX5_RES_QP:
@@ -185,7 +142,7 @@ static int rsc_event_notifier(struct notifier_block *nb,
complete(&dct->drained);
break;
default:
- mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
+ break;
}
out:
mlx5_core_put_rsc(common);
@@ -193,11 +150,10 @@ out:
return NOTIFY_OK;
}
-static int create_resource_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- int rsc_type)
+static int create_resource_common(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp, int rsc_type)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
int err;
qp->common.res = rsc_type;
@@ -216,10 +172,10 @@ static int create_resource_common(struct mlx5_core_dev *dev,
return 0;
}
-static void destroy_resource_common(struct mlx5_core_dev *dev,
+static void destroy_resource_common(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *qp)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
unsigned long flags;
spin_lock_irqsave(&table->lock, flags);
@@ -230,24 +186,19 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
wait_for_completion(&qp->common.free);
}
-static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct, bool need_cleanup)
{
- u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
struct mlx5_core_qp *qp = &dct->mqp;
int err;
err = mlx5_core_drain_dct(dev, dct);
if (err) {
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto destroy;
- } else {
- mlx5_core_warn(
- dev, "failed drain DCT 0x%x with error 0x%x\n",
- qp->qpn, err);
- return err;
- }
+
+ return err;
}
wait_for_completion(&dct->drained);
destroy:
@@ -256,15 +207,12 @@ destroy:
MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
MLX5_SET(destroy_dct_in, in, uid, qp->uid);
- err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)&out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
return err;
}
-int mlx5_core_create_dct(struct mlx5_core_dev *dev,
- struct mlx5_core_dct *dct,
- u32 *in, int inlen,
- u32 *out, int outlen)
+int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *in, int inlen, u32 *out, int outlen)
{
struct mlx5_core_qp *qp = &dct->mqp;
int err;
@@ -272,11 +220,9 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
init_completion(&dct->drained);
MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
- if (err) {
- mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
+ if (err)
return err;
- }
qp->qpn = MLX5_GET(create_dct_out, out, dctn);
qp->uid = MLX5_GET(create_dct_in, in, uid);
@@ -289,108 +235,83 @@ err_cmd:
_mlx5_core_destroy_dct(dev, dct, false);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
-int mlx5_core_create_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
+int mlx5_core_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
u32 *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
- u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
- u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
int err;
MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
if (err)
return err;
qp->uid = MLX5_GET(create_qp_in, in, uid);
qp->qpn = MLX5_GET(create_qp_out, out, qpn);
- mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
err = create_resource_common(dev, qp, MLX5_RES_QP);
if (err)
goto err_cmd;
- err = mlx5_debug_qp_add(dev, qp);
- if (err)
- mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
- qp->qpn);
-
- atomic_inc(&dev->num_qps);
+ mlx5_debug_qp_add(dev->mdev, qp);
return 0;
err_cmd:
- memset(din, 0, sizeof(din));
- memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
MLX5_SET(destroy_qp_in, din, uid, qp->uid);
- mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
+ mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
-static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct)
{
- u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
struct mlx5_core_qp *qp = &dct->mqp;
MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
MLX5_SET(drain_dct_in, in, uid, qp->uid);
- return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)&out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
}
-int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct)
{
return _mlx5_core_destroy_dct(dev, dct, true);
}
-EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
-int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp)
+int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
{
- u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
- int err;
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
- mlx5_debug_qp_remove(dev, qp);
+ mlx5_debug_qp_remove(dev->mdev, qp);
destroy_resource_common(dev, qp);
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
MLX5_SET(destroy_qp_in, in, uid, qp->uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- atomic_dec(&dev->num_qps);
+ mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
-int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
+int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
u32 timeout_usec)
{
- u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};
MLX5_SET(set_delay_drop_params_in, in, opcode,
MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
timeout_usec / 100);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
}
-EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
struct mbox_info {
u32 *in;
@@ -496,120 +417,112 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
opt_param_mask, qpc, uid);
break;
default:
- mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
- opcode, qpn);
return -EINVAL;
}
return 0;
}
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
- u32 opt_param_mask, void *qpc,
- struct mlx5_core_qp *qp)
+int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
+ void *qpc, struct mlx5_core_qp *qp)
{
struct mbox_info mbox;
int err;
- err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
+ err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn,
opt_param_mask, qpc, &mbox, qp->uid);
if (err)
return err;
- err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
+ err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
+ mbox.outlen);
mbox_free(&mbox);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
-void mlx5_init_qp_table(struct mlx5_core_dev *dev)
+int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
- memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
- mlx5_qp_debugfs_init(dev);
+ mlx5_qp_debugfs_init(dev->mdev);
table->nb.notifier_call = rsc_event_notifier;
- mlx5_notifier_register(dev, &table->nb);
+ mlx5_notifier_register(dev->mdev, &table->nb);
+
+ return 0;
}
-void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
+void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
- mlx5_notifier_unregister(dev, &table->nb);
- mlx5_qp_debugfs_cleanup(dev);
+ mlx5_notifier_unregister(dev->mdev, &table->nb);
+ mlx5_qp_debugfs_cleanup(dev->mdev);
}
-int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
+int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
MLX5_SET(query_qp_in, in, qpn, qp->qpn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
}
-EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
-int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
struct mlx5_core_qp *qp = &dct->mqp;
MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
MLX5_SET(query_dct_in, in, dctn, qp->qpn);
- return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)out, outlen);
+ return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
+ outlen);
}
-EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
-int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
+int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
{
- u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
int err;
MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
if (!err)
*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
-int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
+int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
}
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
-static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
+static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
MLX5_SET(destroy_rq_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
}
-int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *rq)
{
int err;
u32 rqn;
- err = mlx5_core_create_rq(dev, in, inlen, &rqn);
+ err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
if (err)
return err;
@@ -626,39 +539,37 @@ err_destroy_rq:
return err;
}
-EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
-void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *rq)
{
destroy_resource_common(dev, rq);
destroy_rq_tracked(dev, rq->qpn, rq->uid);
}
-EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
-static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
+static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
MLX5_SET(destroy_sq_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
}
-int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *sq)
{
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
int err;
- u32 sqn;
- err = mlx5_core_create_sq(dev, in, inlen, &sqn);
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
if (err)
return err;
+ sq->qpn = MLX5_GET(create_sq_out, out, sqn);
sq->uid = MLX5_GET(create_sq_in, in, uid);
- sq->qpn = sqn;
err = create_resource_common(dev, sq, MLX5_RES_SQ);
if (err)
goto err_destroy_sq;
@@ -670,68 +581,25 @@ err_destroy_sq:
return err;
}
-EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
-void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *sq)
{
destroy_resource_common(dev, sq);
destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
-EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
-
-int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
-{
- u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
- int err;
-
- MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *counter_id = MLX5_GET(alloc_q_counter_out, out,
- counter_set_id);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
-
-int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
-{
- u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
-
- MLX5_SET(dealloc_q_counter_in, in, opcode,
- MLX5_CMD_OP_DEALLOC_Q_COUNTER);
- MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
-
-int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
- int reset, void *out, int out_size)
-{
- u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
-
- MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
- MLX5_SET(query_q_counter_in, in, clear, reset);
- MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
-struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
int res_num,
enum mlx5_res_type res_type)
{
u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
return mlx5_get_rsc(table, rsn);
}
-EXPORT_SYMBOL_GPL(mlx5_core_res_hold);
void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
mlx5_core_put_rsc(res);
}
-EXPORT_SYMBOL_GPL(mlx5_core_res_put);
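The qpc.c hunks above replace open-coded mlx5_cmd_exec() calls, which forced every caller to declare a stack output mailbox even when the result was discarded, with the mlx5_cmd_exec_in()/mlx5_cmd_exec_inout() wrappers, and reach the core device through dev->mdev now that these helpers take a struct mlx5_ib_dev. A minimal sketch of the wrapper shape assumed here (the in-tree macros in include/linux/mlx5/driver.h take roughly this form; sizes are derived from the interface-command name):

	/* Sketch only: size both mailboxes from the ifc command name and
	 * forward to plain mlx5_cmd_exec(); the _in variant allocates a
	 * throwaway output mailbox so callers no longer declare one.
	 */
	#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                     \
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in),         \
			      out, MLX5_ST_SZ_BYTES(ifc_cmd##_out))

	#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                             \
	({                                                                     \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};                   \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);                   \
	})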
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 8fc3630a9d4c..c851570791af 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -5,9 +5,9 @@
#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_ib.h"
#include "srq.h"
+#include "qp.h"
static int get_pas_size(struct mlx5_srq_attr *in)
{
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index 7af0c536d568..28b8581b44dd 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -183,8 +183,7 @@ static void mac_hid_stop_emulation(void)
}
static int mac_hid_toggle_emumouse(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int old_val = *valp;
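mac_hid_toggle_emumouse() drops the __user annotation on its buffer argument. With the concurrent sysctl rework the proc core copies user data into a kernel buffer before invoking the handler, so ctl_table handlers operate on plain kernel pointers. A sketch of the prototype this change matches (assumed from the common sysctl definitions of that era, not part of this diff):

	/* buffer is a kernel pointer filled (on write) or drained (on read)
	 * by the sysctl core; handlers no longer call copy_from_user().
	 */
	typedef int proc_handler(struct ctl_table *ctl, int write,
				 void *buffer, size_t *lenp, loff_t *ppos);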
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 0f3417d161b8..5bb144435c16 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -103,12 +103,14 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_map_peek_elem_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
+ case BPF_FUNC_ktime_get_boot_ns:
+ return &bpf_ktime_get_boot_ns_proto;
case BPF_FUNC_tail_call:
return &bpf_tail_call_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_trace_printk:
- if (capable(CAP_SYS_ADMIN))
+ if (perfmon_capable())
return bpf_get_trace_printk_proto();
/* fall through */
default:
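Two helper-availability changes for lirc mode2 programs: bpf_ktime_get_boot_ns becomes callable alongside bpf_ktime_get_ns, and the bpf_trace_printk gate is relaxed from CAP_SYS_ADMIN to perfmon_capable(). The latter is a small capability helper; a sketch of its assumed definition:

	/* Accepts either the newer, narrower CAP_PERFMON or the legacy
	 * CAP_SYS_ADMIN, so existing privileged users keep working.
	 */
	static inline bool perfmon_capable(void)
	{
		return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN);
	}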
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b103fbdd0f68..c7d310ef1c83 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -50,7 +50,7 @@ config BONDING
The driver supports multiple bonding modes to allow for both high
performance and high availability operation.
- Refer to <file:Documentation/networking/bonding.txt> for more
+ Refer to <file:Documentation/networking/bonding.rst> for more
information.
To compile this driver as a module, choose M here: the module
@@ -126,7 +126,7 @@ config EQUALIZER
Linux driver or with a Livingston Portmaster 2e.
Say Y if you want this and read
- <file:Documentation/networking/eql.txt>. You may also want to read
+ <file:Documentation/networking/eql.rst>. You may also want to read
section 6.2 of the NET-3-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
@@ -302,7 +302,7 @@ config NETCONSOLE
tristate "Network console logging support"
---help---
If you want to log kernel messages over the network, enable this.
- See <file:Documentation/networking/netconsole.txt> for details.
+ See <file:Documentation/networking/netconsole.rst> for details.
config NETCONSOLE_DYNAMIC
bool "Dynamic reconfiguration of logging targets"
@@ -312,7 +312,7 @@ config NETCONSOLE_DYNAMIC
This option enables the ability to dynamically reconfigure target
parameters (interface, IP addresses, port numbers, MAC addresses)
at runtime through a userspace interface exported using configfs.
- See <file:Documentation/networking/netconsole.txt> for details.
+ See <file:Documentation/networking/netconsole.rst> for details.
config NETPOLL
def_bool NETCONSOLE
@@ -355,7 +355,7 @@ config TUN
devices, driver will automatically delete tunXX or tapXX device and
all routes corresponding to it.
- Please read <file:Documentation/networking/tuntap.txt> for more
+ Please read <file:Documentation/networking/tuntap.rst> for more
information.
To compile this driver as a module, choose M here: the module
@@ -460,7 +460,7 @@ config NET_SB1000
At present this driver only compiles as a module, so say M here if
you have this card. The module will be called sb1000. Then read
- <file:Documentation/networking/device_drivers/sb1000.txt> for
+ <file:Documentation/networking/device_drivers/sb1000.rst> for
information on how to use this module, as it needs special ppp
scripts for establishing a connection. Further documentation
and the necessary scripts can be found at:
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index af509b05ac5c..10589a82263b 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -48,7 +48,7 @@ config LTPC
If you are in doubt, this card is the one with the 65C02 chip on it.
You also need version 1.3.3 or later of the netatalk package.
This driver is experimental, which means that it may not work.
- See the file <file:Documentation/networking/ltpc.txt>.
+ See the file <file:Documentation/networking/ltpc.rst>.
config COPS
tristate "COPS LocalTalk PC support"
@@ -59,7 +59,7 @@ config COPS
package. This driver is experimental, which means that it may not
work. This driver will only work if you choose "AppleTalk DDP"
networking support, above.
- Please read the file <file:Documentation/networking/cops.txt>.
+ Please read the file <file:Documentation/networking/cops.rst>.
config COPS_DAYNA
bool "Dayna firmware support"
@@ -86,7 +86,7 @@ config IPDDP
box is stuck on an AppleTalk only network) or decapsulate (e.g. if
you want your Linux box to act as an Internet gateway for a zoo of
AppleTalk connected Macs). Please see the file
- <file:Documentation/networking/ipddp.txt> for more information.
+ <file:Documentation/networking/ipddp.rst> for more information.
If you say Y here, the AppleTalk-IP support will be compiled into
the kernel. In this case, you can either use encapsulation or
@@ -107,4 +107,4 @@ config IPDDP_ENCAP
IP packets inside AppleTalk frames; this is useful if your Linux box
is stuck on an AppleTalk network (which hopefully contains a
decapsulator somewhere). Please see
- <file:Documentation/networking/ipddp.txt> for more information.
+ <file:Documentation/networking/ipddp.rst> for more information.
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index 27551bf3d7e4..43eef60653b2 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -9,7 +9,7 @@ menuconfig ARCNET
---help---
If you have a network card of this type, say Y and check out the
(arguably) beautiful poetry in
- <file:Documentation/networking/arcnet.txt>.
+ <file:Documentation/networking/arcnet.rst>.
You need both this driver, and the driver for the particular ARCnet
chipset of your card. If you don't know, then it's probably a
@@ -28,7 +28,7 @@ config ARCNET_1201
arc0 device. You need to say Y here to communicate with
industry-standard RFC1201 implementations, like the arcether.com
packet driver or most DOS/Windows ODI drivers. Please read the
- ARCnet documentation in <file:Documentation/networking/arcnet.txt>
+ ARCnet documentation in <file:Documentation/networking/arcnet.rst>
for more information about using arc0.
config ARCNET_1051
@@ -42,7 +42,7 @@ config ARCNET_1051
industry-standard RFC1201 implementations, like the arcether.com
packet driver or most DOS/Windows ODI drivers. RFC1201 is included
automatically as the arc0 device. Please read the ARCnet
- documentation in <file:Documentation/networking/arcnet.txt> for more
+ documentation in <file:Documentation/networking/arcnet.rst> for more
information about using arc0e and arc0s.
config ARCNET_RAW
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c81698550e5a..095ea51d1853 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1318,8 +1318,7 @@ static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
tx_slave->dev->dev_addr);
}
- bond_dev_queue_xmit(bond, skb, tx_slave->dev);
- goto out;
+ return bond_dev_queue_xmit(bond, skb, tx_slave->dev);
}
if (tx_slave && bond->params.tlb_dynamic_lb) {
@@ -1329,16 +1328,14 @@ static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
}
/* no suitable interface, frame not sent */
- bond_tx_drop(bond->dev, skb);
-out:
- return NETDEV_TX_OK;
+ return bond_tx_drop(bond->dev, skb);
}
-netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
+ struct sk_buff *skb)
{
- struct bonding *bond = netdev_priv(bond_dev);
- struct ethhdr *eth_data;
struct slave *tx_slave = NULL;
+ struct ethhdr *eth_data;
u32 hash_index;
skb_reset_mac_header(skb);
@@ -1360,7 +1357,7 @@ netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct bond_up_slave *slaves;
unsigned int count;
- slaves = rcu_dereference(bond->slave_arr);
+ slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[hash_index %
@@ -1369,20 +1366,29 @@ netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
}
- return bond_do_alb_xmit(skb, bond, tx_slave);
+ return tx_slave;
}
-netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct ethhdr *eth_data;
+ struct slave *tx_slave;
+
+ tx_slave = bond_xmit_tlb_slave_get(bond, skb);
+ return bond_do_alb_xmit(skb, bond, tx_slave);
+}
+
+struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
+ struct sk_buff *skb)
+{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- struct slave *tx_slave = NULL;
static const __be32 ip_bcast = htonl(0xffffffff);
- int hash_size = 0;
+ struct slave *tx_slave = NULL;
+ const u8 *hash_start = NULL;
bool do_tx_balance = true;
+ struct ethhdr *eth_data;
u32 hash_index = 0;
- const u8 *hash_start = NULL;
+ int hash_size = 0;
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
@@ -1494,14 +1500,22 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct bond_up_slave *slaves;
unsigned int count;
- slaves = rcu_dereference(bond->slave_arr);
+ slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
count];
}
}
+ return tx_slave;
+}
+
+netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *tx_slave = NULL;
+ tx_slave = bond_xmit_alb_slave_get(bond, skb);
return bond_do_alb_xmit(skb, bond, tx_slave);
}
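The TLB and ALB transmit paths are split so that slave selection (bond_xmit_tlb_slave_get(), bond_xmit_alb_slave_get()) is separate from the actual transmit in bond_do_alb_xmit(), and the queue/drop helpers now return a netdev_tx_t that propagates back to the caller. A sketch of the drop helper this relies on, assuming the companion bonding.h change (not shown in this diff):

	static inline netdev_tx_t bond_tx_drop(struct net_device *dev,
					       struct sk_buff *skb)
	{
		/* count the drop against the bond device and report it */
		atomic_long_inc(&dev->tx_dropped);
		dev_kfree_skb_any(skb);
		return NET_XMIT_DROP;
	}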
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2e70e43c5df5..a25c65d4af71 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -287,7 +287,7 @@ const char *bond_mode_name(int mode)
* @skb: hw accel VLAN tagged skb to transmit
* @slave_dev: slave that is supposed to xmit this skbuff
*/
-void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
+netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *slave_dev)
{
skb->dev = slave_dev;
@@ -297,9 +297,9 @@ void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
if (unlikely(netpoll_tx_running(bond->dev)))
- bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
- else
- dev_queue_xmit(skb);
+ return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
+
+ return dev_queue_xmit(skb);
}
/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
@@ -3923,16 +3923,15 @@ unwind:
}
/**
- * bond_xmit_slave_id - transmit skb through slave with slave_id
+ * bond_get_slave_by_id - get xmit slave with slave_id
* @bond: bonding device that is transmitting
- * @skb: buffer to transmit
* @slave_id: slave id up to slave_cnt-1 through which to transmit
*
- * This function tries to transmit through slave with slave_id but in case
+ * This function tries to get slave with slave_id but in case
* it fails, it tries to find the first available slave for transmission.
- * The skb is consumed in all cases, thus the function is void.
*/
-static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+static struct slave *bond_get_slave_by_id(struct bonding *bond,
+ int slave_id)
{
struct list_head *iter;
struct slave *slave;
@@ -3941,10 +3940,8 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
/* Here we start from the slave with slave_id */
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0) {
- if (bond_slave_can_tx(slave)) {
- bond_dev_queue_xmit(bond, skb, slave->dev);
- return;
- }
+ if (bond_slave_can_tx(slave))
+ return slave;
}
}
@@ -3953,13 +3950,11 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0)
break;
- if (bond_slave_can_tx(slave)) {
- bond_dev_queue_xmit(bond, skb, slave->dev);
- return;
- }
+ if (bond_slave_can_tx(slave))
+ return slave;
}
/* no slave that can tx has been found */
- bond_tx_drop(bond->dev, skb);
+ return NULL;
}
/**
@@ -3995,10 +3990,9 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
return slave_id;
}
-static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
- struct net_device *bond_dev)
+static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
+ struct sk_buff *skb)
{
- struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
int slave_cnt;
u32 slave_id;
@@ -4020,22 +4014,37 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
if (iph->protocol == IPPROTO_IGMP) {
slave = rcu_dereference(bond->curr_active_slave);
if (slave)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
- bond_xmit_slave_id(bond, skb, 0);
- return NETDEV_TX_OK;
+ return slave;
+ return bond_get_slave_by_id(bond, 0);
}
}
non_igmp:
slave_cnt = READ_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
- slave_id = bond_rr_gen_slave_id(bond);
- bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
- } else {
- bond_tx_drop(bond_dev, skb);
+ slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
+ return bond_get_slave_by_id(bond, slave_id);
}
- return NETDEV_TX_OK;
+ return NULL;
+}
+
+static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
+ struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+
+ slave = bond_xmit_roundrobin_slave_get(bond, skb);
+ if (likely(slave))
+ return bond_dev_queue_xmit(bond, skb, slave->dev);
+
+ return bond_tx_drop(bond_dev, skb);
+}
+
+static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond,
+ struct sk_buff *skb)
+{
+ return rcu_dereference(bond->curr_active_slave);
}
/* In active-backup mode, we know that bond->curr_active_slave is always valid if
@@ -4047,13 +4056,11 @@ static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- slave = rcu_dereference(bond->curr_active_slave);
+ slave = bond_xmit_activebackup_slave_get(bond, skb);
if (slave)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
- bond_tx_drop(bond_dev, skb);
+ return bond_dev_queue_xmit(bond, skb, slave->dev);
- return NETDEV_TX_OK;
+ return bond_tx_drop(bond_dev, skb);
}
/* Use this to update slave_array when (a) it's not appropriate to update
@@ -4087,6 +4094,61 @@ err:
bond_slave_arr_work_rearm(bond, 1);
}
+static void bond_skip_slave(struct bond_up_slave *slaves,
+ struct slave *skipslave)
+{
+ int idx;
+
+ /* Rare situation where caller has asked to skip a specific
+ * slave but allocation failed (most likely!). BTW this is
+ * only possible when the call is initiated from
+ * __bond_release_one(). In this situation; overwrite the
+ * skipslave entry in the array with the last entry from the
+ * array to avoid a situation where the xmit path may choose
+ * this to-be-skipped slave to send a packet out.
+ */
+ for (idx = 0; slaves && idx < slaves->count; idx++) {
+ if (skipslave == slaves->arr[idx]) {
+ slaves->arr[idx] =
+ slaves->arr[slaves->count - 1];
+ slaves->count--;
+ break;
+ }
+ }
+}
+
+static void bond_set_slave_arr(struct bonding *bond,
+ struct bond_up_slave *usable_slaves,
+ struct bond_up_slave *all_slaves)
+{
+ struct bond_up_slave *usable, *all;
+
+ usable = rtnl_dereference(bond->usable_slaves);
+ rcu_assign_pointer(bond->usable_slaves, usable_slaves);
+ kfree_rcu(usable, rcu);
+
+ all = rtnl_dereference(bond->all_slaves);
+ rcu_assign_pointer(bond->all_slaves, all_slaves);
+ kfree_rcu(all, rcu);
+}
+
+static void bond_reset_slave_arr(struct bonding *bond)
+{
+ struct bond_up_slave *usable, *all;
+
+ usable = rtnl_dereference(bond->usable_slaves);
+ if (usable) {
+ RCU_INIT_POINTER(bond->usable_slaves, NULL);
+ kfree_rcu(usable, rcu);
+ }
+
+ all = rtnl_dereference(bond->all_slaves);
+ if (all) {
+ RCU_INIT_POINTER(bond->all_slaves, NULL);
+ kfree_rcu(all, rcu);
+ }
+}
+
/* Build the usable slaves array in control path for modes that use xmit-hash
* to determine the slave interface -
* (a) BOND_MODE_8023AD
@@ -4097,9 +4159,9 @@ err:
*/
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
+ struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
struct slave *slave;
struct list_head *iter;
- struct bond_up_slave *new_arr, *old_arr;
int agg_id = 0;
int ret = 0;
@@ -4107,11 +4169,12 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
WARN_ON(lockdep_is_held(&bond->mode_lock));
#endif
- new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]),
- GFP_KERNEL);
- if (!new_arr) {
+ usable_slaves = kzalloc(struct_size(usable_slaves, arr,
+ bond->slave_cnt), GFP_KERNEL);
+ all_slaves = kzalloc(struct_size(all_slaves, arr,
+ bond->slave_cnt), GFP_KERNEL);
+ if (!usable_slaves || !all_slaves) {
ret = -ENOMEM;
- pr_err("Failed to build slave-array.\n");
goto out;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -4119,20 +4182,19 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("bond_3ad_get_active_agg_info failed\n");
- kfree_rcu(new_arr, rcu);
/* No active aggragator means it's not safe to use
* the previous array.
*/
- old_arr = rtnl_dereference(bond->slave_arr);
- if (old_arr) {
- RCU_INIT_POINTER(bond->slave_arr, NULL);
- kfree_rcu(old_arr, rcu);
- }
+ bond_reset_slave_arr(bond);
goto out;
}
agg_id = ad_info.aggregator_id;
}
bond_for_each_slave(bond, slave, iter) {
+ if (skipslave == slave)
+ continue;
+
+ all_slaves->arr[all_slaves->count++] = slave;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct aggregator *agg;
@@ -4142,44 +4204,45 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
}
if (!bond_slave_can_tx(slave))
continue;
- if (skipslave == slave)
- continue;
slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
- new_arr->count);
+ usable_slaves->count);
- new_arr->arr[new_arr->count++] = slave;
+ usable_slaves->arr[usable_slaves->count++] = slave;
}
- old_arr = rtnl_dereference(bond->slave_arr);
- rcu_assign_pointer(bond->slave_arr, new_arr);
- if (old_arr)
- kfree_rcu(old_arr, rcu);
+ bond_set_slave_arr(bond, usable_slaves, all_slaves);
+ return ret;
out:
if (ret != 0 && skipslave) {
- int idx;
-
- /* Rare situation where caller has asked to skip a specific
- * slave but allocation failed (most likely!). BTW this is
- * only possible when the call is initiated from
- * __bond_release_one(). In this situation; overwrite the
- * skipslave entry in the array with the last entry from the
- * array to avoid a situation where the xmit path may choose
- * this to-be-skipped slave to send a packet out.
- */
- old_arr = rtnl_dereference(bond->slave_arr);
- for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
- if (skipslave == old_arr->arr[idx]) {
- old_arr->arr[idx] =
- old_arr->arr[old_arr->count-1];
- old_arr->count--;
- break;
- }
- }
+ bond_skip_slave(rtnl_dereference(bond->all_slaves),
+ skipslave);
+ bond_skip_slave(rtnl_dereference(bond->usable_slaves),
+ skipslave);
}
+ kfree_rcu(all_slaves, rcu);
+ kfree_rcu(usable_slaves, rcu);
+
return ret;
}
+static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
+ struct sk_buff *skb,
+ struct bond_up_slave *slaves)
+{
+ struct slave *slave;
+ unsigned int count;
+ u32 hash;
+
+ hash = bond_xmit_hash(bond, skb);
+ count = slaves ? READ_ONCE(slaves->count) : 0;
+ if (unlikely(!count))
+ return NULL;
+
+ slave = slaves->arr[hash % count];
+ return slave;
+}
+
/* Use this Xmit function for 3AD as well as XOR modes. The current
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
@@ -4188,20 +4251,15 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
- struct slave *slave;
struct bond_up_slave *slaves;
- unsigned int count;
+ struct slave *slave;
- slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? READ_ONCE(slaves->count) : 0;
- if (likely(count)) {
- slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
- bond_dev_queue_xmit(bond, skb, slave->dev);
- } else {
- bond_tx_drop(dev, skb);
- }
+ slaves = rcu_dereference(bond->usable_slaves);
+ slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
+ if (likely(slave))
+ return bond_dev_queue_xmit(bond, skb, slave->dev);
- return NETDEV_TX_OK;
+ return bond_tx_drop(dev, skb);
}
/* in broadcast mode, we send everything to all usable interfaces. */
@@ -4227,11 +4285,9 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
}
}
if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
- bond_tx_drop(bond_dev, skb);
+ return bond_dev_queue_xmit(bond, skb, slave->dev);
- return NETDEV_TX_OK;
+ return bond_tx_drop(bond_dev, skb);
}
/*------------------------- Device initialization ---------------------------*/
@@ -4284,6 +4340,48 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
+static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
+ struct sk_buff *skb,
+ bool all_slaves)
+{
+ struct bonding *bond = netdev_priv(master_dev);
+ struct bond_up_slave *slaves;
+ struct slave *slave = NULL;
+
+ switch (BOND_MODE(bond)) {
+ case BOND_MODE_ROUNDROBIN:
+ slave = bond_xmit_roundrobin_slave_get(bond, skb);
+ break;
+ case BOND_MODE_ACTIVEBACKUP:
+ slave = bond_xmit_activebackup_slave_get(bond, skb);
+ break;
+ case BOND_MODE_8023AD:
+ case BOND_MODE_XOR:
+ if (all_slaves)
+ slaves = rcu_dereference(bond->all_slaves);
+ else
+ slaves = rcu_dereference(bond->usable_slaves);
+ slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
+ break;
+ case BOND_MODE_BROADCAST:
+ break;
+ case BOND_MODE_ALB:
+ slave = bond_xmit_alb_slave_get(bond, skb);
+ break;
+ case BOND_MODE_TLB:
+ slave = bond_xmit_tlb_slave_get(bond, skb);
+ break;
+ default:
+ /* Should never happen, mode already checked */
+ WARN_ONCE(true, "Unknown bonding mode");
+ break;
+ }
+
+ if (slave)
+ return slave->dev;
+ return NULL;
+}
+
static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
@@ -4310,8 +4408,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
/* Should never happen, mode already checked */
netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
WARN_ON_ONCE(1);
- bond_tx_drop(dev, skb);
- return NETDEV_TX_OK;
+ return bond_tx_drop(dev, skb);
}
}
@@ -4330,7 +4427,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bond_has_slaves(bond))
ret = __bond_start_xmit(skb, dev);
else
- bond_tx_drop(dev, skb);
+ ret = bond_tx_drop(dev, skb);
rcu_read_unlock();
return ret;
@@ -4405,6 +4502,7 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
.ndo_features_check = passthru_features_check,
+ .ndo_get_xmit_slave = bond_xmit_get_slave,
};
static const struct device_type bond_type = {
@@ -4472,9 +4570,9 @@ void bond_setup(struct net_device *bond_dev)
static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct bond_up_slave *usable, *all;
struct list_head *iter;
struct slave *slave;
- struct bond_up_slave *arr;
bond_netpoll_cleanup(bond_dev);
@@ -4483,15 +4581,20 @@ static void bond_uninit(struct net_device *bond_dev)
__bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
- arr = rtnl_dereference(bond->slave_arr);
- if (arr) {
- RCU_INIT_POINTER(bond->slave_arr, NULL);
- kfree_rcu(arr, rcu);
+ usable = rtnl_dereference(bond->usable_slaves);
+ if (usable) {
+ RCU_INIT_POINTER(bond->usable_slaves, NULL);
+ kfree_rcu(usable, rcu);
+ }
+
+ all = rtnl_dereference(bond->all_slaves);
+ if (all) {
+ RCU_INIT_POINTER(bond->all_slaves, NULL);
+ kfree_rcu(all, rcu);
}
list_del(&bond->bond_list);
- lockdep_unregister_key(&bond->stats_lock_key);
bond_debug_unregister(bond);
}
@@ -4896,8 +4999,7 @@ static int bond_init(struct net_device *bond_dev)
return -ENOMEM;
spin_lock_init(&bond->stats_lock);
- lockdep_register_key(&bond->stats_lock_key);
- lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
+ netdev_lockdep_set_classes(bond_dev);
list_add_tail(&bond->bond_list, &bn->dev_list);
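bond_xmit_get_slave() backs the new .ndo_get_xmit_slave op, reusing the per-mode *_slave_get() helpers so an upper layer can ask which lower device a given skb would leave on; passing all_slaves selects bond->all_slaves instead of the usable array for the hashing modes. A hypothetical caller sketch (names outside this diff are illustrative only):

	/* Must run under rcu_read_lock(), matching the rcu_dereference()
	 * calls inside bond_xmit_get_slave().
	 */
	struct net_device *lower = NULL;

	rcu_read_lock();
	if (netif_is_bond_master(bond_dev) &&
	    bond_dev->netdev_ops->ndo_get_xmit_slave)
		lower = bond_dev->netdev_ops->ndo_get_xmit_slave(bond_dev,
								 skb, false);
	rcu_read_unlock();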
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h
index 45b77bc8c7b3..48cdf3a49a7d 100644
--- a/drivers/net/bonding/bonding_priv.h
+++ b/drivers/net/bonding/bonding_priv.h
@@ -14,7 +14,7 @@
#ifndef _BONDING_PRIV_H
#define _BONDING_PRIV_H
-#include <linux/vermagic.h>
+#include <generated/utsrelease.h>
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 661c25eb1c46..1538ad194cf4 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -28,7 +28,7 @@ config CAIF_SPI_SLAVE
The CAIF Link layer SPI Protocol driver for Slave SPI interface.
This driver implements a platform driver to accommodate for a
platform specific SPI device. A sample CAIF SPI Platform device is
- provided in <file:Documentation/networking/caif/spi_porting.txt>.
+ provided in <file:Documentation/networking/caif/spi_porting.rst>.
config CAIF_SPI_SYNC
bool "Next command and length in start of frame"
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index c283593bef17..ceb8be653182 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1484,8 +1484,7 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
}
static int b53_arl_read(struct b53_device *dev, u64 mac,
- u16 vid, struct b53_arl_entry *ent, u8 *idx,
- bool is_valid)
+ u16 vid, struct b53_arl_entry *ent, u8 *idx)
{
DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
unsigned int i;
@@ -1495,10 +1494,10 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
if (ret)
return ret;
- bitmap_zero(free_bins, dev->num_arl_entries);
+ bitmap_zero(free_bins, dev->num_arl_bins);
/* Read the bins */
- for (i = 0; i < dev->num_arl_entries; i++) {
+ for (i = 0; i < dev->num_arl_bins; i++) {
u64 mac_vid;
u32 fwd_entry;
@@ -1521,10 +1520,10 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
return 0;
}
- if (bitmap_weight(free_bins, dev->num_arl_entries) == 0)
+ if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
return -ENOSPC;
- *idx = find_first_bit(free_bins, dev->num_arl_entries);
+ *idx = find_first_bit(free_bins, dev->num_arl_bins);
return -ENOENT;
}
@@ -1550,7 +1549,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
if (ret)
return ret;
- ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid);
+ ret = b53_arl_read(dev, mac, vid, &ent, &idx);
+
/* If this is a read, just finish now */
if (op)
return ret;
@@ -1692,7 +1692,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
if (ret)
return ret;
- if (priv->num_arl_entries > 2) {
+ if (priv->num_arl_bins > 2) {
b53_arl_search_rd(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret)
@@ -1702,7 +1702,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
break;
}
- } while (count++ < 1024);
+ } while (count++ < b53_max_arl_entries(priv) / 2);
return 0;
}
@@ -2185,7 +2185,8 @@ struct b53_chip_data {
u16 enabled_ports;
u8 cpu_port;
u8 vta_regs[3];
- u8 arl_entries;
+ u8 arl_bins;
+ u16 arl_buckets;
u8 duplex_reg;
u8 jumbo_pm_reg;
u8 jumbo_size_reg;
@@ -2204,7 +2205,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5325",
.vlans = 16,
.enabled_ports = 0x1f,
- .arl_entries = 2,
+ .arl_bins = 2,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
@@ -2213,7 +2215,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5365",
.vlans = 256,
.enabled_ports = 0x1f,
- .arl_entries = 2,
+ .arl_bins = 2,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
@@ -2222,7 +2225,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5389",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2234,7 +2238,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5395",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2246,7 +2251,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5397",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2258,7 +2264,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5398",
.vlans = 4096,
.enabled_ports = 0x7f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2270,7 +2277,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53115",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.vta_regs = B53_VTA_REGS,
.cpu_port = B53_CPU_PORT,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2282,7 +2290,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53125",
.vlans = 4096,
.enabled_ports = 0xff,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2294,7 +2303,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53128",
.vlans = 4096,
.enabled_ports = 0x1ff,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2306,7 +2316,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM63xx",
.vlans = 4096,
.enabled_ports = 0, /* pdata must provide them */
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_63XX,
.duplex_reg = B53_DUPLEX_STAT_63XX,
@@ -2318,7 +2329,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53010",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2330,7 +2342,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53011",
.vlans = 4096,
.enabled_ports = 0x1bf,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2342,7 +2355,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53012",
.vlans = 4096,
.enabled_ports = 0x1bf,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2354,7 +2368,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53018",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2366,7 +2381,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53019",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2378,7 +2394,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM585xx/586xx/88312",
.vlans = 4096,
.enabled_ports = 0x1ff,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2390,7 +2407,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM583xx/11360",
.vlans = 4096,
.enabled_ports = 0x103,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2402,7 +2420,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM7445",
.vlans = 4096,
.enabled_ports = 0x1ff,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2414,7 +2433,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM7278",
.vlans = 4096,
.enabled_ports = 0x1ff,
- .arl_entries= 4,
+ .arl_bins = 4,
+ .arl_buckets = 256,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2442,7 +2462,8 @@ static int b53_switch_init(struct b53_device *dev)
dev->jumbo_pm_reg = chip->jumbo_pm_reg;
dev->cpu_port = chip->cpu_port;
dev->num_vlans = chip->vlans;
- dev->num_arl_entries = chip->arl_entries;
+ dev->num_arl_bins = chip->arl_bins;
+ dev->num_arl_buckets = chip->arl_buckets;
break;
}
}
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 3d42318bc3f1..e942c60e4365 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -117,7 +117,8 @@ struct b53_device {
u8 jumbo_pm_reg;
u8 jumbo_size_reg;
int reset_gpio;
- u8 num_arl_entries;
+ u8 num_arl_bins;
+ u16 num_arl_buckets;
enum dsa_tag_protocol tag_protocol;
/* used ports mask */
@@ -212,6 +213,11 @@ static inline int is58xx(struct b53_device *dev)
#define B53_CPU_PORT_25 5
#define B53_CPU_PORT 8
+static inline unsigned int b53_max_arl_entries(struct b53_device *dev)
+{
+ return dev->num_arl_buckets * dev->num_arl_bins;
+}
+
struct b53_device *b53_switch_alloc(struct device *base,
const struct b53_io_ops *ops,
void *priv);
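With the ARL geometry now expressed as bins x buckets, b53_max_arl_entries() gives the true table size and the fdb_dump loop bound follows from it: each search iteration consumes up to two results when num_arl_bins is greater than 2, so the loop runs b53_max_arl_entries(priv) / 2 times. As a worked example, BCM7278 (4 bins x 256 buckets) yields 1024 entries and a bound of 512 iterations, while the 1024-bucket chips allow 4096 entries and 2048 iterations instead of the previous fixed 1024.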
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 0a1be5259be0..1207c3095027 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -524,7 +524,7 @@ static void b53_srab_prepare_irq(struct platform_device *pdev)
port->num = i;
port->dev = dev;
- port->irq = platform_get_irq_byname(pdev, name);
+ port->irq = platform_get_irq_byname_optional(pdev, name);
kfree(name);
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 5c444cd722bd..d30542fc556a 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -810,10 +810,15 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
/* Trapped into security mode allows packet forwarding through VLAN
- * table lookup.
+ * table lookup. CPU port is set to fallback mode to let untagged
+ * frames pass through.
*/
- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
- MT7530_PORT_SECURITY_MODE);
+ if (dsa_is_cpu_port(ds, port))
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_FALLBACK_MODE);
+ else
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_SECURITY_MODE);
/* Set the port as a user port which is to be able to recognize VID
* from incoming packets before fetching entry within the VLAN table.
@@ -1080,12 +1085,6 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u16 vid;
- /* The port is kept as VLAN-unaware if bridge with vlan_filtering not
- * being set.
- */
- if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
- return;
-
mutex_lock(&priv->reg_mutex);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
@@ -1111,12 +1110,6 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u16 vid, pvid;
- /* The port is kept as VLAN-unaware if bridge with vlan_filtering not
- * being set.
- */
- if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
- return 0;
-
mutex_lock(&priv->reg_mutex);
pvid = priv->ports[port].pvid;
@@ -1230,6 +1223,7 @@ mt7530_setup(struct dsa_switch *ds)
* as two netdev instances.
*/
dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
+ ds->configure_vlan_while_not_filtering = true;
if (priv->id == ID_MT7530) {
regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 979bb6374678..d45eb7540703 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -152,6 +152,12 @@ enum mt7530_port_mode {
/* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */
MT7530_PORT_MATRIX_MODE = PORT_VLAN(0),
+ /* Fallback Mode: Forward received frames with ingress ports that do
+ * not belong to the VLAN member. Frames whose VID is not listed on
+ * the VLAN table are forwarded by the PCR_MATRIX members.
+ */
+ MT7530_PORT_FALLBACK_MODE = PORT_VLAN(1),
+
/* Security Mode: Discard any frame due to ingress membership
* violation or VID missed on the VLAN table.
*/
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 2b4a723c8306..7627ea61e0ea 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2233,26 +2233,34 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
mv88e6xxx_reg_unlock(chip);
}
-static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev,
+static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
+ int tree_index, int sw_index,
int port, struct net_device *br)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
+ if (tree_index != ds->dst->index)
+ return 0;
+
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_pvt_map(chip, dev, port);
+ err = mv88e6xxx_pvt_map(chip, sw_index, port);
mv88e6xxx_reg_unlock(chip);
return err;
}
-static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int dev,
+static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
+ int tree_index, int sw_index,
int port, struct net_device *br)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ if (tree_index != ds->dst->index)
+ return;
+
mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_pvt_map(chip, dev, port))
+ if (mv88e6xxx_pvt_map(chip, sw_index, port))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
mv88e6xxx_reg_unlock(chip);
}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 2098f19b534d..9c07b4f3d345 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -534,21 +534,21 @@ static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, u8 lane,
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_PCS_CONTROL_1, &val);
+ MV88E6390_10G_CTRL1, &val);
if (err)
return err;
if (up)
- new_val = val & ~(MV88E6390_PCS_CONTROL_1_RESET |
- MV88E6390_PCS_CONTROL_1_LOOPBACK |
- MV88E6390_PCS_CONTROL_1_PDOWN);
+ new_val = val & ~(MDIO_CTRL1_RESET |
+ MDIO_PCS_CTRL1_LOOPBACK |
+ MDIO_CTRL1_LPOWER);
else
- new_val = val | MV88E6390_PCS_CONTROL_1_PDOWN;
+ new_val = val | MDIO_CTRL1_LPOWER;
if (val != new_val)
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_PCS_CONTROL_1, new_val);
+ MV88E6390_10G_CTRL1, new_val);
return err;
}
@@ -748,8 +748,8 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
MV88E6390_SGMII_BMCR, bmcr);
}
-int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- u8 lane, struct phylink_link_state *state)
+static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
+ int port, u8 lane, struct phylink_link_state *state)
{
u16 lpa, status;
int err;
@@ -771,6 +771,45 @@ int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
}
+static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
+ int port, u8 lane, struct phylink_link_state *state)
+{
+ u16 status;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_10G_STAT1, &status);
+ if (err)
+ return err;
+
+ state->link = !!(status & MDIO_STAT1_LSTATUS);
+ if (state->link) {
+ state->speed = SPEED_10000;
+ state->duplex = DUPLEX_FULL;
+ }
+
+ return 0;
+}
+
+int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state)
+{
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane,
+ state);
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ return mv88e6390_serdes_pcs_get_state_10g(chip, port, lane,
+ state);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
u8 lane)
{
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 7990cadba4c2..14315f26228a 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -40,11 +40,8 @@
#define MV88E6390_PORT10_LANE3 0x17
/* 10GBASE-R and 10GBASE-X4/X2 */
-#define MV88E6390_PCS_CONTROL_1 0x1000
-#define MV88E6390_PCS_CONTROL_1_RESET BIT(15)
-#define MV88E6390_PCS_CONTROL_1_LOOPBACK BIT(14)
-#define MV88E6390_PCS_CONTROL_1_SPEED BIT(13)
-#define MV88E6390_PCS_CONTROL_1_PDOWN BIT(11)
+#define MV88E6390_10G_CTRL1 (0x1000 + MDIO_CTRL1)
+#define MV88E6390_10G_STAT1 (0x1000 + MDIO_STAT1)
/* 1000BASE-X and SGMII */
#define MV88E6390_SGMII_BMCR (0x2000 + MII_BMCR)
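The local PCS control/status macros are replaced by offsets built from the generic MDIO register set: MV88E6390_10G_CTRL1 is 0x1000 + MDIO_CTRL1 = 0x1000 (the old MV88E6390_PCS_CONTROL_1 value) and MV88E6390_10G_STAT1 is 0x1000 + MDIO_STAT1 = 0x1001. Assuming the standard mdio.h values, the bit names used in mv88e6390_serdes_power_10g() land on the same positions as the removed defines (MDIO_CTRL1_RESET bit 15, MDIO_PCS_CTRL1_LOOPBACK bit 14, MDIO_CTRL1_LPOWER bit 11), so the power-up/down logic is unchanged while the new 10G read path gains MDIO_STAT1_LSTATUS for link state.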
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index e2c6bf0e430e..d2b114c96952 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -7,6 +7,7 @@
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include <linux/packing.h>
#include <linux/module.h>
@@ -236,6 +237,10 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
if (felix->info->pcs_init)
felix->info->pcs_init(ocelot, port, link_an_mode, state);
+
+ if (felix->info->port_sched_speed_set)
+ felix->info->port_sched_speed_set(ocelot, port,
+ state->speed);
}
static void felix_phylink_mac_an_restart(struct dsa_switch *ds, int port)
@@ -288,6 +293,27 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
QSYS_SWITCH_PORT_MODE, port);
}
+static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
+{
+ int i;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_QOS_CFG_QOS_PCP_ENA,
+ ANA_PORT_QOS_CFG_QOS_PCP_ENA,
+ ANA_PORT_QOS_CFG,
+ port);
+
+ for (i = 0; i < FELIX_NUM_TC * 2; i++) {
+ ocelot_rmw_ix(ocelot,
+ (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
+ ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
+ ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
+ ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
+ ANA_PORT_PCP_DEI_MAP,
+ port, i);
+ }
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -495,6 +521,23 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return 0;
}
+static struct ptp_clock_info ocelot_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "felix ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = OCELOT_PTP_PINS_NUM,
+ .n_pins = OCELOT_PTP_PINS_NUM,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+ .verify = ocelot_ptp_verify,
+ .enable = ocelot_ptp_enable,
+};
+
/* Hardware initialization done here so that we can allocate structures with
* devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
* us to allocate structures twice (leak memory) and map PCI memory twice
@@ -505,12 +548,21 @@ static int felix_setup(struct dsa_switch *ds)
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
int port, err;
+ int tc;
err = felix_init_structs(felix, ds->num_ports);
if (err)
return err;
ocelot_init(ocelot);
+ if (ocelot->ptp) {
+ err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
+ if (err) {
+ dev_err(ocelot->dev,
+ "Timestamp initialization failed\n");
+ ocelot->ptp = 0;
+ }
+ }
for (port = 0; port < ds->num_ports; port++) {
ocelot_init_port(ocelot, port);
@@ -520,6 +572,11 @@ static int felix_setup(struct dsa_switch *ds)
ocelot_configure_cpu(ocelot, port,
OCELOT_TAG_PREFIX_NONE,
OCELOT_TAG_PREFIX_LONG);
+
+ /* Set the default QoS classification based on the PCP and DEI
+ * bits of the VLAN tag.
+ */
+ felix_port_qos_map_init(ocelot, port);
}
/* Include the CPU port module in the forwarding mask for unknown
@@ -530,6 +587,12 @@ static int felix_setup(struct dsa_switch *ds)
ocelot_write_rix(ocelot,
ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
ANA_PGID_PGID, PGID_UC);
+ /* Setup the per-traffic class flooding PGIDs */
+ for (tc = 0; tc < FELIX_NUM_TC; tc++)
+ ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST(PGID_UC),
+ ANA_FLOODING, tc);
ds->mtu_enforcement_ingress = true;
/* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
@@ -549,6 +612,7 @@ static void felix_teardown(struct dsa_switch *ds)
if (felix->info->mdio_bus_free)
felix->info->mdio_bus_free(ocelot);
+ ocelot_deinit_timestamp(ocelot);
/* stop workqueue thread */
ocelot_deinit(ocelot);
}
@@ -670,6 +734,19 @@ static void felix_port_policer_del(struct dsa_switch *ds, int port)
ocelot_port_policer_del(ocelot, port);
}
+static int felix_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->port_setup_tc)
+ return felix->info->port_setup_tc(ds, port, type, type_data);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.setup = felix_setup,
@@ -708,6 +785,7 @@ static const struct dsa_switch_ops felix_switch_ops = {
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
+ .port_setup_tc = felix_port_setup_tc,
};
static struct felix_info *felix_instance_tbl[] = {
@@ -740,6 +818,11 @@ static int felix_pci_probe(struct pci_dev *pdev,
struct felix *felix;
int err;
+ if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+ dev_info(&pdev->dev, "device is disabled, skipping\n");
+ return -ENODEV;
+ }
+
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "device enable failed\n");
@@ -791,6 +874,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
ds->dev = &pdev->dev;
ds->num_ports = felix->info->num_ports;
+ ds->num_tx_queues = felix->info->num_tx_queues;
ds->ops = &felix_switch_ops;
ds->priv = ocelot;
felix->ds = ds;
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 9af106513e53..352f7b940af7 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -5,6 +5,7 @@
#define _MSCC_FELIX_H
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
+#define FELIX_NUM_TC 8
/* Platform-specific information */
struct felix_info {
@@ -19,6 +20,7 @@ struct felix_info {
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int num_ports;
+ int num_tx_queues;
struct vcap_field *vcap_is2_keys;
struct vcap_field *vcap_is2_actions;
const struct vcap_props *vcap;
@@ -34,6 +36,10 @@ struct felix_info {
struct phylink_link_state *state);
int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port,
phy_interface_t phy_mode);
+ int (*port_setup_tc)(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data);
+ void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
+ u32 speed);
};
extern struct felix_info felix_info_vsc9959;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 8bf395f12b47..85e34d85cc51 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -3,9 +3,12 @@
* Copyright 2018-2019 NXP Semiconductors
*/
#include <linux/fsl/enetc_mdio.h>
+#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
+#include <net/pkt_sched.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include "felix.h"
@@ -27,6 +30,8 @@
#define USXGMII_LPA_DUPLEX(lpa) (((lpa) & GENMASK(12, 12)) >> 12)
#define USXGMII_LPA_SPEED(lpa) (((lpa) & GENMASK(11, 9)) >> 9)
+#define VSC9959_TAS_GCL_ENTRY_MAX 63
+
enum usxgmii_speed {
USXGMII_SPEED_10 = 0,
USXGMII_SPEED_100 = 1,
@@ -202,7 +207,7 @@ static const u32 vsc9959_qsys_regmap[] = {
REG(QSYS_QMAXSDU_CFG_6, 0x00f62c),
REG(QSYS_QMAXSDU_CFG_7, 0x00f648),
REG(QSYS_PREEMPTION_CFG, 0x00f664),
- REG_RESERVED(QSYS_CIR_CFG),
+ REG(QSYS_CIR_CFG, 0x000000),
REG(QSYS_EIR_CFG, 0x000004),
REG(QSYS_SE_CFG, 0x000008),
REG(QSYS_SE_DWRR_CFG, 0x00000c),
@@ -313,6 +318,8 @@ static const u32 vsc9959_ptp_regmap[] = {
REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
REG(PTP_CFG_MISC, 0x0000a0),
REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
@@ -1207,6 +1214,186 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
mdiobus_unregister(felix->imdio);
}
+static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
+ u32 speed)
+{
+ ocelot_rmw_rix(ocelot,
+ QSYS_TAG_CONFIG_LINK_SPEED(speed),
+ QSYS_TAG_CONFIG_LINK_SPEED_M,
+ QSYS_TAG_CONFIG, port);
+}
+
+static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time,
+ u64 cycle_time,
+ struct timespec64 *new_base_ts)
+{
+ struct timespec64 ts;
+ ktime_t new_base_time;
+ ktime_t current_time;
+
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ new_base_time = base_time;
+
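+ /* If the base time is already in the past, advance it by a whole
+ * number of cycle times so that it lands in the future
+ */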
+ if (base_time < current_time) {
+ u64 nr_of_cycles = current_time - base_time;
+
+ do_div(nr_of_cycles, cycle_time);
+ new_base_time += cycle_time * (nr_of_cycles + 1);
+ }
+
+ *new_base_ts = ktime_to_timespec64(new_base_time);
+}
+
+static u32 vsc9959_tas_read_cfg_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, QSYS_TAS_PARAM_CFG_CTRL);
+}
+
+static void vsc9959_tas_gcl_set(struct ocelot *ocelot, const u32 gcl_ix,
+ struct tc_taprio_sched_entry *entry)
+{
+ ocelot_write(ocelot,
+ QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(gcl_ix) |
+ QSYS_GCL_CFG_REG_1_GATE_STATE(entry->gate_mask),
+ QSYS_GCL_CFG_REG_1);
+ ocelot_write(ocelot, entry->interval, QSYS_GCL_CFG_REG_2);
+}
+
+static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ struct timespec64 base_ts;
+ int ret, i;
+ u32 val;
+
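+ /* Disable time-aware scheduling for this port and leave all gates open */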
+ if (!taprio->enable) {
+ ocelot_rmw_rix(ocelot,
+ QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF),
+ QSYS_TAG_CONFIG_ENABLE |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
+ QSYS_TAG_CONFIG, port);
+
+ return 0;
+ }
+
+ if (taprio->cycle_time > NSEC_PER_SEC ||
+ taprio->cycle_time_extension >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
+ return -ERANGE;
+
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) |
+ QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M |
+ QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Hardware errata - the admin config cannot be overwritten while a
+ * previous config change is still pending; the TAS module needs a
+ * reset before a new config can be applied.
+ */
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING)
+ return -EBUSY;
+
+ ocelot_rmw_rix(ocelot,
+ QSYS_TAG_CONFIG_ENABLE |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF) |
+ QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(0xFF),
+ QSYS_TAG_CONFIG_ENABLE |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE_M |
+ QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M,
+ QSYS_TAG_CONFIG, port);
+
+ vsc9959_new_base_time(ocelot, taprio->base_time,
+ taprio->cycle_time, &base_ts);
+ ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
+ ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec), QSYS_PARAM_CFG_REG_2);
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot,
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val) |
+ QSYS_PARAM_CFG_REG_3_LIST_LENGTH(taprio->num_entries),
+ QSYS_PARAM_CFG_REG_3);
+ ocelot_write(ocelot, taprio->cycle_time, QSYS_PARAM_CFG_REG_4);
+ ocelot_write(ocelot, taprio->cycle_time_extension, QSYS_PARAM_CFG_REG_5);
+
+ for (i = 0; i < taprio->num_entries; i++)
+ vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
+
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ ret = readx_poll_timeout(vsc9959_tas_read_cfg_status, ocelot, val,
+ !(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE),
+ 10, 100000);
+
+ return ret;
+}
+
+static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
+ struct tc_cbs_qopt_offload *cbs_qopt)
+{
+ struct ocelot *ocelot = ds->priv;
+ int port_ix = port * 8 + cbs_qopt->queue;
+ u32 rate, burst;
+
+ if (cbs_qopt->queue >= ds->num_tx_queues)
+ return -EINVAL;
+
+ if (!cbs_qopt->enable) {
+ ocelot_write_gix(ocelot, QSYS_CIR_CFG_CIR_RATE(0) |
+ QSYS_CIR_CFG_CIR_BURST(0),
+ QSYS_CIR_CFG, port_ix);
+
+ ocelot_rmw_gix(ocelot, 0, QSYS_SE_CFG_SE_AVB_ENA,
+ QSYS_SE_CFG, port_ix);
+
+ return 0;
+ }
+
+ /* Rate unit is 100 kbps */
+ rate = DIV_ROUND_UP(cbs_qopt->idleslope, 100);
+ /* Avoid using zero rate */
+ rate = clamp_t(u32, rate, 1, GENMASK(14, 0));
+ /* Burst unit is 4kB */
+ burst = DIV_ROUND_UP(cbs_qopt->hicredit, 4096);
+ /* Avoid using zero burst size */
+ burst = clamp_t(u32, burst, 1, GENMASK(5, 0));
+ ocelot_write_gix(ocelot,
+ QSYS_CIR_CFG_CIR_RATE(rate) |
+ QSYS_CIR_CFG_CIR_BURST(burst),
+ QSYS_CIR_CFG,
+ port_ix);
+
+ ocelot_rmw_gix(ocelot,
+ QSYS_SE_CFG_SE_FRM_MODE(0) |
+ QSYS_SE_CFG_SE_AVB_ENA,
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE_M,
+ QSYS_SE_CFG,
+ port_ix);
+
+ return 0;
+}
+
+static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return vsc9959_qos_port_tas_set(ocelot, port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return vsc9959_qos_port_cbs_set(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
struct felix_info felix_info_vsc9959 = {
.target_io_res = vsc9959_target_io_res,
.port_io_res = vsc9959_port_io_res,
@@ -1222,6 +1409,7 @@ struct felix_info felix_info_vsc9959 = {
.shared_queue_sz = 128 * 1024,
.num_mact_rows = 2048,
.num_ports = 6,
+ .num_tx_queues = FELIX_NUM_TC,
.switch_pci_bar = 4,
.imdio_pci_bar = 0,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
@@ -1230,4 +1418,6 @@ struct felix_info felix_info_vsc9959 = {
.pcs_an_restart = vsc9959_pcs_an_restart,
.pcs_link_state = vsc9959_pcs_link_state,
.prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
+ .port_setup_tc = vsc9959_port_setup_tc,
+ .port_sched_speed_set = vsc9959_sched_speed_set,
};
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index 68c3086af9af..5e83b365f17a 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -34,3 +34,12 @@ config NET_DSA_SJA1105_TAS
This enables support for the TTEthernet-based egress scheduling
engine in the SJA1105 DSA driver, which is controlled using a
hardware offload of the tc-taprio qdisc.
+
+config NET_DSA_SJA1105_VL
+ bool "Support for Virtual Links on NXP SJA1105"
+ depends on NET_DSA_SJA1105_TAS
+ help
+ This enables support for flow classification using capable devices
+ (SJA1105T, SJA1105Q, SJA1105S). The following actions are supported:
+ - redirect, trap, drop
+ - time-based ingress policing, via the tc-gate action
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
index 8943d8d66f2b..c88e56a29db8 100644
--- a/drivers/net/dsa/sja1105/Makefile
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -17,3 +17,7 @@ endif
ifdef CONFIG_NET_DSA_SJA1105_TAS
sja1105-objs += sja1105_tas.o
endif
+
+ifdef CONFIG_NET_DSA_SJA1105_VL
+sja1105-objs += sja1105_vl.o
+endif
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 8b60dbd567f2..198d2a7d7f95 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -8,6 +8,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/dsa/sja1105.h>
+#include <linux/dsa/8021q.h>
#include <net/dsa.h>
#include <linux/mutex.h>
#include "sja1105_static_config.h"
@@ -36,6 +37,7 @@ struct sja1105_regs {
u64 status;
u64 port_control;
u64 rgu;
+ u64 vl_status;
u64 config;
u64 sgmii;
u64 rmii_pll1;
@@ -49,6 +51,7 @@ struct sja1105_regs {
u64 ptpschtm;
u64 ptpegr_ts[SJA1105_NUM_PORTS];
u64 pad_mii_tx[SJA1105_NUM_PORTS];
+ u64 pad_mii_rx[SJA1105_NUM_PORTS];
u64 pad_mii_id[SJA1105_NUM_PORTS];
u64 cgu_idiv[SJA1105_NUM_PORTS];
u64 mii_tx_clk[SJA1105_NUM_PORTS];
@@ -84,6 +87,12 @@ struct sja1105_info {
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
const struct sja1105_regs *regs;
+ /* Both E/T and P/Q/R/S have quirks when it comes to popping the S-Tag
+ * from double-tagged frames. E/T will pop it only when it's equal to
+ * TPID from the General Parameters Table, while P/Q/R/S will only
+ * pop it when it's equal to TPID2.
+ */
+ u16 qinq_tpid;
int (*reset_cmd)(struct dsa_switch *ds);
int (*setup_rgmii_delay)(const void *ctx, int port);
/* Prototypes from include/net/dsa.h */
@@ -96,17 +105,52 @@ struct sja1105_info {
const char *name;
};
+enum sja1105_key_type {
+ SJA1105_KEY_BCAST,
+ SJA1105_KEY_TC,
+ SJA1105_KEY_VLAN_UNAWARE_VL,
+ SJA1105_KEY_VLAN_AWARE_VL,
+};
+
+struct sja1105_key {
+ enum sja1105_key_type type;
+
+ union {
+ /* SJA1105_KEY_TC */
+ struct {
+ int pcp;
+ } tc;
+
+ /* SJA1105_KEY_VLAN_UNAWARE_VL */
+ /* SJA1105_KEY_VLAN_AWARE_VL */
+ struct {
+ u64 dmac;
+ u16 vid;
+ u16 pcp;
+ } vl;
+ };
+};
+
enum sja1105_rule_type {
SJA1105_RULE_BCAST_POLICER,
SJA1105_RULE_TC_POLICER,
+ SJA1105_RULE_VL,
+};
+
+enum sja1105_vl_type {
+ SJA1105_VL_NONCRITICAL,
+ SJA1105_VL_RATE_CONSTRAINED,
+ SJA1105_VL_TIME_TRIGGERED,
};
struct sja1105_rule {
struct list_head list;
unsigned long cookie;
unsigned long port_mask;
+ struct sja1105_key key;
enum sja1105_rule_type type;
+ /* Action */
union {
/* SJA1105_RULE_BCAST_POLICER */
struct {
@@ -116,30 +160,64 @@ struct sja1105_rule {
/* SJA1105_RULE_TC_POLICER */
struct {
int sharindx;
- int tc;
} tc_pol;
+
+ /* SJA1105_RULE_VL */
+ struct {
+ enum sja1105_vl_type type;
+ unsigned long destports;
+ int sharindx;
+ int maxlen;
+ int ipv;
+ u64 base_time;
+ u64 cycle_time;
+ int num_entries;
+ struct action_gate_entry *entries;
+ struct flow_stats stats;
+ } vl;
};
};
struct sja1105_flow_block {
struct list_head rules;
bool l2_policer_used[SJA1105_NUM_L2_POLICERS];
+ int num_virtual_links;
+};
+
+struct sja1105_bridge_vlan {
+ struct list_head list;
+ int port;
+ u16 vid;
+ bool pvid;
+ bool untagged;
+};
+
+enum sja1105_vlan_state {
+ SJA1105_VLAN_UNAWARE,
+ SJA1105_VLAN_BEST_EFFORT,
+ SJA1105_VLAN_FILTERING_FULL,
};
struct sja1105_private {
struct sja1105_static_config static_config;
bool rgmii_rx_delay[SJA1105_NUM_PORTS];
bool rgmii_tx_delay[SJA1105_NUM_PORTS];
+ bool best_effort_vlan_filtering;
const struct sja1105_info *info;
struct gpio_desc *reset_gpio;
struct spi_device *spidev;
struct dsa_switch *ds;
+ struct list_head dsa_8021q_vlans;
+ struct list_head bridge_vlans;
+ struct list_head crosschip_links;
struct sja1105_flow_block flow_block;
struct sja1105_port ports[SJA1105_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ bool expect_dsa_8021q;
+ enum sja1105_vlan_state vlan_state;
struct sja1105_tagger_data tagger_data;
struct sja1105_ptp_data ptp_data;
struct sja1105_tas_data tas_data;
@@ -160,11 +238,14 @@ enum sja1105_reset_reason {
SJA1105_AGEING_TIME,
SJA1105_SCHEDULING,
SJA1105_BEST_EFFORT_POLICING,
+ SJA1105_VIRTUAL_LINKS,
};
int sja1105_static_config_reload(struct sja1105_private *priv,
enum sja1105_reset_reason reason);
+void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
+
/* From sja1105_spi.c */
int sja1105_xfer_buf(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr,
@@ -249,17 +330,25 @@ size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
/* From sja1105_flower.c */
int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress);
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress);
+int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
void sja1105_flower_setup(struct dsa_switch *ds);
void sja1105_flower_teardown(struct dsa_switch *ds);
+struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie);
#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 0fdc2d55fff6..2a9b8a6a5306 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -7,12 +7,16 @@
#define SJA1105_SIZE_CGU_CMD 4
-struct sja1105_cfg_pad_mii_tx {
+/* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
+struct sja1105_cfg_pad_mii {
u64 d32_os;
+ u64 d32_ih;
u64 d32_ipud;
+ u64 d10_ih;
u64 d10_os;
u64 d10_ipud;
u64 ctrl_os;
+ u64 ctrl_ih;
u64 ctrl_ipud;
u64 clk_os;
u64 clk_ih;
@@ -338,16 +342,19 @@ static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
/* AGU */
static void
-sja1105_cfg_pad_mii_tx_packing(void *buf, struct sja1105_cfg_pad_mii_tx *cmd,
- enum packing_op op)
+sja1105_cfg_pad_mii_packing(void *buf, struct sja1105_cfg_pad_mii *cmd,
+ enum packing_op op)
{
const int size = 4;
sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op);
+ sja1105_packing(buf, &cmd->d32_ih, 26, 26, size, op);
sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op);
+ sja1105_packing(buf, &cmd->d10_ih, 18, 18, size, op);
sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op);
+ sja1105_packing(buf, &cmd->ctrl_ih, 10, 10, size, op);
sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op);
sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op);
sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op);
@@ -358,7 +365,7 @@ static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
int port)
{
const struct sja1105_regs *regs = priv->info->regs;
- struct sja1105_cfg_pad_mii_tx pad_mii_tx;
+ struct sja1105_cfg_pad_mii pad_mii_tx = {0};
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
/* Payload */
@@ -375,12 +382,45 @@ static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
pad_mii_tx.clk_os = 3; /* TX_CLK output stage */
pad_mii_tx.clk_ih = 0; /* TX_CLK input hysteresis (default) */
pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
- sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK);
+ sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_tx, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
+static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii pad_mii_rx = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ /* Payload */
+ pad_mii_rx.d32_ih = 0; /* RXD[3:2] input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.d32_ipud = 2; /* RXD[3:2] input weak pull-up/down */
+ /* plain input (default) */
+ pad_mii_rx.d10_ih = 0; /* RXD[1:0] input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.d10_ipud = 2; /* RXD[1:0] input weak pull-up/down */
+ /* plain input (default) */
+ pad_mii_rx.ctrl_ih = 0; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
+ /* input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.ctrl_ipud = 3; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
+ /* input stage weak pull-up/down: */
+ /* pull-down */
+ pad_mii_rx.clk_os = 2; /* RX_CLK/RXC output stage: */
+ /* medium noise/fast speed (default) */
+ pad_mii_rx.clk_ih = 0; /* RX_CLK/RXC input hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.clk_ipud = 2; /* RX_CLK/RXC input pull-up/down: */
+ /* plain input (default) */
+ sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_rx, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_rx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
static void
sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
enum packing_op op)
@@ -669,10 +709,14 @@ int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
phy_mode);
return -EINVAL;
}
- if (rc)
+ if (rc) {
dev_err(dev, "Clocking setup for port %d failed: %d\n",
port, rc);
- return rc;
+ return rc;
+ }
+
+ /* Internally pull down the RX_DV/CRS_DV/RX_CTL and RX_ER inputs */
+ return sja1105_cfg_pad_rx_config(priv, port);
}
int sja1105_clocking_setup(struct sja1105_private *priv)
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index bf9b36ff35bf..2a8fbd7fdedc 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -97,6 +97,12 @@
#define SJA1105_SIZE_DYN_CMD 4
+#define SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105PQRS_SJA1105_SIZE_VL_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_LOOKUP_ENTRY)
+
#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
SJA1105_SIZE_DYN_CMD
@@ -127,6 +133,9 @@
#define SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY)
+#define SJA1105_SIZE_RETAGGING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_RETAGGING_ENTRY)
+
#define SJA1105_MAX_DYN_CMD_SIZE \
SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD
@@ -147,6 +156,29 @@ enum sja1105_hostcmd {
};
static void
+sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->index, 9, 0, size, op);
+}
+
+static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const int size = SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD;
+
+ sja1105_packing(buf, &entry->egrmirr, 21, 17, size, op);
+ sja1105_packing(buf, &entry->ingrmirr, 16, 16, size, op);
+ return size;
+}
+
+static void
sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -496,6 +528,20 @@ sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
}
+static void
+sja1105_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_RETAGGING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->valident, 29, 29, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op);
+ sja1105_packing(p, &cmd->index, 5, 0, size, op);
+}
+
#define OP_READ BIT(0)
#define OP_WRITE BIT(1)
#define OP_DEL BIT(2)
@@ -505,6 +551,16 @@ sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_SCHEDULE] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1105et_vl_lookup_entry_packing,
+ .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .access = OP_WRITE,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = 0x35,
+ },
+ [BLK_IDX_VL_POLICING] = {0},
+ [BLK_IDX_VL_FORWARDING] = {0},
[BLK_IDX_L2_LOOKUP] = {
.entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
.cmd_packing = sja1105et_l2_lookup_cmd_packing,
@@ -548,6 +604,7 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
},
[BLK_IDX_SCHEDULE_PARAMS] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.entry_packing = sja1105et_l2_lookup_params_entry_packing,
.cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
@@ -566,6 +623,14 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
.packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
.addr = 0x34,
},
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1105_retagging_entry_packing,
+ .cmd_packing = sja1105_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = 0x31,
+ },
[BLK_IDX_XMII_PARAMS] = {0},
};
@@ -573,6 +638,16 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_SCHEDULE] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1105_vl_lookup_entry_packing,
+ .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE),
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SJA1105_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = 0x47,
+ },
+ [BLK_IDX_VL_POLICING] = {0},
+ [BLK_IDX_VL_FORWARDING] = {0},
[BLK_IDX_L2_LOOKUP] = {
.entry_packing = sja1105pqrs_dyn_l2_lookup_entry_packing,
.cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
@@ -616,6 +691,7 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
},
[BLK_IDX_SCHEDULE_PARAMS] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.entry_packing = sja1105et_l2_lookup_params_entry_packing,
.cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
@@ -641,6 +717,14 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
.packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
.addr = 0x34,
},
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1105_retagging_entry_packing,
+ .cmd_packing = sja1105_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = 0x38,
+ },
[BLK_IDX_XMII_PARAMS] = {0},
};
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index d742ffcbfce9..9133a831ec79 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -421,92 +421,96 @@ static char sja1105pqrs_extra_port_stats[][ETH_GSTRING_LEN] = {
void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_port_status status;
+ struct sja1105_port_status *status;
int rc, i, k = 0;
- memset(&status, 0, sizeof(status));
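+ /* The counters structure is too large to keep on the stack */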
+ status = kzalloc(sizeof(*status), GFP_KERNEL);
+ if (!status)
+ goto out;
- rc = sja1105_port_status_get(priv, &status, port);
+ rc = sja1105_port_status_get(priv, status, port);
if (rc < 0) {
dev_err(ds->dev, "Failed to read port %d counters: %d\n",
port, rc);
- return;
+ goto out;
}
memset(data, 0, ARRAY_SIZE(sja1105_port_stats) * sizeof(u64));
- data[k++] = status.mac.n_runt;
- data[k++] = status.mac.n_soferr;
- data[k++] = status.mac.n_alignerr;
- data[k++] = status.mac.n_miierr;
- data[k++] = status.mac.typeerr;
- data[k++] = status.mac.sizeerr;
- data[k++] = status.mac.tctimeout;
- data[k++] = status.mac.priorerr;
- data[k++] = status.mac.nomaster;
- data[k++] = status.mac.memov;
- data[k++] = status.mac.memerr;
- data[k++] = status.mac.invtyp;
- data[k++] = status.mac.intcyov;
- data[k++] = status.mac.domerr;
- data[k++] = status.mac.pcfbagdrop;
- data[k++] = status.mac.spcprior;
- data[k++] = status.mac.ageprior;
- data[k++] = status.mac.portdrop;
- data[k++] = status.mac.lendrop;
- data[k++] = status.mac.bagdrop;
- data[k++] = status.mac.policeerr;
- data[k++] = status.mac.drpnona664err;
- data[k++] = status.mac.spcerr;
- data[k++] = status.mac.agedrp;
- data[k++] = status.hl1.n_n664err;
- data[k++] = status.hl1.n_vlanerr;
- data[k++] = status.hl1.n_unreleased;
- data[k++] = status.hl1.n_sizeerr;
- data[k++] = status.hl1.n_crcerr;
- data[k++] = status.hl1.n_vlnotfound;
- data[k++] = status.hl1.n_ctpolerr;
- data[k++] = status.hl1.n_polerr;
- data[k++] = status.hl1.n_rxfrm;
- data[k++] = status.hl1.n_rxbyte;
- data[k++] = status.hl1.n_txfrm;
- data[k++] = status.hl1.n_txbyte;
- data[k++] = status.hl2.n_qfull;
- data[k++] = status.hl2.n_part_drop;
- data[k++] = status.hl2.n_egr_disabled;
- data[k++] = status.hl2.n_not_reach;
+ data[k++] = status->mac.n_runt;
+ data[k++] = status->mac.n_soferr;
+ data[k++] = status->mac.n_alignerr;
+ data[k++] = status->mac.n_miierr;
+ data[k++] = status->mac.typeerr;
+ data[k++] = status->mac.sizeerr;
+ data[k++] = status->mac.tctimeout;
+ data[k++] = status->mac.priorerr;
+ data[k++] = status->mac.nomaster;
+ data[k++] = status->mac.memov;
+ data[k++] = status->mac.memerr;
+ data[k++] = status->mac.invtyp;
+ data[k++] = status->mac.intcyov;
+ data[k++] = status->mac.domerr;
+ data[k++] = status->mac.pcfbagdrop;
+ data[k++] = status->mac.spcprior;
+ data[k++] = status->mac.ageprior;
+ data[k++] = status->mac.portdrop;
+ data[k++] = status->mac.lendrop;
+ data[k++] = status->mac.bagdrop;
+ data[k++] = status->mac.policeerr;
+ data[k++] = status->mac.drpnona664err;
+ data[k++] = status->mac.spcerr;
+ data[k++] = status->mac.agedrp;
+ data[k++] = status->hl1.n_n664err;
+ data[k++] = status->hl1.n_vlanerr;
+ data[k++] = status->hl1.n_unreleased;
+ data[k++] = status->hl1.n_sizeerr;
+ data[k++] = status->hl1.n_crcerr;
+ data[k++] = status->hl1.n_vlnotfound;
+ data[k++] = status->hl1.n_ctpolerr;
+ data[k++] = status->hl1.n_polerr;
+ data[k++] = status->hl1.n_rxfrm;
+ data[k++] = status->hl1.n_rxbyte;
+ data[k++] = status->hl1.n_txfrm;
+ data[k++] = status->hl1.n_txbyte;
+ data[k++] = status->hl2.n_qfull;
+ data[k++] = status->hl2.n_part_drop;
+ data[k++] = status->hl2.n_egr_disabled;
+ data[k++] = status->hl2.n_not_reach;
if (priv->info->device_id == SJA1105E_DEVICE_ID ||
priv->info->device_id == SJA1105T_DEVICE_ID)
- return;
+ goto out;
memset(data + k, 0, ARRAY_SIZE(sja1105pqrs_extra_port_stats) *
sizeof(u64));
for (i = 0; i < 8; i++) {
- data[k++] = status.hl2.qlevel_hwm[i];
- data[k++] = status.hl2.qlevel[i];
+ data[k++] = status->hl2.qlevel_hwm[i];
+ data[k++] = status->hl2.qlevel[i];
}
- data[k++] = status.ether.n_drops_nolearn;
- data[k++] = status.ether.n_drops_noroute;
- data[k++] = status.ether.n_drops_ill_dtag;
- data[k++] = status.ether.n_drops_dtag;
- data[k++] = status.ether.n_drops_sotag;
- data[k++] = status.ether.n_drops_sitag;
- data[k++] = status.ether.n_drops_utag;
- data[k++] = status.ether.n_tx_bytes_1024_2047;
- data[k++] = status.ether.n_tx_bytes_512_1023;
- data[k++] = status.ether.n_tx_bytes_256_511;
- data[k++] = status.ether.n_tx_bytes_128_255;
- data[k++] = status.ether.n_tx_bytes_65_127;
- data[k++] = status.ether.n_tx_bytes_64;
- data[k++] = status.ether.n_tx_mcast;
- data[k++] = status.ether.n_tx_bcast;
- data[k++] = status.ether.n_rx_bytes_1024_2047;
- data[k++] = status.ether.n_rx_bytes_512_1023;
- data[k++] = status.ether.n_rx_bytes_256_511;
- data[k++] = status.ether.n_rx_bytes_128_255;
- data[k++] = status.ether.n_rx_bytes_65_127;
- data[k++] = status.ether.n_rx_bytes_64;
- data[k++] = status.ether.n_rx_mcast;
- data[k++] = status.ether.n_rx_bcast;
+ data[k++] = status->ether.n_drops_nolearn;
+ data[k++] = status->ether.n_drops_noroute;
+ data[k++] = status->ether.n_drops_ill_dtag;
+ data[k++] = status->ether.n_drops_dtag;
+ data[k++] = status->ether.n_drops_sotag;
+ data[k++] = status->ether.n_drops_sitag;
+ data[k++] = status->ether.n_drops_utag;
+ data[k++] = status->ether.n_tx_bytes_1024_2047;
+ data[k++] = status->ether.n_tx_bytes_512_1023;
+ data[k++] = status->ether.n_tx_bytes_256_511;
+ data[k++] = status->ether.n_tx_bytes_128_255;
+ data[k++] = status->ether.n_tx_bytes_65_127;
+ data[k++] = status->ether.n_tx_bytes_64;
+ data[k++] = status->ether.n_tx_mcast;
+ data[k++] = status->ether.n_tx_bcast;
+ data[k++] = status->ether.n_rx_bytes_1024_2047;
+ data[k++] = status->ether.n_rx_bytes_512_1023;
+ data[k++] = status->ether.n_rx_bytes_256_511;
+ data[k++] = status->ether.n_rx_bytes_128_255;
+ data[k++] = status->ether.n_rx_bytes_65_127;
+ data[k++] = status->ether.n_rx_bytes_64;
+ data[k++] = status->ether.n_rx_mcast;
+ data[k++] = status->ether.n_rx_bcast;
+out:
+ kfree(status);
}
void sja1105_get_strings(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 5288a722e625..9ee8968610cd 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -2,9 +2,10 @@
/* Copyright 2020, NXP Semiconductors
*/
#include "sja1105.h"
+#include "sja1105_vl.h"
-static struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
- unsigned long cookie)
+struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie)
{
struct sja1105_rule *rule;
@@ -46,6 +47,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
rule->cookie = cookie;
rule->type = SJA1105_RULE_BCAST_POLICER;
rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ rule->key.type = SJA1105_KEY_BCAST;
new_rule = true;
}
@@ -117,7 +119,8 @@ static int sja1105_setup_tc_policer(struct sja1105_private *priv,
rule->cookie = cookie;
rule->type = SJA1105_RULE_TC_POLICER;
rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
- rule->tc_pol.tc = tc;
+ rule->key.type = SJA1105_KEY_TC;
+ rule->key.tc.pcp = tc;
new_rule = true;
}
@@ -169,14 +172,38 @@ out:
return rc;
}
-static int sja1105_flower_parse_policer(struct sja1105_private *priv, int port,
- struct netlink_ext_ack *extack,
- struct flow_cls_offload *cls,
- u64 rate_bytes_per_sec,
- s64 burst)
+static int sja1105_flower_policer(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key,
+ u64 rate_bytes_per_sec,
+ s64 burst)
+{
+ switch (key->type) {
+ case SJA1105_KEY_BCAST:
+ return sja1105_setup_bcast_policer(priv, extack, cookie, port,
+ rate_bytes_per_sec, burst);
+ case SJA1105_KEY_TC:
+ return sja1105_setup_tc_policer(priv, extack, cookie, port,
+ key->tc.pcp, rate_bytes_per_sec,
+ burst);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int sja1105_flower_parse_key(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ struct sja1105_key *key)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
+ bool is_bcast_dmac = false;
+ u64 dmac = U64_MAX;
+ u16 vid = U16_MAX;
+ u16 pcp = U16_MAX;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -213,16 +240,14 @@ static int sja1105_flower_parse_policer(struct sja1105_private *priv, int port,
return -EOPNOTSUPP;
}
- if (!ether_addr_equal_masked(match.key->dst, bcast,
- match.mask->dst)) {
+ if (!ether_addr_equal(match.mask->dst, bcast)) {
NL_SET_ERR_MSG_MOD(extack,
- "Only matching on broadcast DMAC is supported");
+ "Masked matching on MAC not supported");
return -EOPNOTSUPP;
}
- return sja1105_setup_bcast_policer(priv, extack, cls->cookie,
- port, rate_bytes_per_sec,
- burst);
+ dmac = ether_addr_to_u64(match.key->dst);
+ is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -230,22 +255,46 @@ static int sja1105_flower_parse_policer(struct sja1105_private *priv, int port,
flow_rule_match_vlan(rule, &match);
- if (match.key->vlan_id & match.mask->vlan_id) {
+ if (match.mask->vlan_id &&
+ match.mask->vlan_id != VLAN_VID_MASK) {
NL_SET_ERR_MSG_MOD(extack,
- "Matching on VID is not supported");
+ "Masked matching on VID is not supported");
return -EOPNOTSUPP;
}
- if (match.mask->vlan_priority != 0x7) {
+ if (match.mask->vlan_priority &&
+ match.mask->vlan_priority != 0x7) {
NL_SET_ERR_MSG_MOD(extack,
"Masked matching on PCP is not supported");
return -EOPNOTSUPP;
}
- return sja1105_setup_tc_policer(priv, extack, cls->cookie, port,
- match.key->vlan_priority,
- rate_bytes_per_sec,
- burst);
+ if (match.mask->vlan_id)
+ vid = match.key->vlan_id;
+ if (match.mask->vlan_priority)
+ pcp = match.key->vlan_priority;
+ }
+
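+ /* Collapse the matched fields into one of the supported key types */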
+ if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
+ key->type = SJA1105_KEY_BCAST;
+ return 0;
+ }
+ if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
+ key->type = SJA1105_KEY_TC;
+ key->tc.pcp = pcp;
+ return 0;
+ }
+ if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
+ key->type = SJA1105_KEY_VLAN_AWARE_VL;
+ key->vl.dmac = dmac;
+ key->vl.vid = vid;
+ key->vl.pcp = pcp;
+ return 0;
+ }
+ if (dmac != U64_MAX) {
+ key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
+ key->vl.dmac = dmac;
+ return 0;
}
NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
@@ -259,22 +308,110 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack = cls->common.extack;
struct sja1105_private *priv = ds->priv;
const struct flow_action_entry *act;
- int rc = -EOPNOTSUPP, i;
+ unsigned long cookie = cls->cookie;
+ bool routing_rule = false;
+ struct sja1105_key key;
+ bool gate_rule = false;
+ bool vl_rule = false;
+ int rc, i;
+
+ rc = sja1105_flower_parse_key(priv, extack, cls, &key);
+ if (rc)
+ return rc;
+
+ rc = -EOPNOTSUPP;
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- rc = sja1105_flower_parse_policer(priv, port, extack, cls,
- act->police.rate_bytes_ps,
- act->police.burst);
+ rc = sja1105_flower_policer(priv, port, extack, cookie,
+ &key,
+ act->police.rate_bytes_ps,
+ act->police.burst);
+ if (rc)
+ goto out;
+ break;
+ case FLOW_ACTION_TRAP: {
+ int cpu = dsa_upstream_port(ds, port);
+
+ routing_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, BIT(cpu), true);
+ if (rc)
+ goto out;
+ break;
+ }
+ case FLOW_ACTION_REDIRECT: {
+ struct dsa_port *to_dp;
+
+ to_dp = dsa_port_from_netdev(act->dev);
+ if (IS_ERR(to_dp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a switch port");
+ return -EOPNOTSUPP;
+ }
+
+ routing_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, BIT(to_dp->index), true);
+ if (rc)
+ goto out;
+ break;
+ }
+ case FLOW_ACTION_DROP:
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, 0, false);
+ if (rc)
+ goto out;
+ break;
+ case FLOW_ACTION_GATE:
+ gate_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_gate(priv, port, extack, cookie,
+ &key, act->gate.index,
+ act->gate.prio,
+ act->gate.basetime,
+ act->gate.cycletime,
+ act->gate.cycletimeext,
+ act->gate.num_entries,
+ act->gate.entries);
+ if (rc)
+ goto out;
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"Action not supported");
- break;
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ if (vl_rule && !rc) {
+ /* Delay scheduling configuration until DESTPORTS has been
+ * populated by all other actions.
+ */
+ if (gate_rule) {
+ if (!routing_rule) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload gate action together with redirect or trap");
+ return -EOPNOTSUPP;
+ }
+ rc = sja1105_init_scheduling(priv);
+ if (rc)
+ goto out;
}
+
+ rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
}
+out:
return rc;
}
@@ -289,6 +426,9 @@ int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
if (!rule)
return 0;
+ if (rule->type == SJA1105_RULE_VL)
+ return sja1105_vl_delete(priv, port, rule, cls->common.extack);
+
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
if (rule->type == SJA1105_RULE_BCAST_POLICER) {
@@ -297,7 +437,7 @@ int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
old_sharindx = policing[bcast].sharindx;
policing[bcast].sharindx = port;
} else if (rule->type == SJA1105_RULE_TC_POLICER) {
- int index = (port * SJA1105_NUM_TC) + rule->tc_pol.tc;
+ int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;
old_sharindx = policing[index].sharindx;
policing[index].sharindx = port;
@@ -315,6 +455,27 @@ int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
+int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
+ int rc;
+
+ if (!rule)
+ return 0;
+
+ if (rule->type != SJA1105_RULE_VL)
+ return 0;
+
+ rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
+ cls->common.extack);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
void sja1105_flower_setup(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 472f4eb20c49..44ce7882dfb1 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -25,6 +25,8 @@
#include "sja1105_sgmii.h"
#include "sja1105_tas.h"
+static const struct dsa_switch_ops sja1105_switch_ops;
+
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
unsigned int startup_delay)
{
@@ -301,7 +303,8 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
.tag_port = 0,
.vlanid = 1,
};
- int i;
+ struct dsa_switch *ds = priv->ds;
+ int port;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
@@ -322,12 +325,31 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 1;
/* VLAN 1: all DT-defined ports are members; no restrictions on
- * forwarding; always transmit priority-tagged frames as untagged.
+ * forwarding; always transmit as untagged.
*/
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- pvid.vmemb_port |= BIT(i);
- pvid.vlan_bc |= BIT(i);
- pvid.tag_port &= ~BIT(i);
+ for (port = 0; port < ds->num_ports; port++) {
+ struct sja1105_bridge_vlan *v;
+
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ pvid.vmemb_port |= BIT(port);
+ pvid.vlan_bc |= BIT(port);
+ pvid.tag_port &= ~BIT(port);
+
+ /* Let traffic that doesn't need dsa_8021q (e.g. STP, PTP) be
+ * transmitted as untagged.
+ */
+ v = kzalloc(sizeof(*v), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ v->port = port;
+ v->vid = 1;
+ v->untagged = true;
+ if (dsa_is_cpu_port(ds, port))
+ v->pvid = true;
+ list_add(&v->list, &priv->dsa_8021q_vlans);
}
((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
@@ -410,6 +432,41 @@ static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
return 0;
}
+void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
+ struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ struct sja1105_table *table;
+ int max_mem;
+
+ /* VLAN retagging is implemented using a loopback port that consumes
+ * frame buffers. That leaves less for us.
+ */
+ if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
+ max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
+ else
+ max_mem = SJA1105_MAX_FRAME_MEMORY;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
+ l2_fwd_params = table->entries;
+ l2_fwd_params->part_spc[0] = max_mem;
+
+ /* If we have any critical-traffic virtual links, we need to reserve
+ * some frame buffer memory for them. At the moment, hardcode the value
+ * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
+ * remaining for best-effort traffic. TODO: figure out a more flexible
+ * way to perform the frame buffer partitioning.
+ */
+ if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
+ return;
+
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ vl_fwd_params = table->entries;
+
+ l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
+ vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
+}
+
static int sja1105_init_general_params(struct sja1105_private *priv)
{
struct sja1105_general_params_entry default_general_params = {
@@ -445,7 +502,7 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
*/
.casc_port = SJA1105_NUM_PORTS,
/* No TTEthernet */
- .vllupformat = 0,
+ .vllupformat = SJA1105_VL_FORMAT_PSFP,
.vlmarker = 0,
.vlmask = 0,
/* Only update correctionField for 1-step PTP (L2 transport) */
@@ -1301,7 +1358,7 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
+ if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1364,7 +1421,7 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
+ if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1410,7 +1467,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
* for what gets printed in 'bridge fdb show'. In the case of zero,
* no VID gets printed at all.
*/
- if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
vid = 0;
return priv->info->fdb_add_cmd(ds, port, addr, vid);
@@ -1421,7 +1478,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
- if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
vid = 0;
return priv->info->fdb_del_cmd(ds, port, addr, vid);
@@ -1460,7 +1517,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
l2_lookup.vlanid = 0;
cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
}
@@ -1589,6 +1646,7 @@ static const char * const sja1105_reset_reasons[] = {
[SJA1105_AGEING_TIME] = "Ageing time",
[SJA1105_SCHEDULING] = "Time-aware scheduling",
[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
+ [SJA1105_VIRTUAL_LINKS] = "Virtual links",
};
/* For situations where we need to change a setting at runtime that is only
@@ -1714,6 +1772,154 @@ static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
&mac[port], true);
}
+static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
+ int tree_index, int sw_index,
+ int other_port, struct net_device *br)
+{
+ struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
+ struct sja1105_private *other_priv = other_ds->priv;
+ struct sja1105_private *priv = ds->priv;
+ int port, rc;
+
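+ /* Cross-chip bridging over dsa_8021q only works between sja1105 switches */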
+ if (other_ds->ops != &sja1105_switch_ops)
+ return 0;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!dsa_is_user_port(ds, port))
+ continue;
+ if (dsa_to_port(ds, port)->bridge_dev != br)
+ continue;
+
+ other_priv->expect_dsa_8021q = true;
+ rc = dsa_8021q_crosschip_bridge_join(ds, port, other_ds,
+ other_port,
+ &priv->crosschip_links);
+ other_priv->expect_dsa_8021q = false;
+ if (rc)
+ return rc;
+
+ priv->expect_dsa_8021q = true;
+ rc = dsa_8021q_crosschip_bridge_join(other_ds, other_port, ds,
+ port,
+ &other_priv->crosschip_links);
+ priv->expect_dsa_8021q = false;
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
+ int tree_index, int sw_index,
+ int other_port,
+ struct net_device *br)
+{
+ struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
+ struct sja1105_private *other_priv = other_ds->priv;
+ struct sja1105_private *priv = ds->priv;
+ int port;
+
+ if (other_ds->ops != &sja1105_switch_ops)
+ return;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!dsa_is_user_port(ds, port))
+ continue;
+ if (dsa_to_port(ds, port)->bridge_dev != br)
+ continue;
+
+ other_priv->expect_dsa_8021q = true;
+ dsa_8021q_crosschip_bridge_leave(ds, port, other_ds, other_port,
+ &priv->crosschip_links);
+ other_priv->expect_dsa_8021q = false;
+
+ priv->expect_dsa_8021q = true;
+ dsa_8021q_crosschip_bridge_leave(other_ds, other_port, ds, port,
+ &other_priv->crosschip_links);
+ priv->expect_dsa_8021q = false;
+ }
+}
+
+static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc, i;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ priv->expect_dsa_8021q = true;
+ rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
+ priv->expect_dsa_8021q = false;
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ dev_info(ds->dev, "%s switch tagging\n",
+ enabled ? "Enabled" : "Disabled");
+ return 0;
+}
+
+static enum dsa_tag_protocol
+sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_SJA1105;
+}
+
+static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
+{
+ int subvlan;
+
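+ /* Sub-VLAN 0 is always reserved for the port's pvid */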
+ if (pvid)
+ return 0;
+
+ for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ if (subvlan_map[subvlan] == VLAN_N_VID)
+ return subvlan;
+
+ return -1;
+}
+
+static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
+{
+ int subvlan;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ if (subvlan_map[subvlan] == vid)
+ return subvlan;
+
+ return -1;
+}
+
+static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
+ int port, u16 vid)
+{
+ struct sja1105_port *sp = &priv->ports[port];
+
+ return sja1105_find_subvlan(sp->subvlan_map, vid);
+}
+
+static void sja1105_init_subvlan_map(u16 *subvlan_map)
+{
+ int subvlan;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ subvlan_map[subvlan] = VLAN_N_VID;
+}
+
+static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
+ u16 *subvlan_map)
+{
+ struct sja1105_port *sp = &priv->ports[port];
+ int subvlan;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ sp->subvlan_map[subvlan] = subvlan_map[subvlan];
+}
+
static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
{
struct sja1105_vlan_lookup_entry *vlan;
@@ -1730,94 +1936,628 @@ static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
return -1;
}
-static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
- bool enabled, bool untagged)
+static int
+sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
+ int count, int from_port, u16 from_vid,
+ u16 to_vid)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (retagging[i].ing_port == BIT(from_port) &&
+ retagging[i].vlan_ing == from_vid &&
+ retagging[i].vlan_egr == to_vid)
+ return i;
+
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int sja1105_commit_vlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan,
+ struct sja1105_retagging_entry *new_retagging,
+ int num_retagging)
{
+ struct sja1105_retagging_entry *retagging;
struct sja1105_vlan_lookup_entry *vlan;
struct sja1105_table *table;
- bool keep = true;
- int match, rc;
+ int num_vlans = 0;
+ int rc, i, k = 0;
+ /* VLAN table */
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+ vlan = table->entries;
- match = sja1105_is_vlan_configured(priv, vid);
- if (match < 0) {
- /* Can't delete a missing entry. */
- if (!enabled)
- return 0;
- rc = sja1105_table_resize(table, table->entry_count + 1);
+ for (i = 0; i < VLAN_N_VID; i++) {
+ int match = sja1105_is_vlan_configured(priv, i);
+
+ if (new_vlan[i].vlanid != VLAN_N_VID)
+ num_vlans++;
+
+ if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
+ /* Was there before, no longer is. Delete */
+ dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
+ rc = sja1105_dynamic_config_write(priv,
+ BLK_IDX_VLAN_LOOKUP,
+ i, &vlan[match], false);
+ if (rc < 0)
+ return rc;
+ } else if (new_vlan[i].vlanid != VLAN_N_VID) {
+ /* Nothing changed, don't do anything */
+ if (match >= 0 &&
+ vlan[match].vlanid == new_vlan[i].vlanid &&
+ vlan[match].tag_port == new_vlan[i].tag_port &&
+ vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
+ vlan[match].vmemb_port == new_vlan[i].vmemb_port)
+ continue;
+ /* Update entry */
+ dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
+ rc = sja1105_dynamic_config_write(priv,
+ BLK_IDX_VLAN_LOOKUP,
+ i, &new_vlan[i],
+ true);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ if (table->entry_count)
+ kfree(table->entries);
+
+ table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = num_vlans;
+ vlan = table->entries;
+
+ for (i = 0; i < VLAN_N_VID; i++) {
+ if (new_vlan[i].vlanid == VLAN_N_VID)
+ continue;
+ vlan[k++] = new_vlan[i];
+ }
+
+ /* VLAN Retagging Table */
+ table = &priv->static_config.tables[BLK_IDX_RETAGGING];
+ retagging = table->entries;
+
+ for (i = 0; i < table->entry_count; i++) {
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
+ i, &retagging[i], false);
if (rc)
return rc;
- match = table->entry_count - 1;
}
- /* Assign pointer after the resize (it's new memory) */
- vlan = table->entries;
- vlan[match].vlanid = vid;
- if (enabled) {
- vlan[match].vlan_bc |= BIT(port);
- vlan[match].vmemb_port |= BIT(port);
- } else {
- vlan[match].vlan_bc &= ~BIT(port);
- vlan[match].vmemb_port &= ~BIT(port);
+
+ if (table->entry_count)
+ kfree(table->entries);
+
+ table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = num_retagging;
+ retagging = table->entries;
+
+ for (i = 0; i < num_retagging; i++) {
+ retagging[i] = new_retagging[i];
+
+ /* Update entry */
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
+ i, &retagging[i], true);
+ if (rc < 0)
+ return rc;
}
- /* Also unset tag_port if removing this VLAN was requested,
- * just so we don't have a confusing bitmap (no practical purpose).
- */
- if (untagged || !enabled)
- vlan[match].tag_port &= ~BIT(port);
+
+ return 0;
+}
+
+struct sja1105_crosschip_vlan {
+ struct list_head list;
+ u16 vid;
+ bool untagged;
+ int port;
+ int other_port;
+ struct dsa_switch *other_ds;
+};
+
+struct sja1105_crosschip_switch {
+ struct list_head list;
+ struct dsa_switch *other_ds;
+};
+
+static int sja1105_commit_pvid(struct sja1105_private *priv)
+{
+ struct sja1105_bridge_vlan *v;
+ struct list_head *vlan_list;
+ int rc = 0;
+
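+ /* The pvid is taken from the bridge VLANs when fully VLAN-aware,
+ * and from the dsa_8021q VLANs otherwise
+ */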
+ if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ vlan_list = &priv->bridge_vlans;
else
- vlan[match].tag_port |= BIT(port);
- /* If there's no port left as member of this VLAN,
- * it's time for it to go.
- */
- if (!vlan[match].vmemb_port)
- keep = false;
+ vlan_list = &priv->dsa_8021q_vlans;
- dev_dbg(priv->ds->dev,
- "%s: port %d, vid %llu, broadcast domain 0x%llx, "
- "port members 0x%llx, tagged ports 0x%llx, keep %d\n",
- __func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
- vlan[match].vmemb_port, vlan[match].tag_port, keep);
+ list_for_each_entry(v, vlan_list, list) {
+ if (v->pvid) {
+ rc = sja1105_pvid_apply(priv, v->port, v->vid);
+ if (rc)
+ break;
+ }
+ }
- rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
- &vlan[match], keep);
- if (rc < 0)
- return rc;
+ return rc;
+}
- if (!keep)
- return sja1105_table_delete_entry(table, match);
+static int
+sja1105_build_bridge_vlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan)
+{
+ struct sja1105_bridge_vlan *v;
+
+ if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
+ return 0;
+
+ list_for_each_entry(v, &priv->bridge_vlans, list) {
+ int match = v->vid;
+
+ new_vlan[match].vlanid = v->vid;
+ new_vlan[match].vmemb_port |= BIT(v->port);
+ new_vlan[match].vlan_bc |= BIT(v->port);
+ if (!v->untagged)
+ new_vlan[match].tag_port |= BIT(v->port);
+ }
return 0;
}
-static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
+static int
+sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan)
{
- int rc, i;
+ struct sja1105_bridge_vlan *v;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
- i, rc);
- return rc;
+ if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ return 0;
+
+ list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
+ int match = v->vid;
+
+ new_vlan[match].vlanid = v->vid;
+ new_vlan[match].vmemb_port |= BIT(v->port);
+ new_vlan[match].vlan_bc |= BIT(v->port);
+ if (!v->untagged)
+ new_vlan[match].tag_port |= BIT(v->port);
+ }
+
+ return 0;
+}
+
+static int sja1105_build_subvlans(struct sja1105_private *priv,
+ u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
+ struct sja1105_vlan_lookup_entry *new_vlan,
+ struct sja1105_retagging_entry *new_retagging,
+ int *num_retagging)
+{
+ struct sja1105_bridge_vlan *v;
+ int k = *num_retagging;
+
+ if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
+ return 0;
+
+ list_for_each_entry(v, &priv->bridge_vlans, list) {
+ int upstream = dsa_upstream_port(priv->ds, v->port);
+ int match, subvlan;
+ u16 rx_vid;
+
+ /* Only sub-VLANs on user ports need to be applied.
+ * Bridge VLANs also include VLANs added automatically
+ * by DSA on the CPU port.
+ */
+ if (!dsa_is_user_port(priv->ds, v->port))
+ continue;
+
+ subvlan = sja1105_find_subvlan(subvlan_map[v->port],
+ v->vid);
+ if (subvlan < 0) {
+ subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
+ v->pvid);
+ if (subvlan < 0) {
+ dev_err(priv->ds->dev, "No more free subvlans\n");
+ return -ENOSPC;
+ }
+ }
+
+ rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
+
+ /* @v->vid on @v->port needs to be retagged to @rx_vid
+ * on @upstream. Assume @v->vid on @v->port and on
+ * @upstream was already configured by the previous
+ * iteration over bridge_vlans.
+ */
+ match = rx_vid;
+ new_vlan[match].vlanid = rx_vid;
+ new_vlan[match].vmemb_port |= BIT(v->port);
+ new_vlan[match].vmemb_port |= BIT(upstream);
+ new_vlan[match].vlan_bc |= BIT(v->port);
+ new_vlan[match].vlan_bc |= BIT(upstream);
+ /* The "untagged" flag is set the same as for the
+ * original VLAN
+ */
+ if (!v->untagged)
+ new_vlan[match].tag_port |= BIT(v->port);
+ /* But it's always tagged towards the CPU */
+ new_vlan[match].tag_port |= BIT(upstream);
+
+ /* The Retagging Table generates packet *clones* with
+ * the new VLAN. This is a very odd hardware quirk
+ * which we need to suppress by dropping the original
+ * packet.
+ * Deny egress of the original VLAN towards the CPU
+ * port. This will force the switch to drop it, and
+ * we'll see only the retagged packets.
+ */
+ match = v->vid;
+ new_vlan[match].vlan_bc &= ~BIT(upstream);
+
+ /* And the retagging itself */
+ new_retagging[k].vlan_ing = v->vid;
+ new_retagging[k].vlan_egr = rx_vid;
+ new_retagging[k].ing_port = BIT(v->port);
+ new_retagging[k].egr_port = BIT(upstream);
+ if (k++ == SJA1105_MAX_RETAGGING_COUNT) {
+ dev_err(priv->ds->dev, "No more retagging rules\n");
+ return -ENOSPC;
}
+
+ subvlan_map[v->port][subvlan] = v->vid;
}
- dev_info(ds->dev, "%s switch tagging\n",
- enabled ? "Enabled" : "Disabled");
+
+ *num_retagging = k;
+
return 0;
}
-static enum dsa_tag_protocol
-sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
- enum dsa_tag_protocol mp)
+/* Sadly, in crosschip scenarios where the CPU port is also the link to another
+ * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
+ * the CPU port of neighbour switches.
+ */
+static int
+sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan,
+ struct sja1105_retagging_entry *new_retagging,
+ int *num_retagging)
+{
+ struct sja1105_crosschip_vlan *tmp, *pos;
+ struct dsa_8021q_crosschip_link *c;
+ struct sja1105_bridge_vlan *v, *w;
+ struct list_head crosschip_vlans;
+ int k = *num_retagging;
+ int rc = 0;
+
+ if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
+ return 0;
+
+ INIT_LIST_HEAD(&crosschip_vlans);
+
+ list_for_each_entry(c, &priv->crosschip_links, list) {
+ struct sja1105_private *other_priv = c->other_ds->priv;
+
+ if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ continue;
+
+ /* Crosschip links are also added to the CPU ports.
+ * Ignore those.
+ */
+ if (!dsa_is_user_port(priv->ds, c->port))
+ continue;
+ if (!dsa_is_user_port(c->other_ds, c->other_port))
+ continue;
+
+ /* Search for VLANs on the remote port */
+ list_for_each_entry(v, &other_priv->bridge_vlans, list) {
+ bool already_added = false;
+ bool we_have_it = false;
+
+ if (v->port != c->other_port)
+ continue;
+
+ /* If @v is a pvid on @other_ds, it does not need
+ * re-retagging, because its SVL field is 0 and we
+ * already allow that, via the dsa_8021q crosschip
+ * links.
+ */
+ if (v->pvid)
+ continue;
+
+ /* Search for the VLAN on our local port */
+ list_for_each_entry(w, &priv->bridge_vlans, list) {
+ if (w->port == c->port && w->vid == v->vid) {
+ we_have_it = true;
+ break;
+ }
+ }
+
+ if (!we_have_it)
+ continue;
+
+ list_for_each_entry(tmp, &crosschip_vlans, list) {
+ if (tmp->vid == v->vid &&
+ tmp->untagged == v->untagged &&
+ tmp->port == c->port &&
+ tmp->other_port == v->port &&
+ tmp->other_ds == c->other_ds) {
+ already_added = true;
+ break;
+ }
+ }
+
+ if (already_added)
+ continue;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ dev_err(priv->ds->dev, "Failed to allocate memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ tmp->vid = v->vid;
+ tmp->port = c->port;
+ tmp->other_port = v->port;
+ tmp->other_ds = c->other_ds;
+ tmp->untagged = v->untagged;
+ list_add(&tmp->list, &crosschip_vlans);
+ }
+ }
+
+ list_for_each_entry(tmp, &crosschip_vlans, list) {
+ struct sja1105_private *other_priv = tmp->other_ds->priv;
+ int upstream = dsa_upstream_port(priv->ds, tmp->port);
+ int match, subvlan;
+ u16 rx_vid;
+
+ subvlan = sja1105_find_committed_subvlan(other_priv,
+ tmp->other_port,
+ tmp->vid);
+ /* If this happens, it's a bug. The neighbour switch does not
+ * have a subvlan for tmp->vid on tmp->other_port, but it
+ * should, since we already checked for its vlan_state.
+ */
+ if (WARN_ON(subvlan < 0)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ds,
+ tmp->other_port,
+ subvlan);
+
+ /* The @rx_vid retagged from @tmp->vid on
+ * {@tmp->other_ds, @tmp->other_port} needs to be
+ * re-retagged to @tmp->vid on the way back to us.
+ *
+ * Assume the original @tmp->vid is already configured
+ * on this local switch, otherwise we wouldn't be
+ * retagging its subvlan on the other switch in the
+ * first place. We just need to add a reverse retagging
+ * rule for @rx_vid and install @rx_vid on our ports.
+ */
+ match = rx_vid;
+ new_vlan[match].vlanid = rx_vid;
+ new_vlan[match].vmemb_port |= BIT(tmp->port);
+ new_vlan[match].vmemb_port |= BIT(upstream);
+ /* The "untagged" flag is set the same as for the
+ * original VLAN. And towards the CPU, it doesn't
+ * really matter, because @rx_vid will only receive
+ * traffic on that port. For consistency with other dsa_8021q
+ * VLANs, we'll keep the CPU port tagged.
+ */
+ if (!tmp->untagged)
+ new_vlan[match].tag_port |= BIT(tmp->port);
+ new_vlan[match].tag_port |= BIT(upstream);
+ /* Deny egress of @rx_vid towards our front-panel port.
+ * This will force the switch to drop it, and we'll see
+ * only the re-retagged packets (having the original,
+ * pre-initial-retagging, VLAN @tmp->vid).
+ */
+ new_vlan[match].vlan_bc &= ~BIT(tmp->port);
+
+ /* On reverse retagging, the same ingress VLAN goes to multiple
+ * ports. So we have an opportunity to create composite rules
+ * to not waste the limited space in the retagging table.
+ */
+ k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
+ upstream, rx_vid, tmp->vid);
+ if (k < 0) {
+ if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
+ dev_err(priv->ds->dev, "No more retagging rules\n");
+ rc = -ENOSPC;
+ goto out;
+ }
+ k = (*num_retagging)++;
+ }
+ /* And the retagging itself */
+ new_retagging[k].vlan_ing = rx_vid;
+ new_retagging[k].vlan_egr = tmp->vid;
+ new_retagging[k].ing_port = BIT(upstream);
+ new_retagging[k].egr_port |= BIT(tmp->port);
+ }
+
+out:
+ list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+
+ return rc;
+}
+
+static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
+
+static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
{
- return DSA_TAG_PROTO_SJA1105;
+ struct sja1105_crosschip_switch *s, *pos;
+ struct list_head crosschip_switches;
+ struct dsa_8021q_crosschip_link *c;
+ int rc = 0;
+
+ INIT_LIST_HEAD(&crosschip_switches);
+
+ list_for_each_entry(c, &priv->crosschip_links, list) {
+ bool already_added = false;
+
+ list_for_each_entry(s, &crosschip_switches, list) {
+ if (s->other_ds == c->other_ds) {
+ already_added = true;
+ break;
+ }
+ }
+
+ if (already_added)
+ continue;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ dev_err(priv->ds->dev, "Failed to allocate memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ s->other_ds = c->other_ds;
+ list_add(&s->list, &crosschip_switches);
+ }
+
+ list_for_each_entry(s, &crosschip_switches, list) {
+ struct sja1105_private *other_priv = s->other_ds->priv;
+
+ rc = sja1105_build_vlan_table(other_priv, false);
+ if (rc)
+ goto out;
+ }
+
+out:
+ list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
+ list_del(&s->list);
+ kfree(s);
+ }
+
+ return rc;
+}
+
+static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
+{
+ u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
+ struct sja1105_retagging_entry *new_retagging;
+ struct sja1105_vlan_lookup_entry *new_vlan;
+ struct sja1105_table *table;
+ int i, num_retagging = 0;
+ int rc;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+ new_vlan = kcalloc(VLAN_N_VID,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!new_vlan)
+ return -ENOMEM;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+ new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!new_retagging) {
+ kfree(new_vlan);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < VLAN_N_VID; i++)
+ new_vlan[i].vlanid = VLAN_N_VID;
+
+ for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
+ new_retagging[i].vlan_ing = VLAN_N_VID;
+
+ for (i = 0; i < priv->ds->num_ports; i++)
+ sja1105_init_subvlan_map(subvlan_map[i]);
+
+ /* Bridge VLANs */
+ rc = sja1105_build_bridge_vlans(priv, new_vlan);
+ if (rc)
+ goto out;
+
+ /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
+ * - RX VLANs
+ * - TX VLANs
+ * - Crosschip links
+ */
+ rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
+ if (rc)
+ goto out;
+
+ /* Private VLANs necessary for dsa_8021q operation, which we need to
+ * determine on our own:
+ * - Sub-VLANs
+ * - Sub-VLANs of crosschip switches
+ */
+ rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
+ &num_retagging);
+ if (rc)
+ goto out;
+
+ rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
+ &num_retagging);
+ if (rc)
+ goto out;
+
+ rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
+ if (rc)
+ goto out;
+
+ rc = sja1105_commit_pvid(priv);
+ if (rc)
+ goto out;
+
+ for (i = 0; i < priv->ds->num_ports; i++)
+ sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
+
+ if (notify) {
+ rc = sja1105_notify_crosschip_switches(priv);
+ if (rc)
+ goto out;
+ }
+
+out:
+ kfree(new_vlan);
+ kfree(new_retagging);
+
+ return rc;
+}
+
+/* Select the list to which we should add this VLAN. */
+static struct list_head *sja1105_classify_vlan(struct sja1105_private *priv,
+ u16 vid)
+{
+ if (priv->expect_dsa_8021q)
+ return &priv->dsa_8021q_vlans;
+
+ return &priv->bridge_vlans;
}
-/* This callback needs to be present */
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
+ struct sja1105_private *priv = ds->priv;
+ u16 vid;
+
+ if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ return 0;
+
+ /* If the user wants best-effort VLAN filtering (aka vlan_filtering
+ * bridge plus tagging), be sure to at least deny alterations to the
+ * configuration done by dsa_8021q.
+ */
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ if (!priv->expect_dsa_8021q && vid_is_dsa_8021q(vid)) {
+ dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
+ return -EBUSY;
+ }
+ }
+
return 0;
}
@@ -1830,10 +2570,21 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
+ enum sja1105_vlan_state state;
struct sja1105_table *table;
+ struct sja1105_rule *rule;
+ bool want_tagging;
u16 tpid, tpid2;
int rc;
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type == SJA1105_RULE_VL) {
+ dev_err(ds->dev,
+ "Cannot change VLAN filtering state while VL rules are active\n");
+ return -EBUSY;
+ }
+ }
+
if (enabled) {
/* Enable VLAN filtering. */
tpid = ETH_P_8021Q;
@@ -1844,6 +2595,29 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
tpid2 = ETH_P_SJA1105;
}
+ for (port = 0; port < ds->num_ports; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (enabled)
+ sp->xmit_tpid = priv->info->qinq_tpid;
+ else
+ sp->xmit_tpid = ETH_P_SJA1105;
+ }
+
+ if (!enabled)
+ state = SJA1105_VLAN_UNAWARE;
+ else if (priv->best_effort_vlan_filtering)
+ state = SJA1105_VLAN_BEST_EFFORT;
+ else
+ state = SJA1105_VLAN_FILTERING_FULL;
+
+ if (priv->vlan_state == state)
+ return 0;
+
+ priv->vlan_state = state;
+ want_tagging = (state == SJA1105_VLAN_UNAWARE ||
+ state == SJA1105_VLAN_BEST_EFFORT);
+
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
@@ -1856,8 +2630,10 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
general_params->incl_srcpt1 = enabled;
general_params->incl_srcpt0 = enabled;
+ want_tagging = priv->best_effort_vlan_filtering || !enabled;
+
/* VLAN filtering => independent VLAN learning.
- * No VLAN filtering => shared VLAN learning.
+ * No VLAN filtering (or best effort) => shared VLAN learning.
*
* In shared VLAN learning mode, untagged traffic still gets
* pvid-tagged, and the FDB table gets populated with entries
@@ -1876,7 +2652,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
*/
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = !enabled;
+ l2_lookup_params->shared_learn = want_tagging;
+
+ sja1105_frame_memory_partitioning(priv);
rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
if (rc)
@@ -1884,56 +2662,191 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
/* Switch port identification based on 802.1Q is only passable
* if we are not under a vlan_filtering bridge. So make sure
- * the two configurations are mutually exclusive.
+ * the two configurations are mutually exclusive (of course, the
+ * user may know better, i.e. best_effort_vlan_filtering).
*/
- return sja1105_setup_8021q_tagging(ds, !enabled);
+ return sja1105_setup_8021q_tagging(ds, want_tagging);
}
static void sja1105_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct sja1105_private *priv = ds->priv;
+ bool vlan_table_changed = false;
u16 vid;
int rc;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
- BRIDGE_VLAN_INFO_UNTAGGED);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
- vid, port, rc);
- return;
- }
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
- rc = sja1105_pvid_apply(ds->priv, port, vid);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
- vid, port, rc);
- return;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct sja1105_bridge_vlan *v;
+ struct list_head *vlan_list;
+ bool already_added = false;
+
+ vlan_list = sja1105_classify_vlan(priv, vid);
+
+ list_for_each_entry(v, vlan_list, list) {
+ if (v->port == port && v->vid == vid &&
+ v->untagged == untagged && v->pvid == pvid) {
+ already_added = true;
+ break;
}
}
+
+ if (already_added)
+ continue;
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL);
+ if (!v) {
+ dev_err(ds->dev, "Out of memory while storing VLAN\n");
+ return;
+ }
+
+ v->port = port;
+ v->vid = vid;
+ v->untagged = untagged;
+ v->pvid = pvid;
+ list_add(&v->list, vlan_list);
+
+ vlan_table_changed = true;
}
+
+ if (!vlan_table_changed)
+ return;
+
+ rc = sja1105_build_vlan_table(priv, true);
+ if (rc)
+ dev_err(ds->dev, "Failed to build VLAN table: %d\n", rc);
}
static int sja1105_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct sja1105_private *priv = ds->priv;
+ bool vlan_table_changed = false;
u16 vid;
- int rc;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
- BRIDGE_VLAN_INFO_UNTAGGED);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
- vid, port, rc);
- return rc;
+ struct sja1105_bridge_vlan *v, *n;
+ struct list_head *vlan_list;
+
+ vlan_list = sja1105_classify_vlan(priv, vid);
+
+ list_for_each_entry_safe(v, n, vlan_list, list) {
+ if (v->port == port && v->vid == vid) {
+ list_del(&v->list);
+ kfree(v);
+ vlan_table_changed = true;
+ break;
+ }
}
}
+
+ if (!vlan_table_changed)
+ return 0;
+
+ return sja1105_build_vlan_table(priv, true);
+}
+
+static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv,
+ bool *be_vlan)
+{
+ *be_vlan = priv->best_effort_vlan_filtering;
+
return 0;
}
+static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
+ bool be_vlan)
+{
+ struct dsa_switch *ds = priv->ds;
+ bool vlan_filtering;
+ int port;
+ int rc;
+
+ priv->best_effort_vlan_filtering = be_vlan;
+
+ rtnl_lock();
+ for (port = 0; port < ds->num_ports; port++) {
+ struct dsa_port *dp;
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ dp = dsa_to_port(ds, port);
+ vlan_filtering = dsa_port_is_vlan_filtering(dp);
+
+ rc = sja1105_vlan_filtering(ds, port, vlan_filtering);
+ if (rc)
+ break;
+ }
+ rtnl_unlock();
+
+ return rc;
+}
+
+enum sja1105_devlink_param_id {
+ SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
+};
+
+static int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct sja1105_private *priv = ds->priv;
+ int err;
+
+ switch (id) {
+ case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
+ err = sja1105_best_effort_vlan_filtering_get(priv,
+ &ctx->val.vbool);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct sja1105_private *priv = ds->priv;
+ int err;
+
+ switch (id) {
+ case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
+ err = sja1105_best_effort_vlan_filtering_set(priv,
+ ctx->val.vbool);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static const struct devlink_param sja1105_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
+ "best_effort_vlan_filtering",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+static int sja1105_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, sja1105_devlink_params,
+ ARRAY_SIZE(sja1105_devlink_params));
+}
+
+static void sja1105_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, sja1105_devlink_params,
+ ARRAY_SIZE(sja1105_devlink_params));
+}
+
/* The programming model for the SJA1105 switch is "all-at-once" via static
* configuration tables. Some of these can be dynamically modified at runtime,
* but not the xMII mode parameters table.
@@ -1999,6 +2912,12 @@ static int sja1105_setup(struct dsa_switch *ds)
ds->mtu_enforcement_ingress = true;
+ ds->configure_vlan_while_not_filtering = true;
+
+ rc = sja1105_setup_devlink_params(ds);
+ if (rc < 0)
+ return rc;
+
/* The DSA/switchdev model brings up switch ports in standalone mode by
* default, and that means vlan_filtering is 0 since they're not under
* a bridge, so it's safe to set up switch tagging at this time.
@@ -2009,6 +2928,7 @@ static int sja1105_setup(struct dsa_switch *ds)
static void sja1105_teardown(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
+ struct sja1105_bridge_vlan *v, *n;
int port;
for (port = 0; port < SJA1105_NUM_PORTS; port++) {
@@ -2021,10 +2941,21 @@ static void sja1105_teardown(struct dsa_switch *ds)
kthread_destroy_worker(sp->xmit_worker);
}
+ sja1105_teardown_devlink_params(ds);
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
sja1105_ptp_clock_unregister(ds);
sja1105_static_config_free(&priv->static_config);
+
+ list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
+ list_del(&v->list);
+ kfree(v);
+ }
+
+ list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
+ list_del(&v->list);
+ kfree(v);
+ }
}
static int sja1105_port_enable(struct dsa_switch *ds, int port,
@@ -2359,6 +3290,11 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_policer_del = sja1105_port_policer_del,
.cls_flower_add = sja1105_cls_flower_add,
.cls_flower_del = sja1105_cls_flower_del,
+ .cls_flower_stats = sja1105_cls_flower_stats,
+ .crosschip_bridge_join = sja1105_crosschip_bridge_join,
+ .crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
+ .devlink_param_get = sja1105_devlink_param_get,
+ .devlink_param_set = sja1105_devlink_param_set,
};
static int sja1105_check_device_id(struct sja1105_private *priv)
@@ -2461,6 +3397,10 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->mgmt_lock);
+ INIT_LIST_HEAD(&priv->crosschip_links);
+ INIT_LIST_HEAD(&priv->bridge_vlans);
+ INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
+
sja1105_tas_setup(ds);
sja1105_flower_setup(ds);
@@ -2473,6 +3413,7 @@ static int sja1105_probe(struct spi_device *spi)
struct sja1105_port *sp = &priv->ports[port];
struct dsa_port *dp = dsa_to_port(ds, port);
struct net_device *slave;
+ int subvlan;
if (!dsa_is_user_port(ds, port))
continue;
@@ -2492,6 +3433,10 @@ static int sja1105_probe(struct spi_device *spi)
goto out;
}
skb_queue_head_init(&sp->xmit_queue);
+ sp->xmit_tpid = ETH_P_SJA1105;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ sp->subvlan_map[subvlan] = VLAN_N_VID;
}
return 0;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 43480b24f1f0..6408d1158f2d 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -48,6 +48,19 @@ static inline s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
return base_time + n * cycle_time;
}
+/* This is not a preprocessor macro because the "ns" argument may or may not be
+ * s64 at caller side. This ensures it is properly type-cast before div_s64.
+ */
+static inline s64 ns_to_sja1105_delta(s64 ns)
+{
+ return div_s64(ns, 200);
+}
+
+static inline s64 sja1105_delta_to_ns(s64 delta)
+{
+ return delta * 200;
+}
+
struct sja1105_ptp_cmd {
u64 startptpcp; /* start toggling PTP_CLK pin */
u64 stopptpcp; /* stop toggling PTP_CLK pin */
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 04bdb72ae6b6..a0dacae803cc 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -439,10 +439,12 @@ static struct sja1105_regs sja1105et_regs = {
.prod_id = 0x100BC3,
.status = 0x1,
.port_control = 0x11,
+ .vl_status = 0x10000,
.config = 0x020000,
.rgu = 0x100440,
/* UM10944.pdf, Table 86, ACU Register overview */
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
.rmii_pll1 = 0x10000A,
.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
@@ -471,10 +473,12 @@ static struct sja1105_regs sja1105pqrs_regs = {
.prod_id = 0x100BC3,
.status = 0x1,
.port_control = 0x12,
+ .vl_status = 0x10000,
.config = 0x020000,
.rgu = 0x100440,
/* UM10944.pdf, Table 86, ACU Register overview */
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
.pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
.sgmii = 0x1F0000,
.rmii_pll1 = 0x10000A,
@@ -508,6 +512,7 @@ struct sja1105_info sja1105e_info = {
.part_no = SJA1105ET_PART_NO,
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
+ .qinq_tpid = ETH_P_8021Q,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.reset_cmd = sja1105et_reset_cmd,
@@ -522,6 +527,7 @@ struct sja1105_info sja1105t_info = {
.part_no = SJA1105ET_PART_NO,
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
+ .qinq_tpid = ETH_P_8021Q,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.reset_cmd = sja1105et_reset_cmd,
@@ -536,6 +542,7 @@ struct sja1105_info sja1105p_info = {
.part_no = SJA1105P_PART_NO,
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
+ .qinq_tpid = ETH_P_8021AD,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
@@ -551,6 +558,7 @@ struct sja1105_info sja1105q_info = {
.part_no = SJA1105Q_PART_NO,
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
+ .qinq_tpid = ETH_P_8021AD,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
@@ -566,6 +574,7 @@ struct sja1105_info sja1105r_info = {
.part_no = SJA1105R_PART_NO,
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
+ .qinq_tpid = ETH_P_8021AD,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
@@ -582,6 +591,7 @@ struct sja1105_info sja1105s_info = {
.static_ops = sja1105s_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
+ .qinq_tpid = ETH_P_8021AD,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index bbfe034910a0..780aca034cdc 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -432,6 +432,84 @@ static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t
+sja1105_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 16; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->partspc[i],
+ offset + 9, offset + 0, size, op);
+ sja1105_packing(buf, &entry->debugen, 15, 15, size, op);
+ return size;
+}
+
+static size_t sja1105_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 31, 31, size, op);
+ sja1105_packing(buf, &entry->priority, 30, 28, size, op);
+ sja1105_packing(buf, &entry->partition, 27, 25, size, op);
+ sja1105_packing(buf, &entry->destports, 24, 20, size, op);
+ return size;
+}
+
+size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_LOOKUP_ENTRY;
+
+ if (entry->format == SJA1105_VL_FORMAT_PSFP) {
+ /* Interpreting vllupformat as 0 */
+ sja1105_packing(buf, &entry->destports,
+ 95, 91, size, op);
+ sja1105_packing(buf, &entry->iscritical,
+ 90, 90, size, op);
+ sja1105_packing(buf, &entry->macaddr,
+ 89, 42, size, op);
+ sja1105_packing(buf, &entry->vlanid,
+ 41, 30, size, op);
+ sja1105_packing(buf, &entry->port,
+ 29, 27, size, op);
+ sja1105_packing(buf, &entry->vlanprior,
+ 26, 24, size, op);
+ } else {
+ /* Interpreting vllupformat as 1 */
+ sja1105_packing(buf, &entry->egrmirr,
+ 95, 91, size, op);
+ sja1105_packing(buf, &entry->ingrmirr,
+ 90, 90, size, op);
+ sja1105_packing(buf, &entry->vlid,
+ 57, 42, size, op);
+ sja1105_packing(buf, &entry->port,
+ 29, 27, size, op);
+ }
+ return size;
+}
+
+static size_t sja1105_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 63, 63, size, op);
+ sja1105_packing(buf, &entry->maxlen, 62, 52, size, op);
+ sja1105_packing(buf, &entry->sharindx, 51, 42, size, op);
+ if (entry->type == 0) {
+ sja1105_packing(buf, &entry->bag, 41, 28, size, op);
+ sja1105_packing(buf, &entry->jitter, 27, 18, size, op);
+ }
+ return size;
+}
+
size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -463,6 +541,22 @@ static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_retagging_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_RETAGGING_ENTRY;
+
+ sja1105_packing(buf, &entry->egr_port, 63, 59, size, op);
+ sja1105_packing(buf, &entry->ing_port, 58, 54, size, op);
+ sja1105_packing(buf, &entry->vlan_ing, 53, 42, size, op);
+ sja1105_packing(buf, &entry->vlan_egr, 41, 30, size, op);
+ sja1105_packing(buf, &entry->do_not_learn, 29, 29, size, op);
+ sja1105_packing(buf, &entry->use_dest_ports, 28, 28, size, op);
+ sja1105_packing(buf, &entry->destports, 27, 23, size, op);
+ return size;
+}
+
size_t sja1105_table_header_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -510,6 +604,9 @@ static void sja1105_table_write_crc(u8 *table_start, u8 *crc_ptr)
static u64 blk_id_map[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = BLKID_SCHEDULE,
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = BLKID_SCHEDULE_ENTRY_POINTS,
+ [BLK_IDX_VL_LOOKUP] = BLKID_VL_LOOKUP,
+ [BLK_IDX_VL_POLICING] = BLKID_VL_POLICING,
+ [BLK_IDX_VL_FORWARDING] = BLKID_VL_FORWARDING,
[BLK_IDX_L2_LOOKUP] = BLKID_L2_LOOKUP,
[BLK_IDX_L2_POLICING] = BLKID_L2_POLICING,
[BLK_IDX_VLAN_LOOKUP] = BLKID_VLAN_LOOKUP,
@@ -517,10 +614,12 @@ static u64 blk_id_map[BLK_IDX_MAX] = {
[BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG,
[BLK_IDX_SCHEDULE_PARAMS] = BLKID_SCHEDULE_PARAMS,
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = BLKID_SCHEDULE_ENTRY_POINTS_PARAMS,
+ [BLK_IDX_VL_FORWARDING_PARAMS] = BLKID_VL_FORWARDING_PARAMS,
[BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS,
[BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS,
[BLK_IDX_AVB_PARAMS] = BLKID_AVB_PARAMS,
[BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
+ [BLK_IDX_RETAGGING] = BLKID_RETAGGING,
[BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
};
@@ -533,6 +632,9 @@ const char *sja1105_static_config_error_msg[] = {
"schedule-table present, but one of "
"schedule-entry-points-table, schedule-parameters-table or "
"schedule-entry-points-parameters table is empty",
+ [SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION] =
+ "vl-lookup-table present, but one of vl-policing-table, "
+ "vl-forwarding-table or vl-forwarding-parameters-table is empty",
[SJA1105_MISSING_L2_POLICING_TABLE] =
"l2-policing-table needs to have at least one entry",
[SJA1105_MISSING_L2_FORWARDING_TABLE] =
@@ -560,14 +662,26 @@ static sja1105_config_valid_t
static_config_check_memory_size(const struct sja1105_table *tables)
{
const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
- int i, mem = 0;
+ const struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ int i, max_mem, mem = 0;
l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries;
for (i = 0; i < 8; i++)
mem += l2_fwd_params->part_spc[i];
- if (mem > SJA1105_MAX_FRAME_MEMORY)
+ if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count) {
+ vl_fwd_params = tables[BLK_IDX_VL_FORWARDING_PARAMS].entries;
+ for (i = 0; i < 8; i++)
+ mem += vl_fwd_params->partspc[i];
+ }
+
+ if (tables[BLK_IDX_RETAGGING].entry_count)
+ max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
+ else
+ max_mem = SJA1105_MAX_FRAME_MEMORY;
+
+ if (mem > max_mem)
return SJA1105_OVERCOMMITTED_FRAME_MEMORY;
return SJA1105_CONFIG_OK;
@@ -594,6 +708,32 @@ sja1105_static_config_check_valid(const struct sja1105_static_config *config)
if (!IS_FULL(BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS))
return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
}
+ if (tables[BLK_IDX_VL_LOOKUP].entry_count) {
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ bool has_critical_links = false;
+ int i;
+
+ vl_lookup = tables[BLK_IDX_VL_LOOKUP].entries;
+
+ for (i = 0; i < tables[BLK_IDX_VL_LOOKUP].entry_count; i++) {
+ if (vl_lookup[i].iscritical) {
+ has_critical_links = true;
+ break;
+ }
+ }
+
+ if (tables[BLK_IDX_VL_POLICING].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+
+ if (tables[BLK_IDX_VL_FORWARDING].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+
+ if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+ }
if (tables[BLK_IDX_L2_POLICING].entry_count == 0)
return SJA1105_MISSING_L2_POLICING_TABLE;
@@ -703,6 +843,9 @@ sja1105_static_config_get_length(const struct sja1105_static_config *config)
struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+ [BLK_IDX_VL_LOOKUP] = {0},
+ [BLK_IDX_VL_POLICING] = {0},
+ [BLK_IDX_VL_FORWARDING] = {0},
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105et_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -735,6 +878,7 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
},
[BLK_IDX_SCHEDULE_PARAMS] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105et_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -759,6 +903,12 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
@@ -781,6 +931,24 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
},
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105et_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -823,6 +991,12 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105et_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -847,6 +1021,12 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
@@ -859,6 +1039,9 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+ [BLK_IDX_VL_LOOKUP] = {0},
+ [BLK_IDX_VL_POLICING] = {0},
+ [BLK_IDX_VL_FORWARDING] = {0},
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -891,6 +1074,7 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
},
[BLK_IDX_SCHEDULE_PARAMS] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -915,6 +1099,12 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
@@ -937,6 +1127,24 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
},
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -979,6 +1187,12 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -1003,6 +1217,12 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
@@ -1015,6 +1235,9 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+ [BLK_IDX_VL_LOOKUP] = {0},
+ [BLK_IDX_VL_POLICING] = {0},
+ [BLK_IDX_VL_FORWARDING] = {0},
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -1047,6 +1270,7 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
},
[BLK_IDX_SCHEDULE_PARAMS] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -1071,6 +1295,12 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
@@ -1093,6 +1323,24 @@ struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
},
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -1135,6 +1383,12 @@ struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -1159,6 +1413,12 @@ struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index 8afafb6aef12..5946847bb5b9 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -13,13 +13,18 @@
#define SJA1105_SIZE_TABLE_HEADER 12
#define SJA1105_SIZE_SCHEDULE_ENTRY 8
#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 4
+#define SJA1105_SIZE_VL_LOOKUP_ENTRY 12
+#define SJA1105_SIZE_VL_POLICING_ENTRY 8
+#define SJA1105_SIZE_VL_FORWARDING_ENTRY 4
#define SJA1105_SIZE_L2_POLICING_ENTRY 8
#define SJA1105_SIZE_VLAN_LOOKUP_ENTRY 8
#define SJA1105_SIZE_L2_FORWARDING_ENTRY 8
#define SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY 12
+#define SJA1105_SIZE_RETAGGING_ENTRY 8
#define SJA1105_SIZE_XMII_PARAMS_ENTRY 4
#define SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY 12
#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY 4
+#define SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY 12
#define SJA1105ET_SIZE_L2_LOOKUP_ENTRY 12
#define SJA1105ET_SIZE_MAC_CONFIG_ENTRY 28
#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4
@@ -35,6 +40,9 @@
enum {
BLKID_SCHEDULE = 0x00,
BLKID_SCHEDULE_ENTRY_POINTS = 0x01,
+ BLKID_VL_LOOKUP = 0x02,
+ BLKID_VL_POLICING = 0x03,
+ BLKID_VL_FORWARDING = 0x04,
BLKID_L2_LOOKUP = 0x05,
BLKID_L2_POLICING = 0x06,
BLKID_VLAN_LOOKUP = 0x07,
@@ -42,16 +50,21 @@ enum {
BLKID_MAC_CONFIG = 0x09,
BLKID_SCHEDULE_PARAMS = 0x0A,
BLKID_SCHEDULE_ENTRY_POINTS_PARAMS = 0x0B,
+ BLKID_VL_FORWARDING_PARAMS = 0x0C,
BLKID_L2_LOOKUP_PARAMS = 0x0D,
BLKID_L2_FORWARDING_PARAMS = 0x0E,
BLKID_AVB_PARAMS = 0x10,
BLKID_GENERAL_PARAMS = 0x11,
+ BLKID_RETAGGING = 0x12,
BLKID_XMII_PARAMS = 0x4E,
};
enum sja1105_blk_idx {
BLK_IDX_SCHEDULE = 0,
BLK_IDX_SCHEDULE_ENTRY_POINTS,
+ BLK_IDX_VL_LOOKUP,
+ BLK_IDX_VL_POLICING,
+ BLK_IDX_VL_FORWARDING,
BLK_IDX_L2_LOOKUP,
BLK_IDX_L2_POLICING,
BLK_IDX_VLAN_LOOKUP,
@@ -59,10 +72,12 @@ enum sja1105_blk_idx {
BLK_IDX_MAC_CONFIG,
BLK_IDX_SCHEDULE_PARAMS,
BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS,
+ BLK_IDX_VL_FORWARDING_PARAMS,
BLK_IDX_L2_LOOKUP_PARAMS,
BLK_IDX_L2_FORWARDING_PARAMS,
BLK_IDX_AVB_PARAMS,
BLK_IDX_GENERAL_PARAMS,
+ BLK_IDX_RETAGGING,
BLK_IDX_XMII_PARAMS,
BLK_IDX_MAX,
/* Fake block indices that are only valid for dynamic access */
@@ -73,6 +88,9 @@ enum sja1105_blk_idx {
#define SJA1105_MAX_SCHEDULE_COUNT 1024
#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT 2048
+#define SJA1105_MAX_VL_LOOKUP_COUNT 1024
+#define SJA1105_MAX_VL_POLICING_COUNT 1024
+#define SJA1105_MAX_VL_FORWARDING_COUNT 1024
#define SJA1105_MAX_L2_LOOKUP_COUNT 1024
#define SJA1105_MAX_L2_POLICING_COUNT 45
#define SJA1105_MAX_VLAN_LOOKUP_COUNT 4096
@@ -80,13 +98,17 @@ enum sja1105_blk_idx {
#define SJA1105_MAX_MAC_CONFIG_COUNT 5
#define SJA1105_MAX_SCHEDULE_PARAMS_COUNT 1
#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT 1
+#define SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT 1
#define SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT 1
#define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT 1
#define SJA1105_MAX_GENERAL_PARAMS_COUNT 1
+#define SJA1105_MAX_RETAGGING_COUNT 32
#define SJA1105_MAX_XMII_PARAMS_COUNT 1
#define SJA1105_MAX_AVB_PARAMS_COUNT 1
#define SJA1105_MAX_FRAME_MEMORY 929
+#define SJA1105_MAX_FRAME_MEMORY_RETAGGING 910
+#define SJA1105_VL_FRAME_MEMORY 100
#define SJA1105E_DEVICE_ID 0x9C00000Cull
#define SJA1105T_DEVICE_ID 0x9E00030Eull
@@ -257,11 +279,69 @@ struct sja1105_mac_config_entry {
u64 ingress;
};
+struct sja1105_retagging_entry {
+ u64 egr_port;
+ u64 ing_port;
+ u64 vlan_ing;
+ u64 vlan_egr;
+ u64 do_not_learn;
+ u64 use_dest_ports;
+ u64 destports;
+};
+
struct sja1105_xmii_params_entry {
u64 phy_mac[5];
u64 xmii_mode[5];
};
+enum {
+ SJA1105_VL_FORMAT_PSFP = 0,
+ SJA1105_VL_FORMAT_ARINC664 = 1,
+};
+
+struct sja1105_vl_lookup_entry {
+ u64 format;
+ u64 port;
+ union {
+ /* SJA1105_VL_FORMAT_PSFP */
+ struct {
+ u64 destports;
+ u64 iscritical;
+ u64 macaddr;
+ u64 vlanid;
+ u64 vlanprior;
+ };
+ /* SJA1105_VL_FORMAT_ARINC664 */
+ struct {
+ u64 egrmirr;
+ u64 ingrmirr;
+ u64 vlid;
+ };
+ };
+ /* Not part of hardware structure */
+ unsigned long flow_cookie;
+};
+
+struct sja1105_vl_policing_entry {
+ u64 type;
+ u64 maxlen;
+ u64 sharindx;
+ u64 bag;
+ u64 jitter;
+};
+
+struct sja1105_vl_forwarding_entry {
+ u64 type;
+ u64 priority;
+ u64 partition;
+ u64 destports;
+};
+
+struct sja1105_vl_forwarding_params_entry {
+ u64 partspc[8];
+ u64 debugen;
+};
+
struct sja1105_table_header {
u64 block_id;
u64 len;
@@ -303,6 +383,7 @@ typedef enum {
SJA1105_CONFIG_OK = 0,
SJA1105_TTETHERNET_NOT_SUPPORTED,
SJA1105_INCORRECT_TTETHERNET_CONFIGURATION,
+ SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION,
SJA1105_MISSING_L2_POLICING_TABLE,
SJA1105_MISSING_L2_FORWARDING_TABLE,
SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE,
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index 77e547b4cd89..3aa1a8b5f766 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -7,7 +7,6 @@
#define SJA1105_TAS_CLKSRC_STANDALONE 1
#define SJA1105_TAS_CLKSRC_AS6802 2
#define SJA1105_TAS_CLKSRC_PTP 3
-#define SJA1105_TAS_MAX_DELTA BIT(19)
#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
#define work_to_sja1105_tas(d) \
@@ -15,22 +14,10 @@
#define tas_to_sja1105(d) \
container_of((d), struct sja1105_private, tas_data)
-/* This is not a preprocessor macro because the "ns" argument may or may not be
- * s64 at caller side. This ensures it is properly type-cast before div_s64.
- */
-static s64 ns_to_sja1105_delta(s64 ns)
-{
- return div_s64(ns, 200);
-}
-
-static s64 sja1105_delta_to_ns(s64 delta)
-{
- return delta * 200;
-}
-
static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
{
struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
struct dsa_switch *ds = priv->ds;
s64 earliest_base_time = S64_MAX;
s64 latest_base_time = 0;
@@ -59,6 +46,19 @@ static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
}
}
+ if (!list_empty(&gating_cfg->entries)) {
+ tas_data->enabled = true;
+
+ if (max_cycle_time < gating_cfg->cycle_time)
+ max_cycle_time = gating_cfg->cycle_time;
+ if (latest_base_time < gating_cfg->base_time)
+ latest_base_time = gating_cfg->base_time;
+ if (earliest_base_time > gating_cfg->base_time) {
+ earliest_base_time = gating_cfg->base_time;
+ its_cycle_time = gating_cfg->cycle_time;
+ }
+ }
+
if (!tas_data->enabled)
return 0;
@@ -155,13 +155,14 @@ static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
* their "subschedule end index" (subscheind) equal to the last valid
* subschedule's end index (in this case 5).
*/
-static int sja1105_init_scheduling(struct sja1105_private *priv)
+int sja1105_init_scheduling(struct sja1105_private *priv)
{
struct sja1105_schedule_entry_points_entry *schedule_entry_points;
struct sja1105_schedule_entry_points_params_entry
*schedule_entry_points_params;
struct sja1105_schedule_params_entry *schedule_params;
struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
struct sja1105_schedule_entry *schedule;
struct sja1105_table *table;
int schedule_start_idx;
@@ -213,6 +214,11 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
}
}
+ if (!list_empty(&gating_cfg->entries)) {
+ num_entries += gating_cfg->num_entries;
+ num_cycles++;
+ }
+
/* Nothing to do */
if (!num_cycles)
return 0;
@@ -312,6 +318,42 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
cycle++;
}
+ if (!list_empty(&gating_cfg->entries)) {
+ struct sja1105_gate_entry *e;
+
+ /* Relative base time */
+ s64 rbt;
+
+ schedule_start_idx = k;
+ schedule_end_idx = k + gating_cfg->num_entries - 1;
+ rbt = future_base_time(gating_cfg->base_time,
+ gating_cfg->cycle_time,
+ tas_data->earliest_base_time);
+ rbt -= tas_data->earliest_base_time;
+ entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
+
+ schedule_entry_points[cycle].subschindx = cycle;
+ schedule_entry_points[cycle].delta = entry_point_delta;
+ schedule_entry_points[cycle].address = schedule_start_idx;
+
+ for (i = cycle; i < 8; i++)
+ schedule_params->subscheind[i] = schedule_end_idx;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ schedule[k].delta = ns_to_sja1105_delta(e->interval);
+ schedule[k].destports = e->rule->vl.destports;
+ schedule[k].setvalid = true;
+ schedule[k].txen = true;
+ schedule[k].vlindex = e->rule->vl.sharindx;
+ schedule[k].winstindex = e->rule->vl.sharindx;
+ if (e->gate_state) /* Gate open */
+ schedule[k].winst = true;
+ else /* Gate closed */
+ schedule[k].winend = true;
+ k++;
+ }
+ }
+
return 0;
}
@@ -415,6 +457,54 @@ sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
return false;
}
+/* Check the tc-taprio configuration on @port for conflicts with the tc-gate
+ * global subschedule. If @port is -1, check it against all ports.
+ * To reuse the sja1105_tas_check_conflicts logic without refactoring it,
+ * convert the gating configuration to a dummy tc-taprio offload structure.
+ */
+bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ size_t num_entries = gating_cfg->num_entries;
+ struct tc_taprio_qopt_offload *dummy;
+ struct sja1105_gate_entry *e;
+ bool conflict;
+ int i = 0;
+
+ if (list_empty(&gating_cfg->entries))
+ return false;
+
+ dummy = kzalloc(sizeof(struct tc_taprio_sched_entry) * num_entries +
+ sizeof(struct tc_taprio_qopt_offload), GFP_KERNEL);
+ if (!dummy) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
+ return true;
+ }
+
+ dummy->num_entries = num_entries;
+ dummy->base_time = gating_cfg->base_time;
+ dummy->cycle_time = gating_cfg->cycle_time;
+
+ list_for_each_entry(e, &gating_cfg->entries, list)
+ dummy->entries[i++].interval = e->interval;
+
+ if (port != -1) {
+ conflict = sja1105_tas_check_conflicts(priv, port, dummy);
+ } else {
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ conflict = sja1105_tas_check_conflicts(priv, port,
+ dummy);
+ if (conflict)
+ break;
+ }
+ }
+
+ kfree(dummy);
+
+ return conflict;
+}
+
int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
struct tc_taprio_qopt_offload *admin)
{
@@ -473,6 +563,11 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
return -ERANGE;
}
+ if (sja1105_gating_check_conflicts(priv, port, NULL)) {
+ dev_err(ds->dev, "Conflict with tc-gate schedule\n");
+ return -ERANGE;
+ }
+
tas_data->offload[port] = taprio_offload_get(admin);
rc = sja1105_init_scheduling(priv);
@@ -779,6 +874,8 @@ void sja1105_tas_setup(struct dsa_switch *ds)
INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
tas_data->state = SJA1105_TAS_STATE_DISABLED;
tas_data->last_op = SJA1105_PTP_NONE;
+
+ INIT_LIST_HEAD(&tas_data->gating_cfg.entries);
}
void sja1105_tas_teardown(struct dsa_switch *ds)
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
index b226c3dfd5b1..0c173ff51751 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.h
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -6,6 +6,10 @@
#include <net/pkt_sched.h>
+#define SJA1105_TAS_MAX_DELTA BIT(18)
+
+struct sja1105_private;
+
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS)
enum sja1105_tas_state {
@@ -20,8 +24,23 @@ enum sja1105_ptp_op {
SJA1105_PTP_ADJUSTFREQ,
};
+struct sja1105_gate_entry {
+ struct list_head list;
+ struct sja1105_rule *rule;
+ s64 interval;
+ u8 gate_state;
+};
+
+struct sja1105_gating_config {
+ u64 cycle_time;
+ s64 base_time;
+ int num_entries;
+ struct list_head entries;
+};
+
struct sja1105_tas_data {
struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS];
+ struct sja1105_gating_config gating_cfg;
enum sja1105_tas_state state;
enum sja1105_ptp_op last_op;
struct work_struct tas_work;
@@ -42,6 +61,11 @@ void sja1105_tas_clockstep(struct dsa_switch *ds);
void sja1105_tas_adjfreq(struct dsa_switch *ds);
+bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack);
+
+int sja1105_init_scheduling(struct sja1105_private *priv);
+
#else
/* C doesn't allow empty structures, bah! */
@@ -63,6 +87,18 @@ static inline void sja1105_tas_clockstep(struct dsa_switch *ds) { }
static inline void sja1105_tas_adjfreq(struct dsa_switch *ds) { }
+static inline bool
+sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack)
+{
+ return true;
+}
+
+static inline int sja1105_init_scheduling(struct sja1105_private *priv)
+{
+ return 0;
+}
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) */
#endif /* _SJA1105_TAS_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
new file mode 100644
index 000000000000..f37611885376
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2020, NXP Semiconductors
+ */
+#include <net/tc_act/tc_gate.h>
+#include <linux/dsa/8021q.h>
+#include "sja1105.h"
+
+#define SJA1105_SIZE_VL_STATUS 8
+
+/* The switch flow classification core implements TTEthernet, which 'thinks' in
+ * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
+ * However, it also has one other operating mode (VLLUPFORMAT=0) where it acts
+ * somewhat closer to a pre-standard implementation of IEEE 802.1Qci
+ * (Per-Stream Filtering and Policing), which is what this driver implements.
+ *
+ * VL Lookup
+ * Key = {DMAC && VLANID +---------+ Key = { (DMAC[47:16] & VLMASK ==
+ * && VLAN PCP | | VLMARKER)
+ * && INGRESS PORT} +---------+ (both fixed)
+ * (exact match, | && DMAC[15:0] == VLID
+ * all specified in rule) | (specified in rule)
+ * v && INGRESS PORT }
+ * ------------
+ * 0 (PSFP) / \ 1 (ARINC664)
+ * +-----------/ VLLUPFORMAT \----------+
+ * | \ (fixed) / |
+ * | \ / |
+ * 0 (forwarding) v ------------ |
+ * ------------ |
+ * / \ 1 (QoS classification) |
+ * +---/ ISCRITICAL \-----------+ |
+ * | \ (per rule) / | |
+ * | \ / VLID taken from VLID taken from
+ * v ------------ index of rule contents of rule
+ * select that matched that matched
+ * DESTPORTS | |
+ * | +---------+--------+
+ * | |
+ * | v
+ * | VL Forwarding
+ * | (indexed by VLID)
+ * | +---------+
+ * | +--------------| |
+ * | | select TYPE +---------+
+ * | v
+ * | 0 (rate ------------ 1 (time
+ * | constrained) / \ triggered)
+ * | +------/ TYPE \------------+
+ * | | \ (per VLID) / |
+ * | v \ / v
+ * | VL Policing ------------ VL Policing
+ * | (indexed by VLID) (indexed by VLID)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | select SHARINDX select SHARINDX to
+ * | to rate-limit re-enter VL Forwarding
+ * | groups of VL's with new VLID for egress
+ * | to same quota |
+ * | | |
+ * | select MAXLEN -> exceed => drop select MAXLEN -> exceed => drop
+ * | | |
+ * | v v
+ * | VL Forwarding VL Forwarding
+ * | (indexed by SHARINDX) (indexed by SHARINDX)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | select PRIORITY, select PRIORITY,
+ * | PARTITION, DESTPORTS PARTITION, DESTPORTS
+ * | | |
+ * | v v
+ * | VL Policing VL Policing
+ * | (indexed by SHARINDX) (indexed by SHARINDX)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | | |
+ * | v |
+ * | select BAG, -> exceed => drop |
+ * | JITTER v
+ * | | ----------------------------------------------
+ * | | / Reception Window is open for this VL \
+ * | | / (the Schedule Table executes an entry i \
+ * | | / M <= i < N, for which these conditions hold): \ no
+ * | | +----/ \-+
+ * | | |yes \ WINST[M] == 1 && WINSTINDEX[M] == VLID / |
+ * | | | \ WINEND[N] == 1 && WINSTINDEX[N] == VLID / |
+ * | | | \ / |
+ * | | | \ (the VL window has opened and not yet closed)/ |
+ * | | | ---------------------------------------------- |
+ * | | v v
+ * | | dispatch to DESTPORTS when the Schedule Table drop
+ * | | executes an entry i with TXEN == 1 && VLINDEX == i
+ * v v
+ * dispatch immediately to DESTPORTS
+ *
+ * The per-port classification key is always composed of {DMAC, VID, PCP} and
+ * is non-maskable. This 'looks like' the NULL stream identification function
+ * from IEEE 802.1CB clause 6, except for the extra VLAN PCP. When the switch
+ * ports operate as VLAN-unaware, we do allow the user to not specify the VLAN
+ * ID and PCP, and then the port-based defaults will be used.
+ *
+ * In TTEthernet, routing is something that needs to be done manually for each
+ * Virtual Link. So the flow action must always include one of:
+ * a. 'redirect', 'trap' or 'drop': select the egress port list
+ * Additionally, the following actions may be applied on a Virtual Link,
+ * turning it into 'critical' traffic:
+ * b. 'police': turn it into a rate-constrained VL, with bandwidth limitation
+ * given by the maximum frame length, bandwidth allocation gap (BAG) and
+ * maximum jitter.
+ * c. 'gate': turn it into a time-triggered VL, which can only be received
+ * and forwarded according to a given schedule.
+ */
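+
+/* A rough usage sketch, not part of this patch: the exact tc syntax depends
+ * on the iproute2 version, and the MAC address and interval values below are
+ * purely illustrative. A DMAC-keyed redirect rule and a time-triggered gate
+ * rule could be installed from user space along these lines:
+ *
+ *   tc qdisc add dev swp2 clsact
+ *   tc filter add dev swp2 ingress flower skip_sw \
+ *           dst_mac 42:be:24:9b:76:20 \
+ *           action mirred egress redirect dev swp3
+ *   tc filter add dev swp2 ingress flower skip_sw \
+ *           dst_mac 42:be:24:9b:76:20 \
+ *           action gate base-time 0 \
+ *           sched-entry OPEN 1200 -1 -1 \
+ *           sched-entry CLOSE 403200 -1 -1
+ */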
+
+static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
+ struct sja1105_vl_lookup_entry *b)
+{
+ if (a->macaddr < b->macaddr)
+ return true;
+ if (a->macaddr > b->macaddr)
+ return false;
+ if (a->vlanid < b->vlanid)
+ return true;
+ if (a->vlanid > b->vlanid)
+ return false;
+ if (a->port < b->port)
+ return true;
+ if (a->port > b->port)
+ return false;
+ if (a->vlanprior < b->vlanprior)
+ return true;
+ if (a->vlanprior > b->vlanprior)
+ return false;
+ /* Keys are equal */
+ return false;
+}
+
+static int sja1105_init_virtual_links(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_vl_policing_entry *vl_policing;
+ struct sja1105_vl_forwarding_entry *vl_fwd;
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ bool have_critical_virtual_links = false;
+ struct sja1105_table *table;
+ struct sja1105_rule *rule;
+ int num_virtual_links = 0;
+ int max_sharindx = 0;
+ int i, j, k;
+
+ /* Figure out the dimensioning of the problem */
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ /* Each VL lookup entry matches on a single ingress port */
+ num_virtual_links += hweight_long(rule->port_mask);
+
+ if (rule->vl.type != SJA1105_VL_NONCRITICAL)
+ have_critical_virtual_links = true;
+ if (max_sharindx < rule->vl.sharindx)
+ max_sharindx = rule->vl.sharindx;
+ }
+
+ if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
+ return -ENOSPC;
+ }
+
+ if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
+ return -ENOSPC;
+ }
+
+ max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;
+
+ /* Discard previous VL Lookup Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Policing Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Forwarding Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Forwarding Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Nothing to do */
+ if (!num_virtual_links)
+ return 0;
+
+ /* Pre-allocate space in the static config tables */
+
+ /* VL Lookup Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ table->entries = kcalloc(num_virtual_links,
+ table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = num_virtual_links;
+ vl_lookup = table->entries;
+
+ k = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ unsigned long port;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+
+ for_each_set_bit(port, &rule->port_mask, SJA1105_NUM_PORTS) {
+ vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
+ vl_lookup[k].port = port;
+ vl_lookup[k].macaddr = rule->key.vl.dmac;
+ if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
+ vl_lookup[k].vlanid = rule->key.vl.vid;
+ vl_lookup[k].vlanprior = rule->key.vl.pcp;
+ } else {
+ u16 vid = dsa_8021q_rx_vid(priv->ds, port);
+
+ vl_lookup[k].vlanid = vid;
+ vl_lookup[k].vlanprior = 0;
+ }
+ /* For critical VLs, the DESTPORTS mask is taken from
+ * the VL Forwarding Table, so no point in putting it
+ * in the VL Lookup Table
+ */
+ if (rule->vl.type == SJA1105_VL_NONCRITICAL)
+ vl_lookup[k].destports = rule->vl.destports;
+ else
+ vl_lookup[k].iscritical = true;
+ vl_lookup[k].flow_cookie = rule->cookie;
+ k++;
+ }
+ }
+
+ /* UM10944.pdf chapter 4.2.3 VL Lookup table:
+ * "the entries in the VL Lookup table must be sorted in ascending
+ * order (i.e. the smallest value must be loaded first) according to
+ * the following sort order: MACADDR, VLANID, PORT, VLANPRIOR."
+ */
+ for (i = 0; i < num_virtual_links; i++) {
+ struct sja1105_vl_lookup_entry *a = &vl_lookup[i];
+
+ for (j = i + 1; j < num_virtual_links; j++) {
+ struct sja1105_vl_lookup_entry *b = &vl_lookup[j];
+
+ if (sja1105_vl_key_lower(b, a)) {
+ struct sja1105_vl_lookup_entry tmp = *a;
+
+ *a = *b;
+ *b = tmp;
+ }
+ }
+ }
+
+ if (!have_critical_virtual_links)
+ return 0;
+
+ /* VL Policing Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
+ table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = max_sharindx;
+ vl_policing = table->entries;
+
+ /* VL Forwarding Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
+ table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = max_sharindx;
+ vl_fwd = table->entries;
+
+ /* VL Forwarding Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = 1;
+
+ for (i = 0; i < num_virtual_links; i++) {
+ unsigned long cookie = vl_lookup[i].flow_cookie;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+
+ if (rule->vl.type == SJA1105_VL_NONCRITICAL)
+ continue;
+ if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
+ int sharindx = rule->vl.sharindx;
+
+ vl_policing[i].type = 1;
+ vl_policing[i].sharindx = sharindx;
+ vl_policing[i].maxlen = rule->vl.maxlen;
+ vl_policing[sharindx].type = 1;
+
+ vl_fwd[i].type = 1;
+ vl_fwd[sharindx].type = 1;
+ vl_fwd[sharindx].priority = rule->vl.ipv;
+ vl_fwd[sharindx].partition = 0;
+ vl_fwd[sharindx].destports = rule->vl.destports;
+ }
+ }
+
+ sja1105_frame_memory_partitioning(priv);
+
+ return 0;
+}
+
+int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, unsigned long destports,
+ bool append)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ int rc;
+
+	if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
+	    key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can only redirect based on DMAC");
+		return -EOPNOTSUPP;
+	} else if (priv->vlan_state != SJA1105_VLAN_UNAWARE &&
+		   key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can only redirect based on {DMAC, VID, PCP}");
+		return -EOPNOTSUPP;
+	}
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_VL;
+ rule->key = *key;
+ list_add(&rule->list, &priv->flow_block.rules);
+ }
+
+ rule->port_mask |= BIT(port);
+ if (append)
+ rule->vl.destports |= destports;
+ else
+ rule->vl.destports = destports;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc) {
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+
+ return rc;
+}
+
+int sja1105_vl_delete(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ return rc;
+
+ return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
+}
+
+/* Insert into the global gate list, sorted by gate action time. */
+static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
+ struct sja1105_rule *rule,
+ u8 gate_state, s64 entry_time,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gate_entry *e;
+ int rc;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->rule = rule;
+ e->gate_state = gate_state;
+ e->interval = entry_time;
+
+ if (list_empty(&gating_cfg->entries)) {
+ list_add(&e->list, &gating_cfg->entries);
+ } else {
+ struct sja1105_gate_entry *p;
+
+ list_for_each_entry(p, &gating_cfg->entries, list) {
+ if (p->interval == e->interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Gate conflict");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (e->interval < p->interval)
+ break;
+ }
+ list_add(&e->list, p->list.prev);
+ }
+
+ gating_cfg->num_entries++;
+
+ return 0;
+err:
+ kfree(e);
+ return rc;
+}
+
+/* The gate entries contain absolute times in their e->interval field. Convert
+ * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
+ */
+static void
+sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
+ u64 cycle_time)
+{
+ struct sja1105_gate_entry *last_e;
+ struct sja1105_gate_entry *e;
+ struct list_head *prev;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ struct sja1105_gate_entry *p;
+
+ prev = e->list.prev;
+
+ if (prev == &gating_cfg->entries)
+ continue;
+
+ p = list_entry(prev, struct sja1105_gate_entry, list);
+ p->interval = e->interval - p->interval;
+ }
+ last_e = list_last_entry(&gating_cfg->entries,
+ struct sja1105_gate_entry, list);
+	last_e->interval = cycle_time - last_e->interval;
+}
+
+static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
+{
+ struct sja1105_gate_entry *e, *n;
+
+ list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
+ list_del(&e->list);
+ kfree(e);
+ }
+}
+
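+/* Merge the gate control lists of all time-triggered rules into one global
+ * subschedule, expressed over the cycle time of the rule with the widest
+ * cycle. Each rule's entries are replicated as many times as that rule's own
+ * cycle fits into the global cycle (offset by its relative base time), and
+ * the resulting absolute event times are then converted into intervals.
+ */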
+static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ struct sja1105_rule *rule;
+ s64 max_cycle_time = 0;
+ s64 its_base_time = 0;
+ int i, rc = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ if (max_cycle_time < rule->vl.cycle_time) {
+ max_cycle_time = rule->vl.cycle_time;
+ its_base_time = rule->vl.base_time;
+ }
+ }
+
+ if (!max_cycle_time)
+ return 0;
+
+ dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
+ max_cycle_time, its_base_time);
+
+ sja1105_free_gating_config(gating_cfg);
+
+ gating_cfg->base_time = its_base_time;
+ gating_cfg->cycle_time = max_cycle_time;
+ gating_cfg->num_entries = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ s64 time;
+ s64 rbt;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ /* Calculate the difference between this gating schedule's
+ * base time, and the base time of the gating schedule with the
+ * longest cycle time. We call it the relative base time (rbt).
+ */
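+		/* Illustrative example only (assuming future_base_time()
+		 * advances base_time in whole cycles until it is no earlier
+		 * than its_base_time): base_time = 2000, cycle_time = 500 and
+		 * its_base_time = 3100 give 2000 + 3 * 500 = 3500, hence
+		 * rbt = 3500 - 3100 = 400 ns into the global cycle.
+		 */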
+ rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
+ its_base_time);
+ rbt -= its_base_time;
+
+ time = rbt;
+
+ for (i = 0; i < rule->vl.num_entries; i++) {
+ u8 gate_state = rule->vl.entries[i].gate_state;
+ s64 entry_time = time;
+
+ while (entry_time < max_cycle_time) {
+ rc = sja1105_insert_gate_entry(gating_cfg, rule,
+ gate_state,
+ entry_time,
+ extack);
+ if (rc)
+ goto err;
+
+ entry_time += rule->vl.cycle_time;
+ }
+ time += rule->vl.entries[i].interval;
+ }
+ }
+
+ sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+
+ return 0;
+err:
+ sja1105_free_gating_config(gating_cfg);
+ return rc;
+}
+
+int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time, u64 cycle_time_ext,
+ u32 num_entries, struct action_gate_entry *entries)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ int ipv = -1;
+ int i, rc;
+ s32 rem;
+
+ if (cycle_time_ext) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cycle time extension not supported");
+ return -EOPNOTSUPP;
+ }
+
+ div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Base time must be multiple of 200 ns");
+ return -ERANGE;
+ }
+
+ div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cycle time must be multiple of 200 ns");
+ return -ERANGE;
+ }
+
+	if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
+	    key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+		dev_err(priv->ds->dev, "1: vlan state %d key type %d\n",
+			priv->vlan_state, key->type);
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can only gate based on DMAC");
+		return -EOPNOTSUPP;
+	} else if (priv->vlan_state != SJA1105_VLAN_UNAWARE &&
+		   key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+		dev_err(priv->ds->dev, "2: vlan state %d key type %d\n",
+			priv->vlan_state, key->type);
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can only gate based on {DMAC, VID, PCP}");
+		return -EOPNOTSUPP;
+	}
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ list_add(&rule->list, &priv->flow_block.rules);
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_VL;
+ rule->key = *key;
+ rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
+ rule->vl.sharindx = index;
+ rule->vl.base_time = base_time;
+ rule->vl.cycle_time = cycle_time;
+ rule->vl.num_entries = num_entries;
+ rule->vl.entries = kcalloc(num_entries,
+ sizeof(struct action_gate_entry),
+ GFP_KERNEL);
+ if (!rule->vl.entries) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ div_s64_rem(entries[i].interval,
+ sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interval must be multiple of 200 ns");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (!entries[i].interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interval cannot be zero");
+ rc = -ERANGE;
+ goto out;
+ }
+
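+			/* SJA1105_TAS_MAX_DELTA is BIT(18) ticks of 200 ns
+			 * each, i.e. 262144 * 200 ns ~= 52.4 ms per window.
+			 */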
+ if (ns_to_sja1105_delta(entries[i].interval) >
+ SJA1105_TAS_MAX_DELTA) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Maximum interval is 52 ms");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (entries[i].maxoctets != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload IntervalOctetMax");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (ipv == -1) {
+ ipv = entries[i].ipv;
+ } else if (ipv != entries[i].ipv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only support a single IPV per VL");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ rule->vl.entries[i] = entries[i];
+ }
+
+ if (ipv == -1) {
+ if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
+ ipv = key->vl.pcp;
+ else
+ ipv = 0;
+ }
+
+ /* TODO: support per-flow MTU */
+ rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+ rule->vl.ipv = ipv;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ rc = sja1105_compose_gating_subschedule(priv, extack);
+ if (rc)
+ goto out;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ goto out;
+
+ if (sja1105_gating_check_conflicts(priv, -1, extack)) {
+ NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
+ rc = -ERANGE;
+ goto out;
+ }
+
+out:
+ if (rc) {
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule->vl.entries);
+ kfree(rule);
+ }
+ }
+
+ return rc;
+}
+
+static int sja1105_find_vlid(struct sja1105_private *priv, int port,
+ struct sja1105_key *key)
+{
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ struct sja1105_table *table;
+ int i;
+
+ if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
+ key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
+ return -1;
+
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ vl_lookup = table->entries;
+
+ for (i = 0; i < table->entry_count; i++) {
+ if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
+ if (vl_lookup[i].port == port &&
+ vl_lookup[i].macaddr == key->vl.dmac &&
+ vl_lookup[i].vlanid == key->vl.vid &&
+ vl_lookup[i].vlanprior == key->vl.pcp)
+ return i;
+ } else {
+ if (vl_lookup[i].port == port &&
+ vl_lookup[i].macaddr == key->vl.dmac)
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct flow_stats *stats,
+ struct netlink_ext_ack *extack)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
+ u64 unreleased;
+ u64 timingerr;
+ u64 lengtherr;
+ int vlid, rc;
+ u64 pkts;
+
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ return 0;
+
+ vlid = sja1105_find_vlid(priv, port, &rule->key);
+ if (vlid < 0)
+ return 0;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
+ SJA1105_SIZE_VL_STATUS);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
+ return rc;
+ }
+
+ sja1105_unpack(buf, &timingerr, 31, 16, SJA1105_SIZE_VL_STATUS);
+ sja1105_unpack(buf, &unreleased, 15, 0, SJA1105_SIZE_VL_STATUS);
+ sja1105_unpack(buf, &lengtherr, 47, 32, SJA1105_SIZE_VL_STATUS);
+
+ pkts = timingerr + unreleased + lengtherr;
+
+ flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts,
+ jiffies - rule->vl.stats.lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ rule->vl.stats.pkts = pkts;
+ rule->vl.stats.lastused = jiffies;
+
+ return 0;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.h b/drivers/net/dsa/sja1105/sja1105_vl.h
new file mode 100644
index 000000000000..323fa0535af7
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_vl.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2020, NXP Semiconductors
+ */
+#ifndef _SJA1105_VL_H
+#define _SJA1105_VL_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL)
+
+int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, unsigned long destports,
+ bool append);
+
+int sja1105_vl_delete(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule,
+ struct netlink_ext_ack *extack);
+
+int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time, u64 cycle_time_ext,
+ u32 num_entries, struct action_gate_entry *entries);
+
+int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct flow_stats *stats,
+ struct netlink_ext_ack *extack);
+
+#else
+
+static inline int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key,
+ unsigned long destports,
+ bool append)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_delete(struct sja1105_private *priv,
+ int port, struct sja1105_rule *rule,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time,
+ u64 cycle_time_ext, u32 num_entries,
+ struct action_gate_entry *entries)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule,
+ struct flow_stats *stats,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL) */
+
+#endif /* _SJA1105_VL_H */
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index 0541785f9fee..5e54a5726aa4 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -89,7 +89,6 @@ static int vsc73xx_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct vsc73xx_platform *vsc_platform;
- struct resource *res = NULL;
int ret;
vsc_platform = devm_kzalloc(dev, sizeof(*vsc_platform), GFP_KERNEL);
@@ -103,14 +102,7 @@ static int vsc73xx_platform_probe(struct platform_device *pdev)
vsc_platform->vsc.ops = &vsc73xx_platform_ops;
/* obtain I/O memory space */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
- ret = -ENXIO;
- return ret;
- }
-
- vsc_platform->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ vsc_platform->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vsc_platform->base_addr)) {
dev_err(&pdev->dev, "cannot request I/O memory space\n");
ret = -ENXIO;
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index b762176a1406..139d0120f511 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -85,7 +85,6 @@
#include <linux/device.h>
#include <linux/eisa.h>
#include <linux/bitops.h>
-#include <linux/vermagic.h>
#include <linux/uaccess.h>
#include <asm/io.h>
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 90312fcd6319..47b4215bb93b 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -22,7 +22,6 @@
*/
-#include <linux/vermagic.h>
#define DRV_NAME "3c515"
#define CORKSCREW 1
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index a2b7f7ab8170..5984b7033999 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1149,7 +1149,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
print_info = (vortex_debug > 1);
if (print_info)
- pr_info("See Documentation/networking/device_drivers/3com/vortex.txt\n");
+ pr_info("See Documentation/networking/device_drivers/3com/vortex.rst\n");
pr_info("%s: 3Com %s %s at %p.\n",
print_name,
@@ -1954,7 +1954,7 @@ vortex_error(struct net_device *dev, int status)
dev->name, tx_status);
if (tx_status == 0x82) {
pr_err("Probably a duplex mismatch. See "
- "Documentation/networking/device_drivers/3com/vortex.txt\n");
+ "Documentation/networking/device_drivers/3com/vortex.rst\n");
}
dump_tx_ring(dev);
}
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 3a6fc99c6f32..7cc259893cb9 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -76,7 +76,7 @@ config VORTEX
"Hurricane" (3c555/3cSOHO) PCI
If you have such a card, say Y here. More specific information is in
- <file:Documentation/networking/device_drivers/3com/vortex.txt> and
+ <file:Documentation/networking/device_drivers/3com/vortex.rst> and
in the comments at the beginning of
<file:drivers/net/ethernet/3com/3c59x.c>.
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 2db42211329f..a64191fc2af9 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -45,7 +45,6 @@
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <linux/vermagic.h>
/*
* The current frame processor firmware fails to checksum a fragment
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 1b19385ad8a9..865892c1f23f 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -714,11 +714,11 @@ static int et131x_init_eeprom(struct et131x_adapter *adapter)
* gather additional information that normally would
* come from the eeprom, like MAC Address
*/
- adapter->has_eeprom = 0;
+ adapter->has_eeprom = false;
return -EIO;
}
}
- adapter->has_eeprom = 1;
+ adapter->has_eeprom = true;
/* Read the EEPROM for information regarding LED behavior. Refer to
* et131x_xcvr_init() for its use.
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 18d3b4340bd4..b3b8a8010142 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -417,7 +417,7 @@ static void emac_timeout(struct net_device *dev, unsigned int txqueue)
/* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct emac_board_info *db = netdev_priv(dev);
unsigned long channel;
@@ -425,7 +425,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
channel = db->tx_fifo_stat & 3;
if (channel == 3)
- return 1;
+ return NETDEV_TX_BUSY;
channel = (channel == 1 ? 1 : 0);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 1671c1f36691..907125abef2c 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -554,7 +554,7 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
* physically contiguous fragment starting at
* skb->data, for length of skb_headlen(skb).
*/
-static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
unsigned int txsize = priv->tx_ring_size;
@@ -562,7 +562,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tse_buffer *buffer = NULL;
int nfrags = skb_shinfo(skb)->nr_frags;
unsigned int nopaged_len = skb_headlen(skb);
- enum netdev_tx ret = NETDEV_TX_OK;
+ netdev_tx_t ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
spin_lock_bh(&priv->tx_lock);
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 8baf847e8622..7be3dcbf3d16 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -404,6 +404,10 @@ struct ena_admin_basic_stats {
u32 rx_drops_low;
u32 rx_drops_high;
+
+ u32 tx_drops_low;
+
+ u32 tx_drops_high;
};
struct ena_admin_acq_get_stats_resp {
@@ -1017,6 +1021,10 @@ struct ena_admin_aenq_keep_alive_desc {
u32 rx_drops_low;
u32 rx_drops_high;
+
+ u32 tx_drops_low;
+
+ u32 tx_drops_high;
};
struct ena_admin_ena_mmio_req_read_less_resp {
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index a250046b8e18..b51bf62af11b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1067,16 +1067,10 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_get_feat_resp get_resp;
- int rc;
- rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_HASH_FUNCTION,
- ena_dev->rss.hash_key_dma_addr,
- sizeof(ena_dev->rss.hash_key), 0);
- if (unlikely(rc)) {
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_FUNCTION))
return -EOPNOTSUPP;
- }
rss->hash_key =
dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
@@ -2286,6 +2280,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
struct ena_admin_get_feat_resp get_resp;
struct ena_admin_feature_rss_flow_hash_control *hash_key =
rss->hash_key;
+ enum ena_admin_hash_functions old_func;
int rc;
/* Make sure size is a mult of DWs */
@@ -2325,26 +2320,27 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return -EINVAL;
}
+ old_func = rss->hash_func;
rss->hash_func = func;
rc = ena_com_set_hash_function(ena_dev);
/* Restore the old function */
if (unlikely(rc))
- ena_com_get_hash_function(ena_dev, NULL, NULL);
+ rss->hash_func = old_func;
return rc;
}
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
- enum ena_admin_hash_functions *func,
- u8 *key)
+ enum ena_admin_hash_functions *func)
{
struct ena_rss *rss = &ena_dev->rss;
struct ena_admin_get_feat_resp get_resp;
- struct ena_admin_feature_rss_flow_hash_control *hash_key =
- rss->hash_key;
int rc;
+ if (unlikely(!func))
+ return -EINVAL;
+
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
rss->hash_key_dma_addr,
@@ -2357,8 +2353,15 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (rss->hash_func)
rss->hash_func--;
- if (func)
- *func = rss->hash_func;
+ *func = rss->hash_func;
+
+ return 0;
+}
+
+int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
+{
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ ena_dev->rss.hash_key;
if (key)
memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
@@ -2641,10 +2644,10 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
* ignore this error and have indirection table support only.
*/
rc = ena_com_hash_key_allocate(ena_dev);
- if (unlikely(rc) && rc != -EOPNOTSUPP)
- goto err_hash_key;
- else if (rc != -EOPNOTSUPP)
+ if (likely(!rc))
ena_com_hash_key_fill_default_key(ena_dev);
+ else if (rc != -EOPNOTSUPP)
+ goto err_hash_key;
rc = ena_com_hash_ctrl_init(ena_dev);
if (unlikely(rc))
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 469f298199a7..13a1b7812c46 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -54,9 +54,9 @@
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define ENA_MAX_NUM_IO_QUEUES 128U
+#define ENA_MAX_NUM_IO_QUEUES 128U
 /* We need two queues for each IO (one for Tx and one for Rx) */
-#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
+#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
#define ENA_MAX_HANDLERS 256
@@ -73,13 +73,13 @@
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
-#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
-#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
-#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
+#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
-#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
-#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
+#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
struct ena_llq_configurations {
enum ena_admin_llq_header_location llq_header_location;
@@ -501,18 +501,6 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
*/
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
-/* ena_com_set_admin_polling_mode - Get the admin completion queue polling mode
- * @ena_dev: ENA communication layer struct
- *
- * Get the admin completion mode.
- * If polling mode is on, ena_com_execute_admin_command will perform a
- * polling on the admin completion queue for the commands completion,
- * otherwise it will wait on wait event.
- *
- * @return state
- */
-bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
-
/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
* @ena_dev: ENA communication layer struct
* @polling: Enable/Disable polling mode
@@ -695,13 +683,11 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
*/
int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
-/* ena_com_get_hash_function - Retrieve the hash function and the hash key
- * from the device.
+/* ena_com_get_hash_function - Retrieve the hash function from the device.
* @ena_dev: ENA communication layer struct
* @func: hash function
- * @key: hash key
*
- * Retrieve the hash function and the hash key from the device.
+ * Retrieve the hash function from the device.
*
 * @note: If the caller called ena_com_fill_hash_function but didn't flush
* it to the device, the new configuration will be lost.
@@ -709,9 +695,20 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
- enum ena_admin_hash_functions *func,
- u8 *key);
+ enum ena_admin_hash_functions *func);
+/* ena_com_get_hash_key - Retrieve the hash key
+ * @ena_dev: ENA communication layer struct
+ * @key: hash key
+ *
+ * Retrieve the hash key.
+ *
+ * @note: If the caller called ena_com_fill_hash_function but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);
/* ena_com_fill_hash_ctrl - Fill RSS hash control
* @ena_dev: ENA communication layer struct.
* @proto: The protocol to configure.
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 9cc28b4b2627..830d3711d6ee 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -83,6 +83,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(bad_req_id),
ENA_STAT_TX_ENTRY(llq_buffer_copy),
ENA_STAT_TX_ENTRY(missed_tx),
+ ENA_STAT_TX_ENTRY(unmask_interrupt),
};
static const struct ena_stats ena_stats_rx_strings[] = {
@@ -635,6 +636,32 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
return ENA_HASH_KEY_SIZE;
}
+static int ena_indirection_table_set(struct ena_adapter *adapter,
+ const u32 *indir)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int i, rc;
+
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
+ rc = ena_com_indirect_table_fill_entry(ena_dev,
+ i,
+ ENA_IO_RXQ_IDX(indir[i]));
+ if (unlikely(rc)) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot fill indirect table (index is too large)\n");
+ return rc;
+ }
+ }
+
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (rc) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot set indirect table\n");
+ return rc == -EPERM ? -EOPNOTSUPP : rc;
+ }
+ return rc;
+}
+
static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -672,17 +699,18 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
/* We call this function in order to check if the device
* supports getting/setting the hash function.
*/
- rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
+ rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func);
if (rc) {
- if (rc == -EOPNOTSUPP) {
- key = NULL;
- hfunc = NULL;
+ if (rc == -EOPNOTSUPP)
rc = 0;
- }
return rc;
}
+ rc = ena_com_get_hash_key(adapter->ena_dev, key);
+ if (rc)
+ return rc;
+
switch (ena_func) {
case ENA_ADMIN_TOEPLITZ:
func = ETH_RSS_HASH_TOP;
@@ -699,7 +727,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (hfunc)
*hfunc = func;
- return rc;
+ return 0;
}
static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
@@ -707,27 +735,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
{
struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_com_dev *ena_dev = adapter->ena_dev;
- enum ena_admin_hash_functions func;
- int rc, i;
+ enum ena_admin_hash_functions func = 0;
+ int rc;
if (indir) {
- for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
- rc = ena_com_indirect_table_fill_entry(ena_dev,
- i,
- ENA_IO_RXQ_IDX(indir[i]));
- if (unlikely(rc)) {
- netif_err(adapter, drv, netdev,
- "Cannot fill indirect table (index is too large)\n");
- return rc;
- }
- }
-
- rc = ena_com_indirect_table_set(ena_dev);
- if (rc) {
- netif_err(adapter, drv, netdev,
- "Cannot set indirect table\n");
- return rc == -EPERM ? -EOPNOTSUPP : rc;
- }
+ rc = ena_indirection_table_set(adapter, indir);
+ if (rc)
+ return rc;
}
switch (hfunc) {
@@ -746,7 +760,7 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
return -EOPNOTSUPP;
}
- if (key) {
+ if (key || func) {
rc = ena_com_fill_hash_function(ena_dev, func, key,
ENA_HASH_KEY_SIZE,
0xFFFFFFFF);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 2cc765df8da3..85b87ed02dd5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1606,6 +1606,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
"%s qid %d\n", __func__, rx_ring->qid);
res_budget = budget;
xdp.rxq = &rx_ring->xdp_rxq;
+ xdp.frame_sz = ENA_PAGE_SIZE;
do {
xdp_verdict = XDP_PASS;
@@ -1762,6 +1763,9 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
tx_ring->smoothed_interval,
true);
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.unmask_interrupt++;
+ u64_stats_update_end(&tx_ring->syncp);
/* It is a shared MSI-X.
* Tx and Rx CQ have pointer to it.
* So we use one of them to reach the intr reg
@@ -3169,6 +3173,7 @@ static void ena_get_stats64(struct net_device *netdev,
struct ena_ring *rx_ring, *tx_ring;
unsigned int start;
u64 rx_drops;
+ u64 tx_drops;
int i;
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
@@ -3203,9 +3208,11 @@ static void ena_get_stats64(struct net_device *netdev,
do {
start = u64_stats_fetch_begin_irq(&adapter->syncp);
rx_drops = adapter->dev_stats.rx_drops;
+ tx_drops = adapter->dev_stats.tx_drops;
} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
stats->rx_dropped = rx_drops;
+ stats->tx_dropped = tx_drops;
stats->multicast = 0;
stats->collisions = 0;
@@ -3433,6 +3440,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_com_mmio_reg_read_request_destroy(ena_dev);
+ /* return reset reason to default value */
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
@@ -3991,7 +3999,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
}
}
- rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash function\n");
@@ -4356,6 +4364,7 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
cancel_work_sync(&adapter->reset_task);
rtnl_lock(); /* lock released inside the below if-else block */
+ adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
ena_destroy_device(adapter, true);
if (shutdown) {
netif_device_detach(netdev);
@@ -4514,14 +4523,17 @@ static void ena_keep_alive_wd(void *adapter_data,
struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
struct ena_admin_aenq_keep_alive_desc *desc;
u64 rx_drops;
+ u64 tx_drops;
desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
adapter->last_keep_alive_jiffies = jiffies;
rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
+ tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;
u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.rx_drops = rx_drops;
+ adapter->dev_stats.tx_drops = tx_drops;
u64_stats_update_end(&adapter->syncp);
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 9e1860d81908..680099afcccf 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -151,8 +151,9 @@
* The buffer size we share with the device is defined to be ENA_PAGE_SIZE
*/
-#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
- VLAN_HLEN - XDP_PACKET_HEADROOM)
+#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
+ VLAN_HLEN - XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
@@ -248,6 +249,7 @@ struct ena_stats_tx {
u64 bad_req_id;
u64 llq_buffer_copy;
u64 missed_tx;
+ u64 unmask_interrupt;
};
struct ena_stats_rx {
@@ -333,6 +335,7 @@ struct ena_stats_dev {
u64 interface_down;
u64 admin_q_pause;
u64 rx_drops;
+ u64 tx_drops;
};
enum ena_flags_t {
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index cf3562e82ca9..50fb66369415 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -536,7 +536,7 @@ void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
-int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 8266b3c1fefc..e53551daeea1 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -241,7 +241,7 @@ struct lance_private {
/* Now the prototypes we export */
int lance_open(struct net_device *dev);
int lance_close(struct net_device *dev);
-int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
void lance_set_multicast(struct net_device *dev);
void lance_tx_timeout(struct net_device *dev, unsigned int txqueue);
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 4e36122609a3..961796abab35 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -156,7 +156,7 @@ struct lance_memory {
struct lance_init_block init;
struct lance_tx_head tx_head[TX_RING_SIZE];
struct lance_rx_head rx_head[RX_RING_SIZE];
- char packet_area[0]; /* packet data follow after the
+ char packet_area[]; /* packet data follow after the
* init block and the ring
* descriptors and are located
* at runtime */
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 8b555665a33a..130a105d03f3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -25,6 +25,10 @@ atlantic-objs := aq_main.o \
hw_atl/hw_atl_utils.o \
hw_atl/hw_atl_utils_fw2x.o \
hw_atl/hw_atl_llh.o \
+ hw_atl2/hw_atl2.o \
+ hw_atl2/hw_atl2_utils.o \
+ hw_atl2/hw_atl2_utils_fw.o \
+ hw_atl2/hw_atl2_llh.o \
macsec/macsec_api.o
atlantic-$(CONFIG_MACSEC) += aq_macsec.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 7560f5506e55..52b9833fda99 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -80,8 +80,8 @@
#define AQ_CFG_LOCK_TRYS 100U
-#define AQ_CFG_DRV_AUTHOR "aQuantia"
-#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
+#define AQ_CFG_DRV_AUTHOR "Marvell"
+#define AQ_CFG_DRV_DESC "Marvell (Aquantia) Corporation(R) Network Driver"
#define AQ_CFG_DRV_NAME "atlantic"
#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index c8c402b013bb..52ad9433cabc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_common.h: Basic includes for all files in project. */
@@ -37,22 +38,31 @@
#define AQ_DEVICE_ID_AQC111S 0x91B1
#define AQ_DEVICE_ID_AQC112S 0x92B1
-#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter"
+#define AQ_DEVICE_ID_AQC113DEV 0x00C0
+#define AQ_DEVICE_ID_AQC113CS 0x94C0
+#define AQ_DEVICE_ID_AQC114CS 0x93C0
+#define AQ_DEVICE_ID_AQC113 0x04C0
+#define AQ_DEVICE_ID_AQC113C 0x14C0
+#define AQ_DEVICE_ID_AQC115C 0x12C0
+
+#define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
#define AQ_HWREV_ANY 0
#define AQ_HWREV_1 1
#define AQ_HWREV_2 2
-#define AQ_NIC_RATE_10G BIT(0)
-#define AQ_NIC_RATE_5G BIT(1)
-#define AQ_NIC_RATE_5GSR BIT(2)
-#define AQ_NIC_RATE_2GS BIT(3)
-#define AQ_NIC_RATE_1G BIT(4)
-#define AQ_NIC_RATE_100M BIT(5)
-
-#define AQ_NIC_RATE_EEE_10G BIT(6)
-#define AQ_NIC_RATE_EEE_5G BIT(7)
-#define AQ_NIC_RATE_EEE_2GS BIT(8)
-#define AQ_NIC_RATE_EEE_1G BIT(9)
+#define AQ_NIC_RATE_10G BIT(0)
+#define AQ_NIC_RATE_5G BIT(1)
+#define AQ_NIC_RATE_5GSR BIT(2)
+#define AQ_NIC_RATE_2G5 BIT(3)
+#define AQ_NIC_RATE_1G BIT(4)
+#define AQ_NIC_RATE_100M BIT(5)
+#define AQ_NIC_RATE_10M BIT(6)
+
+#define AQ_NIC_RATE_EEE_10G BIT(7)
+#define AQ_NIC_RATE_EEE_5G BIT(8)
+#define AQ_NIC_RATE_EEE_2G5 BIT(9)
+#define AQ_NIC_RATE_EEE_1G BIT(10)
+#define AQ_NIC_RATE_EEE_100M BIT(11)
#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 7241cf92b43a..86fc77d85fda 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -605,12 +605,15 @@ static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
if (speed & AQ_NIC_RATE_EEE_10G)
rate |= SUPPORTED_10000baseT_Full;
- if (speed & AQ_NIC_RATE_EEE_2GS)
+ if (speed & AQ_NIC_RATE_EEE_2G5)
rate |= SUPPORTED_2500baseX_Full;
if (speed & AQ_NIC_RATE_EEE_1G)
rate |= SUPPORTED_1000baseT_Full;
+ if (speed & AQ_NIC_RATE_EEE_100M)
+ rate |= SUPPORTED_100baseT_Full;
+
return rate;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 7d71bc7dc500..03fea9469f01 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -55,6 +55,7 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
+ u32 priv_data_len;
};
struct aq_hw_link_status_s {
@@ -136,6 +137,19 @@ enum aq_priv_flags {
BIT(AQ_HW_LOOPBACK_PHYINT_SYS) |\
BIT(AQ_HW_LOOPBACK_PHYEXT_SYS))
+#define ATL_HW_CHIP_MIPS 0x00000001U
+#define ATL_HW_CHIP_TPO2 0x00000002U
+#define ATL_HW_CHIP_RPF2 0x00000004U
+#define ATL_HW_CHIP_MPI_AQ 0x00000010U
+#define ATL_HW_CHIP_ATLANTIC 0x00800000U
+#define ATL_HW_CHIP_REVISION_A0 0x01000000U
+#define ATL_HW_CHIP_REVISION_B0 0x02000000U
+#define ATL_HW_CHIP_REVISION_B1 0x04000000U
+#define ATL_HW_CHIP_ANTIGUA 0x08000000U
+
+#define ATL_HW_IS_CHIP_FEATURE(_HW_, _F_) (!!(ATL_HW_CHIP_##_F_ & \
+ (_HW_)->chip_features))
+
struct aq_hw_s {
atomic_t flags;
u8 rbl_enabled:1;
@@ -159,6 +173,7 @@ struct aq_hw_s {
struct hw_atl_utils_fw_rpc rpc;
s64 ptp_clk_offset;
u16 phy_id;
+ void *priv;
};
struct aq_ring_s;
@@ -182,6 +197,11 @@ struct aq_hw_ops {
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
+ int (*hw_soft_reset)(struct aq_hw_s *self);
+
+ int (*hw_prepare)(struct aq_hw_s *self,
+ const struct aq_fw_ops **fw_ops);
+
int (*hw_reset)(struct aq_hw_s *self);
int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
@@ -254,7 +274,7 @@ struct aq_hw_ops {
struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
- int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
+ u32 (*hw_get_fw_version)(struct aq_hw_s *self);
int (*hw_set_offload)(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 0b3e234a54aa..91870ceaf3fe 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -401,7 +401,7 @@ static u32 aq_sc_idx_max(const enum aq_macsec_sc_sa sc_sa)
break;
default:
break;
- };
+ }
return result;
}
@@ -417,7 +417,7 @@ static u32 aq_to_hw_sc_idx(const u32 sc_idx, const enum aq_macsec_sc_sa sc_sa)
return sc_idx;
default:
WARN_ONCE(true, "Invalid sc_sa");
- };
+ }
return sc_idx;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index a369705a786a..1c6d12deb47a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_nic.c: Definition of common code for NIC. */
@@ -257,6 +258,28 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
AQ_CFG_POLLING_TIMER_INTERVAL);
}
+static int aq_nic_hw_prepare(struct aq_nic_s *self)
+{
+ int err = 0;
+
+ err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
+ if (err)
+ goto exit;
+
+ err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);
+
+exit:
+ return err;
+}
+
+static bool aq_nic_is_valid_ether_addr(const u8 *addr)
+{
+ /* Some engineering samples of Aquantia NICs are provisioned with a
+ * partially populated MAC, which is still invalid.
+ */
+ return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
+}
+
int aq_nic_ndev_register(struct aq_nic_s *self)
{
int err = 0;
@@ -266,7 +289,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
goto err_exit;
}
- err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
+ err = aq_nic_hw_prepare(self);
if (err)
goto err_exit;
@@ -281,6 +304,12 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
if (err)
goto err_exit;
+ if (!is_valid_ether_addr(self->ndev->dev_addr) ||
+ !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
+ netdev_warn(self->ndev, "MAC is invalid, will use random.");
+ eth_hw_addr_random(self->ndev);
+ }
+
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
{
static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
@@ -364,7 +393,8 @@ int aq_nic_init(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
+ if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
+ self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
err = aq_phy_init(self->aq_hw);
}
@@ -764,6 +794,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
u32 *regs_buff = p;
int err = 0;
+ if (unlikely(!self->aq_hw_ops->hw_get_regs))
+ return -EOPNOTSUPP;
+
regs->version = 1;
err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
@@ -778,6 +811,9 @@ err_exit:
int aq_nic_get_regs_count(struct aq_nic_s *self)
{
+ if (unlikely(!self->aq_hw_ops->hw_get_regs))
+ return 0;
+
return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
@@ -873,7 +909,7 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
5000baseT_Full);
- if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
ethtool_link_ksettings_add_link_mode(cmd, supported,
2500baseT_Full);
@@ -885,6 +921,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+
if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
ethtool_link_ksettings_add_link_mode(cmd, supported,
Pause);
@@ -912,7 +952,7 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
5000baseT_Full);
- if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
2500baseT_Full);
@@ -924,6 +964,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+
if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
@@ -954,6 +998,10 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
speed = cmd->base.speed;
switch (speed) {
+ case SPEED_10:
+ rate = AQ_NIC_RATE_10M;
+ break;
+
case SPEED_100:
rate = AQ_NIC_RATE_100M;
break;
@@ -963,7 +1011,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
break;
case SPEED_2500:
- rate = AQ_NIC_RATE_2GS;
+ rate = AQ_NIC_RATE_2G5;
break;
case SPEED_5000:
@@ -1006,11 +1054,7 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
- u32 fw_version = 0U;
-
- self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);
-
- return fw_version;
+ return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
}
int aq_nic_set_loopback(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 8a70ffe1d326..d10fff8a8c71 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -16,6 +16,7 @@
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
+#include "hw_atl2/hw_atl2.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
#include "aq_macsec.h"
@@ -41,6 +42,13 @@ static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113DEV), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CS), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC114CS), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+
{}
};
@@ -70,6 +78,13 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
{ AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
{ AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
+
+ { AQ_DEVICE_ID_AQC113DEV, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
@@ -104,10 +119,8 @@ int aq_pci_func_init(struct pci_dev *pdev)
int err;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
+ if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-
- }
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
@@ -237,6 +250,15 @@ static int aq_pci_probe(struct pci_dev *pdev,
goto err_ioremap;
}
self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
+ if (self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len) {
+ int len = self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len;
+
+ self->aq_hw->priv = kzalloc(len, GFP_KERNEL);
+ if (!self->aq_hw->priv) {
+ err = -ENOMEM;
+ goto err_free_aq_hw;
+ }
+ }
for (bar = 0; bar < 4; ++bar) {
if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
@@ -245,19 +267,19 @@ static int aq_pci_probe(struct pci_dev *pdev,
mmio_pa = pci_resource_start(pdev, bar);
if (mmio_pa == 0U) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
reg_sz = pci_resource_len(pdev, bar);
if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
if (!self->aq_hw->mmio) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
break;
}
@@ -265,7 +287,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
if (bar == 4) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
numvecs = min((u8)AQ_CFG_VECS_DEF,
@@ -305,6 +327,8 @@ err_register:
aq_pci_free_irq_vectors(self);
err_hwinit:
iounmap(self->aq_hw->mmio);
+err_free_aq_hw_priv:
+ kfree(self->aq_hw->priv);
err_free_aq_hw:
kfree(self->aq_hw);
err_ioremap:
@@ -332,6 +356,7 @@ static void aq_pci_remove(struct pci_dev *pdev)
aq_nic_free_vectors(self);
aq_pci_free_irq_vectors(self);
iounmap(self->aq_hw->mmio);
+ kfree(self->aq_hw->priv);
kfree(self->aq_hw);
pci_release_regions(pdev);
free_netdev(self->ndev);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 9b1062b8af64..1b0670a8ae33 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */
@@ -47,7 +48,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -57,7 +58,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -66,7 +67,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -74,7 +75,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = AQ_NIC_RATE_2GS |
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -267,8 +268,7 @@ static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
- 0x00010000U : 0x00000000U);
+ aq_hw_write_reg(self, 0x00007040U, 0x00000000U);
hw_atl_tdm_tx_dca_en_set(self, 0U);
hw_atl_tdm_tx_dca_mode_set(self, 0U);
@@ -886,6 +886,8 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
}
const struct aq_hw_ops hw_atl_ops_a0 = {
+ .hw_soft_reset = hw_atl_utils_soft_reset,
+ .hw_prepare = hw_atl_utils_initfw,
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
.hw_init = hw_atl_a0_hw_init,
.hw_reset = hw_atl_a0_hw_reset,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index d20d91cdece8..fa3cd7e9954b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
@@ -59,7 +60,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -69,7 +70,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -78,7 +79,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -86,7 +87,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = AQ_NIC_RATE_2GS |
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -187,8 +188,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
- struct aq_rss_parameters *rss_params)
+int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int addr = 0U;
@@ -251,9 +252,10 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
- struct aq_nic_cfg_s *aq_nic_cfg)
+int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg)
{
+ u64 rxcsum = !!(aq_nic_cfg->features & NETIF_F_RXCSUM);
unsigned int i;
/* TX checksums offloads*/
@@ -261,10 +263,8 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
- hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
- NETIF_F_RXCSUM));
- hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
- NETIF_F_RXCSUM));
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, rxcsum);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, rxcsum);
/* LSO offloads*/
hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@@ -272,7 +272,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
/* Outer VLAN tag offload */
hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);
-/* LRO offloads */
+ /* LRO offloads */
{
unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
@@ -314,7 +314,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
/* Tx TC/Queue number config */
- hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);
+ hw_atl_tpb_tps_tx_tc_mode_set(self, 1U);
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
@@ -324,7 +324,7 @@ static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
+ aq_hw_write_reg(self, 0x00007040U, ATL_HW_IS_CHIP_FEATURE(self, TPO2) ?
0x00010000U : 0x00000000U);
hw_atl_tdm_tx_dca_en_set(self, 0U);
hw_atl_tdm_tx_dca_mode_set(self, 0U);
@@ -372,8 +372,8 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- aq_hw_write_reg(self, 0x00005040U,
- IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
+ aq_hw_write_reg(self, 0x00005040U, ATL_HW_IS_CHIP_FEATURE(self, RPF2) ?
+ 0x000F0000U : 0x00000000U);
hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
@@ -384,7 +384,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
@@ -479,23 +479,21 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_start(struct aq_hw_s *self)
+int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
@@ -511,9 +509,8 @@ static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
return 0;
}
-static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
- struct aq_ring_s *ring,
- unsigned int frags)
+int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int frags)
{
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
@@ -600,9 +597,8 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
- struct aq_ring_s *aq_ring,
- struct aq_ring_param_s *aq_ring_param)
+int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
@@ -643,9 +639,8 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
- struct aq_ring_s *aq_ring,
- struct aq_ring_param_s *aq_ring_param)
+int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
@@ -673,9 +668,8 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
- struct aq_ring_s *ring,
- unsigned int sw_tail_old)
+int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int sw_tail_old)
{
for (; sw_tail_old != ring->sw_tail;
sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
@@ -734,8 +728,8 @@ static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
{
unsigned int hw_head_;
int err = 0;
@@ -753,8 +747,7 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
{
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
@@ -854,14 +847,14 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
@@ -871,7 +864,7 @@ static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
@@ -880,8 +873,8 @@ static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
-static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
- unsigned int packet_filter)
+int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int i = 0U;
@@ -1071,16 +1064,14 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
@@ -1089,7 +1080,7 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
{
- *tc_mode = hw_atl_rpb_tps_tx_tc_mode_get(self);
+ *tc_mode = hw_atl_tpb_tps_tx_tc_mode_get(self);
return aq_hw_err_from_flags(self);
}
@@ -1478,6 +1469,8 @@ static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
}
const struct aq_hw_ops hw_atl_ops_b0 = {
+ .hw_soft_reset = hw_atl_utils_soft_reset,
+ .hw_prepare = hw_atl_utils_initfw,
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
.hw_reset = hw_atl_b0_hw_reset,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index 09af1683034b..b855459272ca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware
@@ -33,4 +34,39 @@ extern const struct aq_hw_ops hw_atl_ops_b0;
#define hw_atl_ops_b1 hw_atl_ops_b0
+int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+
+int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring);
+int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int sw_tail_old);
+int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int frags);
+int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
+int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr);
+
+int hw_atl_b0_hw_start(struct aq_hw_s *self);
+
+int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask);
+int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask);
+int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask);
+
+int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter);
+
#endif /* HW_ATL_B0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index d1f68fc16291..9e2d01a6aac8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -693,6 +693,13 @@ void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
}
+u32 hw_atl_rpfl2promiscuous_mode_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+ HW_ATL_RPFL2PROMIS_MODE_MSK,
+ HW_ATL_RPFL2PROMIS_MODE_SHIFT);
+}
+
void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en)
{
@@ -867,6 +874,13 @@ void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
vlan_prom_mode_en);
}
+u32 hw_atl_rpf_vlan_prom_mode_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+ HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+ HW_ATL_RPF_VL_PROMIS_MODE_SHIFT);
+}
+
void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
u32 vlan_acc_untagged_packets)
{
@@ -1304,14 +1318,14 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
-u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_tpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
HW_ATL_TPB_TX_TC_MODE_MSK,
HW_ATL_TPB_TX_TC_MODE_SHIFT);
}
-void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+void hw_atl_tpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 62992b23c0e8..b88cb84805d5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -349,6 +349,9 @@ void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
u32 l2multicast_flr_en,
u32 filter);
+/* get l2 promiscuous mode enable */
+u32 hw_atl_rpfl2promiscuous_mode_en_get(struct aq_hw_s *aq_hw);
+
/* set l2 promiscuous mode enable */
void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en);
@@ -420,6 +423,9 @@ void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
u32 vlan_prom_mode_en);
+/* Get VLAN promiscuous mode enable */
+u32 hw_atl_rpf_vlan_prom_mode_en_get(struct aq_hw_s *aq_hw);
+
/* Set VLAN untagged action */
void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
u32 vlan_untagged_act);
@@ -610,11 +616,11 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
/* tpb */
/* set TX Traffic Class Mode */
-void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+void hw_atl_tpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode);
/* get TX Traffic Class Mode */
-u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_tpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 354705f9bc49..73c0f41df8d8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
@@ -53,7 +54,6 @@ enum mcp_area {
MCP_AREA_SETTINGS = 0x20000000,
};
-static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state);
static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
@@ -67,14 +67,10 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
int err = 0;
- err = hw_atl_utils_soft_reset(self);
- if (err)
- return err;
-
hw_atl_utils_hw_chip_features_init(self,
&self->chip_features);
- hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);
+ self->fw_ver_actual = hw_atl_utils_get_fw_version(self);
if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
self->fw_ver_actual) == 0) {
@@ -313,7 +309,7 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
for (++cnt; --cnt && !err;) {
aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
- if (IS_CHIP_FEATURE(REVISION_B1))
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get,
self, val, val != a,
1U, 1000U);
@@ -409,7 +405,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
if (err < 0)
goto err_exit;
- if (IS_CHIP_FEATURE(REVISION_B1))
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
else
err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);
@@ -438,7 +434,7 @@ int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
p, cnt, MCP_AREA_SETTINGS);
}
-static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
+int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
const u32 dw_major_mask = 0xff000000U;
const u32 dw_minor_mask = 0x00ffffffU;
@@ -501,7 +497,7 @@ int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
int err = 0;
- if (!IS_CHIP_FEATURE(MIPS)) {
+ if (!ATL_HW_IS_CHIP_FEATURE(self, MIPS)) {
err = -1;
goto err_exit;
}
@@ -607,7 +603,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
if (err < 0)
goto err_exit;
- if (IS_CHIP_FEATURE(REVISION_A0)) {
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_A0)) {
unsigned int mtu = self->aq_nic_cfg ?
self->aq_nic_cfg->mtu : 1514U;
pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
@@ -692,7 +688,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
link_status->mbps = 5000U;
break;
- case HAL_ATLANTIC_RATE_2GS:
+ case HAL_ATLANTIC_RATE_2G5:
link_status->mbps = 2500U;
break;
@@ -806,22 +802,24 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
u32 mif_rev = val & 0xFFU;
u32 chip_features = 0U;
+ chip_features |= ATL_HW_CHIP_ATLANTIC;
+
if ((0xFU & mif_rev) == 1U) {
- chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
- HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
- HAL_ATLANTIC_UTILS_CHIP_MIPS;
+ chip_features |= ATL_HW_CHIP_REVISION_A0 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS;
} else if ((0xFU & mif_rev) == 2U) {
- chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
- HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
- HAL_ATLANTIC_UTILS_CHIP_MIPS |
- HAL_ATLANTIC_UTILS_CHIP_TPO2 |
- HAL_ATLANTIC_UTILS_CHIP_RPF2;
+ chip_features |= ATL_HW_CHIP_REVISION_B0 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS |
+ ATL_HW_CHIP_TPO2 |
+ ATL_HW_CHIP_RPF2;
} else if ((0xFU & mif_rev) == 0xAU) {
- chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
- HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
- HAL_ATLANTIC_UTILS_CHIP_MIPS |
- HAL_ATLANTIC_UTILS_CHIP_TPO2 |
- HAL_ATLANTIC_UTILS_CHIP_RPF2;
+ chip_features |= ATL_HW_CHIP_REVISION_B1 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS |
+ ATL_HW_CHIP_TPO2 |
+ ATL_HW_CHIP_RPF2;
}
*p = chip_features;
@@ -919,11 +917,9 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
return 0;
}
-int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
+u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self)
{
- *fw_version = aq_hw_read_reg(self, 0x18U);
-
- return 0;
+ return aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
}
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index b15513914636..0b4b54fc1de0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
@@ -360,6 +361,8 @@ struct aq_rx_filter_vlan {
u8 queue;
};
+#define HW_ATL_VLAN_MAX_FILTERS 16U
+
struct aq_rx_filter_l2 {
s8 queue;
u8 location;
@@ -406,17 +409,6 @@ enum hw_atl_rx_ctrl_registers_l3l4 {
#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
((location) - AQ_RX_FIRST_LOC_FL3L4)
-#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
-#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
-#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
-#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
-#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
-#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
-#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U
-
-#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
- self->chip_features)
-
enum hal_atl_utils_fw_state_e {
MPI_DEINIT = 0,
MPI_RESET = 1,
@@ -427,7 +419,7 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_10G BIT(0)
#define HAL_ATLANTIC_RATE_5G BIT(1)
#define HAL_ATLANTIC_RATE_5GSR BIT(2)
-#define HAL_ATLANTIC_RATE_2GS BIT(3)
+#define HAL_ATLANTIC_RATE_2G5 BIT(3)
#define HAL_ATLANTIC_RATE_1G BIT(4)
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
@@ -622,7 +614,7 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
-int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
+u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self);
int hw_atl_utils_update_stats(struct aq_hw_s *self);
@@ -643,6 +635,8 @@ int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
struct hw_atl_utils_fw_rpc **rpc);
+int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+
extern const struct aq_fw_ops aq_fw_1x_ops;
extern const struct aq_fw_ops aq_fw_2x_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 1ad10cc14918..eeedd8c90067 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
@@ -134,7 +135,7 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
if (speed & AQ_NIC_RATE_5GSR)
rate |= FW2X_RATE_5G;
- if (speed & AQ_NIC_RATE_2GS)
+ if (speed & AQ_NIC_RATE_2G5)
rate |= FW2X_RATE_2G5;
if (speed & AQ_NIC_RATE_1G)
@@ -155,7 +156,7 @@ static u32 fw2x_to_eee_mask(u32 speed)
if (speed & HW_ATL_FW2X_CAP_EEE_5G_MASK)
rate |= AQ_NIC_RATE_EEE_5G;
if (speed & HW_ATL_FW2X_CAP_EEE_2G5_MASK)
- rate |= AQ_NIC_RATE_EEE_2GS;
+ rate |= AQ_NIC_RATE_EEE_2G5;
if (speed & HW_ATL_FW2X_CAP_EEE_1G_MASK)
rate |= AQ_NIC_RATE_EEE_1G;
@@ -170,7 +171,7 @@ static u32 eee_mask_to_fw2x(u32 speed)
rate |= HW_ATL_FW2X_CAP_EEE_10G_MASK;
if (speed & AQ_NIC_RATE_EEE_5G)
rate |= HW_ATL_FW2X_CAP_EEE_5G_MASK;
- if (speed & AQ_NIC_RATE_EEE_2GS)
+ if (speed & AQ_NIC_RATE_EEE_2G5)
rate |= HW_ATL_FW2X_CAP_EEE_2G5_MASK;
if (speed & AQ_NIC_RATE_EEE_1G)
rate |= HW_ATL_FW2X_CAP_EEE_1G_MASK;
@@ -282,8 +283,6 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
u32 mac_addr[2] = { 0 };
int err = 0;
- u32 h = 0U;
- u32 l = 0U;
if (efuse_addr != 0) {
err = hw_atl_utils_fw_downld_dwords(self,
@@ -298,26 +297,6 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
ether_addr_copy(mac, (u8 *)mac_addr);
- if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
- unsigned int rnd = 0;
-
- get_random_bytes(&rnd, sizeof(unsigned int));
-
- l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16);
- h = 0x8001300EU;
-
- mac[5] = (u8)(0xFFU & l);
- l >>= 8;
- mac[4] = (u8)(0xFFU & l);
- l >>= 8;
- mac[3] = (u8)(0xFFU & l);
- l >>= 8;
- mac[2] = (u8)(0xFFU & l);
- mac[1] = (u8)(0xFFU & h);
- h >>= 8;
- mac[0] = (u8)(0xFFU & h);
- }
-
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
new file mode 100644
index 000000000000..6f2b33ae3d06
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -0,0 +1,684 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "aq_hw.h"
+#include "aq_hw_utils.h"
+#include "aq_ring.h"
+#include "aq_nic.h"
+#include "hw_atl/hw_atl_b0.h"
+#include "hw_atl/hw_atl_utils.h"
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_internal.h"
+#include "hw_atl2_llh_internal.h"
+
+static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
+ u32 tag, u32 mask, u32 action);
+
+#define DEFAULT_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .msix_irqs = 8U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL2_RSS_MAX, \
+ .tcs = HW_ATL2_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL2_RXD_SIZE, \
+ .rxds_max = HW_ATL2_MAX_RXD, \
+ .rxds_min = HW_ATL2_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL2_TXD_SIZE, \
+ .txds_max = HW_ATL2_MAX_TXD, \
+ .txds_min = HW_ATL2_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL2_TX_RINGS, \
+ .rx_rings = HW_ATL2_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_TSO6 | \
+ NETIF_F_LRO | \
+ NETIF_F_NTUPLE | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_GSO_UDP_L4 | \
+ NETIF_F_GSO_PARTIAL, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL2_MTU_JUMBO, \
+ .mac_regs_count = 72, \
+ .hw_alive_check_addr = 0x10U, \
+ .priv_data_len = sizeof(struct hw_atl2_priv)
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_10G |
+ AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
+};
+
+static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL2_FW_SM_ACT_RSLVR);
+}
+
+static int hw_atl2_hw_reset(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ int err;
+
+ err = hw_atl2_utils_soft_reset(self);
+ if (err)
+ return err;
+
+ memset(priv, 0, sizeof(*priv));
+
+ self->aq_fw_ops->set_state(self, MPI_RESET);
+
+ err = aq_hw_err_from_flags(self);
+
+ return err;
+}
+
+static int hw_atl2_hw_queue_to_tc_map_set(struct aq_hw_s *self)
+{
+ if (!hw_atl_rpb_rpf_rx_traf_class_mode_get(self)) {
+ aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(0), 0x11110000);
+ aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(8), 0x33332222);
+ aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(16), 0x55554444);
+ aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(24), 0x77776666);
+ } else {
+ aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(0), 0x00000000);
+ aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(8), 0x11111111);
+ aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(16), 0x22222222);
+ aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(24), 0x33333333);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_qos_set(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ u32 tx_buff_size = HW_ATL2_TXBUF_MAX;
+ u32 rx_buff_size = HW_ATL2_RXBUF_MAX;
+ unsigned int prio = 0U;
+ u32 threshold = 0U;
+ u32 tc = 0U;
+
+ /* TPS Descriptor rate init */
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+ /* TPS VM init */
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+ /* TPS TC credits init */
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+
+ tc = 0;
+
+ /* TX Packet Scheduler Data TC0 */
+ hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF0, tc);
+ hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, 0x640, tc);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
+
+ /* Tx buf size TC0 */
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ /* QoS Rx buf size per TC */
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (prio = 0; prio < 8; ++prio)
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
+ cfg->tcs * prio / 8);
+
+ /* ATL2 Apply legacy ring to TC mapping */
+ hw_atl2_hw_queue_to_tc_map_set(self);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ u8 *indirection_table = rss_params->indirection_table;
+ int i;
+
+ for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;)
+ hw_atl2_new_rpf_rss_redir_set(self, 0, i, indirection_table[i]);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
+{
+ /* Tx TC/RSS number config */
+ hw_atl_tpb_tps_tx_tc_mode_set(self, 1U);
+
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+ /* Tx interrupts */
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
+
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
+
+ hw_atl2_tpb_tx_buf_clk_gate_en_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u8 index;
+
+ hw_atl2_rpf_act_rslvr_section_en_set(self, 0xFFFF);
+ hw_atl2_rpfl2_uc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC,
+ HW_ATL2_MAC_UC);
+ hw_atl2_rpfl2_bc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC);
+
+ index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_UC_MASK |
+ HW_ATL2_RPF_TAG_ALLMC_MASK,
+ HW_ATL2_ACTION_DROP);
+
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_VLAN_MASK |
+ HW_ATL2_RPF_TAG_UNTAG_MASK,
+ HW_ATL2_ACTION_DROP);
+
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_VLAN,
+ HW_ATL2_RPF_TAG_VLAN_MASK,
+ HW_ATL2_ACTION_ASSIGN_TC(0));
+
+ index = priv->art_base_index + HW_ATL2_RPF_MAC_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_UC,
+ HW_ATL2_RPF_TAG_UC_MASK,
+ HW_ATL2_ACTION_ASSIGN_TC(0));
+
+ index = priv->art_base_index + HW_ATL2_RPF_ALLMC_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_ALLMC,
+ HW_ATL2_RPF_TAG_ALLMC_MASK,
+ HW_ATL2_ACTION_ASSIGN_TC(0));
+
+ index = priv->art_base_index + HW_ATL2_RPF_UNTAG_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_UNTAG_MASK,
+ HW_ATL2_RPF_TAG_UNTAG_MASK,
+ HW_ATL2_ACTION_ASSIGN_TC(0));
+
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_ON_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_VLAN_MASK,
+ HW_ATL2_ACTION_DISABLE);
+
+ index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_ON_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_UC_MASK,
+ HW_ATL2_ACTION_DISABLE);
+}
+
+static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
+ bool promisc)
+{
+ u16 off_action = (!promisc &&
+ !hw_atl_rpfl2promiscuous_mode_en_get(self)) ?
+ HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE;
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u8 index;
+
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_VLAN_MASK |
+ HW_ATL2_RPF_TAG_UNTAG_MASK, off_action);
+}
+
+static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc)
+{
+ u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP;
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ bool vlan_promisc_enable;
+ u8 index;
+
+ index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_UC_MASK |
+ HW_ATL2_RPF_TAG_ALLMC_MASK,
+ off_action);
+
+ /* turn on VLAN promisc mode too */
+ vlan_promisc_enable = hw_atl_rpf_vlan_prom_mode_en_get(self);
+ hw_atl2_hw_new_rx_filter_vlan_promisc(self, promisc |
+ vlan_promisc_enable);
+}
+
+static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
+ u32 tag, u32 mask, u32 action)
+{
+ u32 val;
+ int err;
+
+ err = readx_poll_timeout_atomic(hw_atl2_sem_act_rslvr_get,
+ self, val, val == 1,
+ 1, 10000U);
+ if (err)
+ return err;
+
+ hw_atl2_rpf_act_rslvr_record_set(self, location, tag, mask,
+ action);
+
+ hw_atl_reg_glb_cpu_sem_set(self, 1, HW_ATL2_FW_SM_ACT_RSLVR);
+
+ return err;
+}
+
+static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int i;
+
+ /* Rx TC/RSS number config */
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
+
+ /* Rx flow control */
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
+
+ hw_atl2_rpf_rss_hash_type_set(self, HW_ATL2_RPF_RSS_HASH_TYPE_ALL);
+
+ /* RSS Ring selection */
+ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ HW_ATL_RSS_ENABLED_3INDEX_BITS :
+ HW_ATL_RSS_DISABLED);
+
+ /* Multicast filters */
+ for (i = HW_ATL2_MAC_MAX; i--;) {
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
+ }
+
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, HW_ATL_MCAST_FLT_ANY_TO_HOST, 0U);
+
+ /* Vlan filters */
+ hw_atl_rpf_vlan_outer_etht_set(self, ETH_P_8021AD);
+ hw_atl_rpf_vlan_inner_etht_set(self, ETH_P_8021Q);
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
+
+ /* Always accept untagged packets */
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
+
+ hw_atl2_hw_init_new_rx_filters(self);
+
+ /* Rx Interrupts */
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+{
+ static u32 aq_hw_atl2_igcr_table_[4][2] = {
+ [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
+ [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
+ [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
+ };
+
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
+ u8 base_index, count;
+ int err;
+
+ err = hw_atl2_utils_get_action_resolve_table_caps(self, &base_index,
+ &count);
+ if (err)
+ return err;
+
+ priv->art_base_index = 8 * base_index;
+
+ hw_atl2_init_launchtime(self);
+
+ hw_atl2_hw_init_tx_path(self);
+ hw_atl2_hw_init_rx_path(self);
+
+ hw_atl_b0_hw_mac_addr_set(self, mac_addr);
+
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
+
+ hw_atl2_hw_qos_set(self);
+ hw_atl2_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+ hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+ hw_atl2_rpf_new_enable_set(self, 1);
+
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ self->aq_fw_ops->update_stats(self);
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ /* Interrupts */
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl2_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ?
+ 1 : 0]);
+
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+ /* Interrupts */
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL2_ERR_INT << 0x18) |
+ (1U << 0x1F)) |
+ ((HW_ATL2_ERR_INT << 0x10) |
+ (1U << 0x17)), 0U);
+
+ hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl2_hw_ring_rx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ return hw_atl_b0_hw_ring_rx_init(self, aq_ring, aq_ring_param);
+}
+
+static int hw_atl2_hw_ring_tx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ return hw_atl_b0_hw_ring_tx_init(self, aq_ring, aq_ring_param);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+static int hw_atl2_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
+{
+ hw_atl2_hw_new_rx_filter_promisc(self, IS_FILTER_ENABLED(IFF_PROMISC));
+
+ return hw_atl_b0_hw_packet_filter_set(self, packet_filter);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl2_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+ [AQ_HW_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int err = 0;
+
+ if (count > (HW_ATL2_MAC_MAX - HW_ATL2_MAC_MIN)) {
+ err = -EBADRQC;
+ goto err_exit;
+ }
+ for (cfg->mc_list_count = 0U;
+ cfg->mc_list_count < count;
+ ++cfg->mc_list_count) {
+ u32 i = cfg->mc_list_count;
+ u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+ u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+ (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
+ HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
+ HW_ATL2_MAC_MIN + i);
+
+ hw_atl2_rpfl2_uc_flr_tag_set(self, 1, HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2_uc_flr_en_set(self, (cfg->is_mc_list_enabled),
+ HW_ATL2_MAC_MIN + i);
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl2_hw_interrupt_moderation_set(struct aq_hw_s *self)
+{
+ unsigned int i = 0U;
+ u32 itr_tx = 2U;
+ u32 itr_rx = 2U;
+
+ switch (self->aq_nic_cfg->itr) {
+ case AQ_CFG_INTERRUPT_MODERATION_ON:
+ case AQ_CFG_INTERRUPT_MODERATION_AUTO:
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
+
+ if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
+ /* HW timers are in 2us units */
+ int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
+ int tx_min_timer = tx_max_timer / 2;
+
+ int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
+ int rx_min_timer = rx_max_timer / 2;
+
+ tx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
+ tx_max_timer);
+ tx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
+ tx_min_timer);
+ rx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
+ rx_max_timer);
+ rx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
+ rx_min_timer);
+
+ itr_tx |= tx_min_timer << 0x8U;
+ itr_tx |= tx_max_timer << 0x10U;
+ itr_rx |= rx_min_timer << 0x8U;
+ itr_rx |= rx_max_timer << 0x10U;
+ } else {
+ static unsigned int hw_atl2_timers_table_tx_[][2] = {
+ {0xfU, 0xffU}, /* 10Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit 5GS */
+ {0xfU, 0x1ffU}, /* 2.5Gbit */
+ {0xfU, 0x1ffU}, /* 1Gbit */
+ {0xfU, 0x1ffU}, /* 100Mbit */
+ };
+ static unsigned int hw_atl2_timers_table_rx_[][2] = {
+ {0x6U, 0x38U},/* 10Gbit */
+ {0xCU, 0x70U},/* 5Gbit */
+ {0xCU, 0x70U},/* 5Gbit 5GS */
+ {0x18U, 0xE0U},/* 2.5Gbit */
+ {0x30U, 0x80U},/* 1Gbit */
+ {0x4U, 0x50U},/* 100Mbit */
+ };
+ unsigned int mbps = self->aq_link_status.mbps;
+ unsigned int speed_index;
+
+ speed_index = hw_atl_utils_mbps_2_speed_index(mbps);
+
+ /* Update user visible ITR settings */
+ self->aq_nic_cfg->tx_itr = hw_atl2_timers_table_tx_
+ [speed_index][1] * 2;
+ self->aq_nic_cfg->rx_itr = hw_atl2_timers_table_rx_
+ [speed_index][1] * 2;
+
+ itr_tx |= hw_atl2_timers_table_tx_
+ [speed_index][0] << 0x8U;
+ itr_tx |= hw_atl2_timers_table_tx_
+ [speed_index][1] << 0x10U;
+
+ itr_rx |= hw_atl2_timers_table_rx_
+ [speed_index][0] << 0x8U;
+ itr_rx |= hw_atl2_timers_table_rx_
+ [speed_index][1] << 0x10U;
+ }
+ break;
+ case AQ_CFG_INTERRUPT_MODERATION_OFF:
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
+ itr_tx = 0U;
+ itr_rx = 0U;
+ break;
+ }
+
+ for (i = HW_ATL2_RINGS_MAX; i--;) {
+ hw_atl2_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+ hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_stop(struct aq_hw_s *self)
+{
+ hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);
+
+ return 0;
+}
+
+static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
+{
+ return &self->curr_stats;
+}
+
+static int hw_atl2_hw_vlan_set(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u32 queue;
+ u8 index;
+ int i;
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1U);
+
+ for (i = 0; i < HW_ATL_VLAN_MAX_FILTERS; i++) {
+ queue = HW_ATL2_ACTION_ASSIGN_QUEUE(aq_vlans[i].queue);
+
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_USER_INDEX + i;
+ hw_atl2_act_rslvr_table_set(self, index, 0, 0,
+ HW_ATL2_ACTION_DISABLE);
+ if (aq_vlans[i].enable) {
+ hw_atl_rpf_vlan_id_flr_set(self,
+ aq_vlans[i].vlan_id, i);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
+
+ if (aq_vlans[i].queue != 0xFF) {
+ hw_atl_rpf_vlan_rxq_flr_set(self,
+ aq_vlans[i].queue,
+ i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
+
+ hw_atl2_rpf_vlan_flr_tag_set(self, i + 2, i);
+
+ index = priv->art_base_index +
+ HW_ATL2_RPF_VLAN_USER_INDEX + i;
+ hw_atl2_act_rslvr_table_set(self, index,
+ (i + 2) << HW_ATL2_RPF_TAG_VLAN_OFFSET,
+ HW_ATL2_RPF_TAG_VLAN_MASK, queue);
+ } else {
+ hw_atl2_rpf_vlan_flr_tag_set(self, 1, i);
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
+{
+ /* set promisc mode in case of disabling the vlan filter */
+ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
+ hw_atl2_hw_new_rx_filter_vlan_promisc(self, !enable);
+
+ return aq_hw_err_from_flags(self);
+}
+
+const struct aq_hw_ops hw_atl2_ops = {
+ .hw_soft_reset = hw_atl2_utils_soft_reset,
+ .hw_prepare = hw_atl2_utils_initfw,
+ .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
+ .hw_init = hw_atl2_hw_init,
+ .hw_reset = hw_atl2_hw_reset,
+ .hw_start = hw_atl_b0_hw_start,
+ .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
+ .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
+ .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
+ .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
+ .hw_stop = hw_atl2_hw_stop,
+
+ .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
+ .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,
+
+ .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
+ .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,
+
+ .hw_irq_enable = hw_atl_b0_hw_irq_enable,
+ .hw_irq_disable = hw_atl_b0_hw_irq_disable,
+ .hw_irq_read = hw_atl_b0_hw_irq_read,
+
+ .hw_ring_rx_init = hw_atl2_hw_ring_rx_init,
+ .hw_ring_tx_init = hw_atl2_hw_ring_tx_init,
+ .hw_packet_filter_set = hw_atl2_hw_packet_filter_set,
+ .hw_filter_vlan_set = hw_atl2_hw_vlan_set,
+ .hw_filter_vlan_ctrl = hw_atl2_hw_vlan_ctrl,
+ .hw_multicast_list_set = hw_atl2_hw_multicast_list_set,
+ .hw_interrupt_moderation_set = hw_atl2_hw_interrupt_moderation_set,
+ .hw_rss_set = hw_atl2_hw_rss_set,
+ .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
+ .hw_get_hw_stats = hw_atl2_utils_get_hw_stats,
+ .hw_get_fw_version = hw_atl2_utils_get_fw_version,
+ .hw_set_offload = hw_atl_b0_hw_offload_set,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
new file mode 100644
index 000000000000..de8723f1c28a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_H
+#define HW_ATL2_H
+
+#include "aq_common.h"
+
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_ops hw_atl2_ops;
+
+#endif /* HW_ATL2_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h
new file mode 100644
index 000000000000..e66b3583bfe9
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_INTERNAL_H
+#define HW_ATL2_INTERNAL_H
+
+#include "hw_atl2_utils.h"
+
+#define HW_ATL2_MTU_JUMBO 16352U
+#define HW_ATL2_MTU 1514U
+
+#define HW_ATL2_TX_RINGS 4U
+#define HW_ATL2_RX_RINGS 4U
+
+#define HW_ATL2_RINGS_MAX 32U
+#define HW_ATL2_TXD_SIZE (16U)
+#define HW_ATL2_RXD_SIZE (16U)
+
+#define HW_ATL2_MAC_UC 0U
+#define HW_ATL2_MAC_MIN 1U
+#define HW_ATL2_MAC_MAX 38U
+
+/* interrupts */
+#define HW_ATL2_ERR_INT 8U
+#define HW_ATL2_INT_MASK (0xFFFFFFFFU)
+
+#define HW_ATL2_TXBUF_MAX 128U
+#define HW_ATL2_RXBUF_MAX 192U
+
+#define HW_ATL2_RSS_REDIRECTION_MAX 64U
+
+#define HW_ATL2_TC_MAX 1U
+#define HW_ATL2_RSS_MAX 8U
+
+#define HW_ATL2_INTR_MODER_MAX 0x1FF
+#define HW_ATL2_INTR_MODER_MIN 0xFF
+
+#define HW_ATL2_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL2_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL2_MAX_RXD 8184U
+#define HW_ATL2_MAX_TXD 8184U
+
+#define HW_ATL2_FW_SM_ACT_RSLVR 0x3U
+
+#define HW_ATL2_RPF_TAG_UC_OFFSET 0x0
+#define HW_ATL2_RPF_TAG_ALLMC_OFFSET 0x6
+#define HW_ATL2_RPF_TAG_ET_OFFSET 0x7
+#define HW_ATL2_RPF_TAG_VLAN_OFFSET 0xA
+#define HW_ATL2_RPF_TAG_UNTAG_OFFSET 0xE
+#define HW_ATL2_RPF_TAG_L3_V4_OFFSET 0xF
+#define HW_ATL2_RPF_TAG_L3_V6_OFFSET 0x12
+#define HW_ATL2_RPF_TAG_L4_OFFSET 0x15
+#define HW_ATL2_RPF_TAG_L4_FLEX_OFFSET 0x18
+#define HW_ATL2_RPF_TAG_FLEX_OFFSET 0x1B
+#define HW_ATL2_RPF_TAG_PCP_OFFSET 0x1D
+
+#define HW_ATL2_RPF_TAG_UC_MASK (0x0000003F << HW_ATL2_RPF_TAG_UC_OFFSET)
+#define HW_ATL2_RPF_TAG_ALLMC_MASK (0x00000001 << HW_ATL2_RPF_TAG_ALLMC_OFFSET)
+#define HW_ATL2_RPF_TAG_UNTAG_MASK (0x00000001 << HW_ATL2_RPF_TAG_UNTAG_OFFSET)
+#define HW_ATL2_RPF_TAG_VLAN_MASK (0x0000000F << HW_ATL2_RPF_TAG_VLAN_OFFSET)
+#define HW_ATL2_RPF_TAG_ET_MASK (0x00000007 << HW_ATL2_RPF_TAG_ET_OFFSET)
+#define HW_ATL2_RPF_TAG_L3_V4_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V4_OFFSET)
+#define HW_ATL2_RPF_TAG_L3_V6_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V6_OFFSET)
+#define HW_ATL2_RPF_TAG_L4_MASK (0x00000007 << HW_ATL2_RPF_TAG_L4_OFFSET)
+#define HW_ATL2_RPF_TAG_PCP_MASK (0x00000007 << HW_ATL2_RPF_TAG_PCP_OFFSET)
+
+#define HW_ATL2_RPF_TAG_BASE_UC BIT(HW_ATL2_RPF_TAG_UC_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_ALLMC BIT(HW_ATL2_RPF_TAG_ALLMC_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_UNTAG BIT(HW_ATL2_RPF_TAG_UNTAG_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_VLAN BIT(HW_ATL2_RPF_TAG_VLAN_OFFSET)
+
+enum HW_ATL2_RPF_ART_INDEX {
+ HW_ATL2_RPF_L2_PROMISC_OFF_INDEX,
+ HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX,
+ HW_ATL2_RPF_L3L4_USER_INDEX = 8,
+ HW_ATL2_RPF_ET_PCP_USER_INDEX = HW_ATL2_RPF_L3L4_USER_INDEX + 16,
+ HW_ATL2_RPF_VLAN_USER_INDEX = HW_ATL2_RPF_ET_PCP_USER_INDEX + 16,
+ HW_ATL2_RPF_PCP_TO_TC_INDEX = HW_ATL2_RPF_VLAN_USER_INDEX +
+ HW_ATL_VLAN_MAX_FILTERS,
+ HW_ATL2_RPF_VLAN_INDEX = HW_ATL2_RPF_PCP_TO_TC_INDEX +
+ AQ_CFG_TCS_MAX,
+ HW_ATL2_RPF_MAC_INDEX,
+ HW_ATL2_RPF_ALLMC_INDEX,
+ HW_ATL2_RPF_UNTAG_INDEX,
+ HW_ATL2_RPF_VLAN_PROMISC_ON_INDEX,
+ HW_ATL2_RPF_L2_PROMISC_ON_INDEX,
+};
+
+#define HW_ATL2_ACTION(ACTION, RSS, INDEX, VALID) \
+ ((((ACTION) & 0x3U) << 8) | \
+ (((RSS) & 0x1U) << 7) | \
+ (((INDEX) & 0x3FU) << 2) | \
+ (((VALID) & 0x1U) << 0))
+
+#define HW_ATL2_ACTION_DROP HW_ATL2_ACTION(0, 0, 0, 1)
+#define HW_ATL2_ACTION_DISABLE HW_ATL2_ACTION(0, 0, 0, 0)
+#define HW_ATL2_ACTION_ASSIGN_QUEUE(QUEUE) HW_ATL2_ACTION(1, 0, (QUEUE), 1)
+#define HW_ATL2_ACTION_ASSIGN_TC(TC) HW_ATL2_ACTION(1, 1, (TC), 1)
+
+enum HW_ATL2_RPF_RSS_HASH_TYPE {
+ HW_ATL2_RPF_RSS_HASH_TYPE_NONE = 0,
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 = BIT(0),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP = BIT(1),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP = BIT(2),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 = BIT(3),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP = BIT(4),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP = BIT(5),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX = BIT(6),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP = BIT(7),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP = BIT(8),
+ HW_ATL2_RPF_RSS_HASH_TYPE_ALL = HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP,
+};
+
+#define HW_ATL_RSS_DISABLED 0x00000000U
+#define HW_ATL_RSS_ENABLED_3INDEX_BITS 0xB3333333U
+
+#define HW_ATL_MCAST_FLT_ANY_TO_HOST 0x00010FFFU
+
+struct hw_atl2_priv {
+ struct statistics_s last_stats;
+ unsigned int art_base_index;
+};
+
+#endif /* HW_ATL2_INTERNAL_H */
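
The HW_ATL2_ACTION() macro above packs one action-resolver word: the action code sits in bits 9:8, an RSS/TC flag at bit 7, the queue or TC index from bit 2 upward, and a valid flag at bit 0. A small userspace check of that packing, with the macro bodies copied from the header (the field-by-field decode in the prints is my reading of the macro, not a hardware statement):

#include <stdio.h>
#include <stdint.h>

#define HW_ATL2_ACTION(ACTION, RSS, INDEX, VALID) \
	((((ACTION) & 0x3U) << 8) | \
	 (((RSS) & 0x1U) << 7) | \
	 (((INDEX) & 0x3FU) << 2) | \
	 (((VALID) & 0x1U) << 0))

#define HW_ATL2_ACTION_ASSIGN_QUEUE(QUEUE) HW_ATL2_ACTION(1, 0, (QUEUE), 1)

int main(void)
{
	uint32_t act = HW_ATL2_ACTION_ASSIGN_QUEUE(3);

	printf("word   = 0x%03x\n", act);            /* 0x10d */
	printf("action = %u\n", (act >> 8) & 0x3);   /* 1: assign */
	printf("queue  = %u\n", (act >> 2) & 0x3f);  /* 3 */
	printf("valid  = %u\n", act & 0x1);          /* 1 */
	return 0;
}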
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c
new file mode 100644
index 000000000000..e779d70fde66
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "hw_atl2_llh.h"
+#include "hw_atl2_llh_internal.h"
+#include "aq_hw_utils.h"
+
+void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR,
+ HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK,
+ HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT,
+ rss_hash_type);
+}
+
+/* rpf */
+
+void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_NEW_EN_ADR,
+ HW_ATL2_RPF_NEW_EN_MSK,
+ HW_ATL2_RPF_NEW_EN_SHIFT,
+ enable);
+}
+
+void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPFL2UC_TAG_ADR(filter),
+ HW_ATL2_RPFL2UC_TAG_MSK,
+ HW_ATL2_RPFL2UC_TAG_SHIFT,
+ tag);
+}
+
+void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_L2_BC_TAG_ADR,
+ HW_ATL2_RPF_L2_BC_TAG_MSK,
+ HW_ATL2_RPF_L2_BC_TAG_SHIFT,
+ tag);
+}
+
+void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
+ u32 queue)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_RSS_REDIR_ADR(tc, index),
+ HW_ATL2_RPF_RSS_REDIR_MSK(tc),
+ HW_ATL2_RPF_RSS_REDIR_SHIFT(tc),
+ queue);
+}
+
+void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_VL_TAG_ADR(filter),
+ HW_ATL2_RPF_VL_TAG_MSK,
+ HW_ATL2_RPF_VL_TAG_SHIFT,
+ tag);
+}
+
+/* TX */
+
+void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR,
+ HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK,
+ HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT,
+ clk_gate_en);
+}
+
+void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue),
+ tx_intr_moderation_ctl);
+}
+
+void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK,
+ HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+ max_credit);
+}
+
+void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
+ u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc),
+ HW_ATL2_TPS_DATA_TCTWEIGHT_MSK,
+ HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT,
+ tx_pkt_shed_tc_data_weight);
+}
+
+u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_FPGA_VER_ADR);
+}
+
+void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw)
+{
+ u32 hw_ver = hw_atl2_get_hw_version(aq_hw);
+
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_LT_CTRL_ADR,
+ HW_ATL2_LT_CTRL_CLK_RATIO_MSK,
+ HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT,
+ hw_ver < HW_ATL2_FPGA_VER_U32(1, 0, 0, 0) ?
+ HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED :
+ hw_ver >= HW_ATL2_FPGA_VER_U32(1, 0, 85, 2) ?
+ HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED :
+ HW_ATL2_LT_CTRL_CLK_RATIO_QUATER_SPEED);
+}
+
+/* set action resolver record */
+void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 tag, u32 mask, u32 action)
+{
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(location),
+ tag);
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(location),
+ mask);
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(location),
+ action);
+}
+
+void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_REC_TAB_EN_ADR,
+ HW_ATL2_RPF_REC_TAB_EN_MSK,
+ HW_ATL2_RPF_REC_TAB_EN_SHIFT,
+ sections);
+}
+
+void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ data[j] = aq_hw_read_reg(aq_hw,
+ HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(i));
+}
+
+void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ aq_hw_write_reg(aq_hw, HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(i),
+ data[j]);
+}
+
+void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ data[j] = aq_hw_read_reg(aq_hw,
+ HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(i));
+}
+
+void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR,
+ HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK,
+ HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT,
+ finish);
+}
+
+u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL2_MIF_MCP_FINISHED_READ_ADR,
+ HW_ATL2_MIF_MCP_FINISHED_READ_MSK,
+ HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT);
+}
+
+u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR);
+}
+
+void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR, val);
+}
+
+u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_ADR);
+}
+
+void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR,
+ val);
+}
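
Nearly every helper in this file is a thin wrapper that hands an ADR/MSK/SHIFT triplet to aq_hw_write_reg_bit(). The sketch below shows the read-modify-write such a helper performs, with the register bus simulated by an array; reg_read/reg_write/reg_write_bit are illustrative stand-ins, not driver APIs.

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[0x10000 / 4];                /* fake register space */

static uint32_t reg_read(uint32_t addr)          { return regs[addr / 4]; }
static void reg_write(uint32_t addr, uint32_t v) { regs[addr / 4] = v; }

static void reg_write_bit(uint32_t addr, uint32_t msk, uint32_t shift,
			  uint32_t val)
{
	uint32_t v = reg_read(addr);

	v = (v & ~msk) | ((val << shift) & msk);  /* touch only this bitfield */
	reg_write(addr, v);
}

int main(void)
{
	/* HW_ATL2_RPF_NEW_EN: address 0x5104, mask 0x800, shift 11 */
	reg_write_bit(0x5104, 0x00000800, 11, 1);
	printf("0x5104 = 0x%08x\n", reg_read(0x5104));   /* 0x00000800 */
	return 0;
}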
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h
new file mode 100644
index 000000000000..8c6d78a64d42
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_LLH_H
+#define HW_ATL2_LLH_H
+
+#include <linux/types.h>
+
+struct aq_hw_s;
+
+/* Set TX Interrupt Moderation Control Register */
+void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
+
+/* Set RSS HASH type */
+void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type);
+
+/* set new RPF enable */
+void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable);
+
+/* set l2 unicast filter tag */
+void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
+
+/* set l2 broadcast filter tag */
+void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag);
+
+/* set new rss redirection table */
+void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
+ u32 queue);
+
+/* Set VLAN filter tag */
+void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
+
+/* set tx buffer clock gate enable */
+void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en);
+
+/* set tx packet scheduler tc data max credit */
+void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc);
+
+/* set tx packet scheduler tc data weight */
+void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
+ u32 tc);
+
+u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw);
+
+void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw);
+
+/* set action resolver record */
+void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 tag, u32 mask, u32 action);
+
+/* set enable action resolver section */
+void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections);
+
+/* get data from firmware shared input buffer */
+void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* set data into firmware shared input buffer */
+void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* get data from firmware shared output buffer */
+void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* set host finished write shared buffer indication */
+void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish);
+
+/* get mcp finished read shared buffer indication */
+u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw);
+
+/* get mcp boot register */
+u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw);
+
+/* set mcp boot register */
+void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val);
+
+/* get host interrupt request */
+u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw);
+
+/* clear host interrupt request */
+void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val);
+
+#endif /* HW_ATL2_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h
new file mode 100644
index 000000000000..cde9e9d2836d
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h
@@ -0,0 +1,328 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_LLH_INTERNAL_H
+#define HW_ATL2_LLH_INTERNAL_H
+
+/* RX pif_rpf_rss_hash_type_i Bitfield Definitions
+ */
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR 0x000054C8
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK 0x000001FF
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSKN 0xFFFFFE00
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT 0
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_WIDTH 9
+
+/* rx rpf_new_rpf_en bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_new_rpf_en_i".
+ * port="pif_rpf_new_rpf_en_i
+ */
+
+/* register address for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_ADR 0x00005104
+/* bitmask for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_MSK 0x00000800
+/* inverted bitmask for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_MSKN 0xfffff7ff
+/* lower bit position of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_SHIFT 11
+/* width of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_WIDTH 1
+/* default value of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_DEFAULT 0x0
+
+/* rx l2_uc_req_tag0{f}[5:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_req_tag0{f}[7:0]".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_req_tag0[5:0]"
+ */
+
+/* register address for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_ADR(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_MSK 0x0FC00000
+/* inverted bitmask for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_MSKN 0xF03FFFFF
+/* lower bit position of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_SHIFT 22
+/* width of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_WIDTH 6
+/* default value of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_DEFAULT 0x0
+
+/* rpf_l2_bc_req_tag[5:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_l2_bc_req_tag[5:0]".
+ * port="pifrpf_l2_bc_req_tag_i[5:0]"
+ */
+
+/* register address for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_ADR 0x000050F0
+/* bitmask for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_MSK 0x0000003F
+/* inverted bitmask for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_MSKN 0xffffffc0
+/* lower bit position of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_SHIFT 0
+/* width of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_WIDTH 6
+/* default value of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_DEFAULT 0x0
+
+/* rx rpf_rss_red1_data_[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_rss_red1_data[4:0]".
+ * port="pif_rpf_rss_red1_data_i[4:0]"
+ */
+
+/* register address for bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_ADR(TC, INDEX) (0x00006200 + \
+ (0x100 * !!((TC) > 3)) + (INDEX) * 4)
+/* bitmask for bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_MSK(TC) (0x00000001F << (5 * ((TC) % 4)))
+/* lower bit position of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_SHIFT(TC) (5 * ((TC) % 4))
+/* width of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_WIDTH 5
+/* default value of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_DEFAULT 0x0
+
+/* rx vlan_req_tag0{f}[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vlan_req_tag0{f}[3:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_vlan_req_tag0[3:0]"
+ */
+
+/* register address for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* bitmask for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_MSK 0x0000F000
+/* inverted bitmask for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_MSKN 0xFFFF0FFF
+/* lower bit position of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_SHIFT 12
+/* width of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_WIDTH 4
+/* default value of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_DEFAULT 0x0
+
+/* RX rx_q{Q}_tc_map[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rx_q{Q}_tc_map[2:0]".
+ * Parameter: Queue {Q} | bit-level stride | range [0, 31]
+ * PORT="pif_rx_q0_tc_map_i[2:0]"
+ */
+
+/* Register address for bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_ADR(queue) \
+ (((queue) < 32) ? 0x00005900 + ((queue) / 8) * 4 : 0)
+/* Lower bit position of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_SHIFT(queue) \
+ (((queue) < 32) ? ((queue) * 4) % 32 : 0)
+/* Width of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_WIDTH 3
+/* Default value of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_DEFAULT 0x0
+
+/* tx tx_buffer_clk_gate_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_buffer_clk_gate_en".
+ * port="pif_tpb_tx_buffer_clk_gate_en_i"
+ */
+
+/* register address for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR 0x00007900
+/* bitmask for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK 0x00000020
+/* inverted bitmask for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSKN 0xffffffdf
+/* lower bit position of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT 5
+/* width of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_WIDTH 1
+/* default value of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_DEFAULT 0x0
+
+/* tx data_tc{t}_credit_max[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_credit_max_i[11:0]"
+ */
+
+/* register address for bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
+/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
+/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
+/* width of bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
+/* default value of bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
+
+/* tx data_tc{t}_weight[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_weight_i[8:0]"
+ */
+
+/* register address for bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
+/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
+/* lower bit position of bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT 0
+/* width of bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 9
+/* default value of bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
+
+/* tx interrupt moderation control register definitions
+ * Preprocessor definitions for TX Interrupt Moderation Control Register
+ * Base Address: 0x00007c28
+ * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
+ */
+
+#define HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue) (0x00007c28u + (queue) * 0x40)
+
+/* Launch time control register */
+#define HW_ATL2_LT_CTRL_ADR 0x00007a1c
+
+#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_MSK 0xFFFF0000
+#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_SHIFT 16
+
+#define HW_ATL2_LT_CTRL_CLK_RATIO_MSK 0x0000FF00
+#define HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT 8
+#define HW_ATL2_LT_CTRL_CLK_RATIO_QUATER_SPEED 4
+#define HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED 2
+#define HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED 1
+
+#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_MSK 0x00000008
+#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_SHIFT 3
+
+#define HW_ATL2_LT_CTRL_LINK_SPEED_MSK 0x00000007
+#define HW_ATL2_LT_CTRL_LINK_SPEED_SHIFT 0
+
+/* FPGA VER register */
+#define HW_ATL2_FPGA_VER_ADR 0x000000f4
+#define HW_ATL2_FPGA_VER_U32(mj, mi, bl, rv) \
+ ((((mj) & 0xff) << 24) | \
+ (((mi) & 0xff) << 16) | \
+ (((bl) & 0xff) << 8) | \
+ (((rv) & 0xff) << 0))
+
+/* ahb_mem_addr{f}[31:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "ahb_mem_addr{f}[31:0]".
+ * Parameter: filter {f} | stride size 0x10 | range [0, 127]
+ * PORT="ahb_mem_addr{f}[31:0]"
+ */
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(filter) \
+ (0x00014000u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSKN 0x00000000u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_WIDTH 31
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_DEFAULT 0x0
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(filter) \
+ (0x00014004u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSKN 0x00000000u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_WIDTH 31
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_DEFAULT 0x0
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(filter) \
+ (0x00014008u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSK 0x000007FFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSKN 0xFFFFF800u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_WIDTH 10
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_DEFAULT 0x0
+
+/* rpf_rec_tab_en[15:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rpf_rec_tab_en[15:0]".
+ * PORT="pif_rpf_rec_tab_en[15:0]"
+ */
+/* Register address for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_ADR 0x00006ff0u
+/* Bitmask for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_SHIFT 0
+/* Width of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_WIDTH 16
+/* Default value of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_DEFAULT 0x0
+
+/* Register address for firmware shared input buffer */
+#define HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(dword) (0x00012000U + (dword) * 0x4U)
+/* Register address for firmware shared output buffer */
+#define HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(dword) (0x00013000U + (dword) * 0x4U)
+
+/* pif_host_finished_buf_wr_i Bitfield Definitions
+ * Preprocessor definitions for the bitfield "pif_host_finished_buf_wr_i".
+ * PORT="pif_host_finished_buf_wr_i"
+ */
+/* Register address for bitfield rpif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR 0x00000e00u
+/* Bitmask for bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK 0x00000001u
+/* Inverted bitmask for bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSKN 0xFFFFFFFEu
+/* Lower bit position of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT 0
+/* Width of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_WIDTH 1
+/* Default value of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_DEFAULT 0x0
+
+/* pif_mcp_finished_buf_rd_i Bitfield Definitions
+ * Preprocessor definitions for the bitfield "pif_mcp_finished_buf_rd_i".
+ * PORT="pif_mcp_finished_buf_rd_i"
+ */
+/* Register address for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_ADR 0x00000e04u
+/* Bitmask for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_MSK 0x00000001u
+/* Inverted bitmask for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_MSKN 0xFFFFFFFEu
+/* Lower bit position of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT 0
+/* Width of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_WIDTH 1
+/* Default value of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_DEFAULT 0x0
+
+/* Register address for bitfield pif_mcp_boot_reg */
+#define HW_ATL2_MIF_BOOT_REG_ADR 0x00003040u
+
+#define HW_ATL2_MCP_HOST_REQ_INT_READY BIT(0)
+
+#define HW_ATL2_MCP_HOST_REQ_INT_ADR 0x00000F00u
+#define HW_ATL2_MCP_HOST_REQ_INT_SET_ADR 0x00000F04u
+#define HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR 0x00000F08u
+
+#endif /* HW_ATL2_LLH_INTERNAL_H */
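
The HW_ATL2_FPGA_VER_U32() packing defined above is what hw_atl2_init_launchtime() in hw_atl2_llh.c compares against to pick a launch-time clock ratio. A userspace illustration of that comparison, with the macro body copied from the header (the numeric ratios 1/2/4 mirror the FULL/HALF/QUATER_SPEED constants):

#include <stdio.h>
#include <stdint.h>

#define HW_ATL2_FPGA_VER_U32(mj, mi, bl, rv) \
	((((mj) & 0xff) << 24) | \
	 (((mi) & 0xff) << 16) | \
	 (((bl) & 0xff) << 8) | \
	 (((rv) & 0xff) << 0))

int main(void)
{
	uint32_t hw_ver = HW_ATL2_FPGA_VER_U32(1, 0, 85, 2);   /* 0x01005502 */
	uint32_t ratio;

	if (hw_ver < HW_ATL2_FPGA_VER_U32(1, 0, 0, 0))
		ratio = 1;      /* full speed */
	else if (hw_ver >= HW_ATL2_FPGA_VER_U32(1, 0, 85, 2))
		ratio = 2;      /* half speed */
	else
		ratio = 4;      /* quarter speed */

	printf("hw_ver = 0x%08x, clk ratio = %u\n", hw_ver, ratio);
	return 0;
}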
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
new file mode 100644
index 000000000000..f3766780e975
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/iopoll.h>
+
+#include "aq_hw_utils.h"
+#include "hw_atl/hw_atl_utils.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_llh_internal.h"
+
+#define HW_ATL2_FW_VER_1X 0x01000000U
+
+#define AQ_A2_BOOT_STARTED BIT(0x18)
+#define AQ_A2_CRASH_INIT BIT(0x1B)
+#define AQ_A2_BOOT_CODE_FAILED BIT(0x1C)
+#define AQ_A2_FW_INIT_FAILED BIT(0x1D)
+#define AQ_A2_FW_INIT_COMP_SUCCESS BIT(0x1F)
+
+#define AQ_A2_FW_BOOT_FAILED_MASK (AQ_A2_CRASH_INIT | \
+ AQ_A2_BOOT_CODE_FAILED | \
+ AQ_A2_FW_INIT_FAILED)
+#define AQ_A2_FW_BOOT_COMPLETE_MASK (AQ_A2_FW_BOOT_FAILED_MASK | \
+ AQ_A2_FW_INIT_COMP_SUCCESS)
+
+#define AQ_A2_FW_BOOT_REQ_REBOOT BIT(0x0)
+#define AQ_A2_FW_BOOT_REQ_HOST_BOOT BIT(0x8)
+#define AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT BIT(0xA)
+#define AQ_A2_FW_BOOT_REQ_PHY_FAST_BOOT BIT(0xB)
+
+int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
+{
+ int err;
+
+ self->fw_ver_actual = hw_atl2_utils_get_fw_version(self);
+
+ if (hw_atl_utils_ver_match(HW_ATL2_FW_VER_1X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_a2_fw_ops;
+ } else {
+ aq_pr_err("Bad FW version detected: %x, but continue\n",
+ self->fw_ver_actual);
+ *fw_ops = &aq_a2_fw_ops;
+ }
+ aq_pr_trace("Detect ATL2FW %x\n", self->fw_ver_actual);
+ self->aq_fw_ops = *fw_ops;
+ err = self->aq_fw_ops->init(self);
+
+ self->chip_features |= ATL_HW_CHIP_ANTIGUA;
+
+ return err;
+}
+
+static bool hw_atl2_mcp_boot_complete(struct aq_hw_s *self)
+{
+ u32 rbl_status;
+
+ rbl_status = hw_atl2_mif_mcp_boot_reg_get(self);
+ if (rbl_status & AQ_A2_FW_BOOT_COMPLETE_MASK)
+ return true;
+
+ /* Host boot requested */
+ if (hw_atl2_mif_host_req_int_get(self) & HW_ATL2_MCP_HOST_REQ_INT_READY)
+ return true;
+
+ return false;
+}
+
+int hw_atl2_utils_soft_reset(struct aq_hw_s *self)
+{
+ bool rbl_complete = false;
+ u32 rbl_status = 0;
+ u32 rbl_request;
+ int err;
+
+ hw_atl2_mif_host_req_int_clr(self, 0x01);
+ rbl_request = AQ_A2_FW_BOOT_REQ_REBOOT;
+#ifdef AQ_CFG_FAST_START
+ rbl_request |= AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT;
+#endif
+ hw_atl2_mif_mcp_boot_reg_set(self, rbl_request);
+
+ /* Wait for RBL boot */
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_boot_reg_get, self,
+ rbl_status,
+ ((rbl_status & AQ_A2_BOOT_STARTED) &&
+ (rbl_status != 0xFFFFFFFFu)),
+ 10, 200000);
+ if (err) {
+ aq_pr_err("Boot code hanged");
+ goto err_exit;
+ }
+
+ err = readx_poll_timeout_atomic(hw_atl2_mcp_boot_complete, self,
+ rbl_complete,
+ rbl_complete,
+ 10, 2000000);
+
+ if (err) {
+ aq_pr_err("FW Restart timed out");
+ goto err_exit;
+ }
+
+ rbl_status = hw_atl2_mif_mcp_boot_reg_get(self);
+
+ if (rbl_status & AQ_A2_FW_BOOT_FAILED_MASK) {
+ err = -EIO;
+ aq_pr_err("FW Restart failed");
+ goto err_exit;
+ }
+
+ if (hw_atl2_mif_host_req_int_get(self) &
+ HW_ATL2_MCP_HOST_REQ_INT_READY) {
+ err = -EIO;
+ aq_pr_err("No FW detected. Dynamic FW load not implemented");
+ goto err_exit;
+ }
+
+ if (self->aq_fw_ops) {
+ err = self->aq_fw_ops->init(self);
+ if (err) {
+ aq_pr_err("FW Init failed");
+ goto err_exit;
+ }
+ }
+
+err_exit:
+ return err;
+}
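
hw_atl2_utils_soft_reset() above relies on readx_poll_timeout_atomic() to re-read the boot register until the "boot started" bit appears or a timeout expires. The following is a rough userspace analogue of that bounded polling; poll_status() and fake_boot_reg() are invented stand-ins, not driver or kernel APIs.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define AQ_A2_BOOT_STARTED (1u << 0x18)

static int polls;

static uint32_t fake_boot_reg(void)
{
	/* Pretend firmware sets the "boot started" bit on the third read. */
	return ++polls >= 3 ? AQ_A2_BOOT_STARTED : 0;
}

static int poll_status(uint32_t (*read)(void), uint32_t *val, int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		*val = read();
		if ((*val & AQ_A2_BOOT_STARTED) && *val != 0xFFFFFFFFu)
			return 0;
		/* a real driver delays between reads (10us in the code above) */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	uint32_t status;
	int err = poll_status(fake_boot_reg, &status, 20000);

	printf("err=%d status=0x%08x after %d polls\n", err, status, polls);
	return 0;
}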
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
new file mode 100644
index 000000000000..b66fa346581c
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
@@ -0,0 +1,606 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_UTILS_H
+#define HW_ATL2_UTILS_H
+
+#include "aq_hw.h"
+
+/* F W A P I */
+
+struct link_options_s {
+ u8 link_up:1;
+ u8 link_renegotiate:1;
+ u8 minimal_link_speed:1;
+ u8 internal_loopback:1;
+ u8 external_loopback:1;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 eee_5G:1;
+ u8 eee_10G:1;
+ u8 rsvd3:3;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 rsvd4:1;
+ u8 downshift:1;
+ u8 downshift_retry:4;
+};
+
+struct link_control_s {
+ u8 mode:4;
+ u8 disable_crc_corruption:1;
+ u8 discard_short_frames:1;
+ u8 flow_control_mode:1;
+ u8 disable_length_check:1;
+
+ u8 discard_errored_frames:1;
+ u8 control_frame_enable:1;
+ u8 enable_tx_padding:1;
+ u8 enable_crc_forwarding:1;
+ u8 enable_frame_padding_removal_rx: 1;
+ u8 promiscuous_mode: 1;
+ u8 rsvd:2;
+
+ u16 rsvd2;
+};
+
+struct thermal_shutdown_s {
+ u8 enable:1;
+ u8 warning_enable:1;
+ u8 rsvd:6;
+
+ u8 shutdown_temperature;
+ u8 cold_temperature;
+ u8 warning_temperature;
+};
+
+struct mac_address_s {
+ u8 mac_address[6];
+};
+
+struct mac_address_aligned_s {
+ struct mac_address_s aligned;
+ u16 rsvd;
+};
+
+struct sleep_proxy_s {
+ struct wake_on_lan_s {
+ u8 wake_on_magic_packet:1;
+ u8 wake_on_pattern:1;
+ u8 wake_on_link_up:1;
+ u8 wake_on_link_down:1;
+ u8 wake_on_ping:1;
+ u8 wake_on_timer:1;
+ u8 rsvd:2;
+
+ u8 rsvd2;
+ u16 rsvd3;
+
+ u32 link_up_timeout;
+ u32 link_down_timeout;
+ u32 timer;
+ } wake_on_lan;
+
+ struct {
+ u32 mask[4];
+ u32 crc32;
+ } wake_up_pattern[8];
+
+ struct __packed {
+ u8 arp_responder:1;
+ u8 echo_responder:1;
+ u8 igmp_client:1;
+ u8 echo_truncate:1;
+ u8 address_guard:1;
+ u8 ignore_fragmented:1;
+ u8 rsvd:2;
+
+ u16 echo_max_len;
+ u8 rsvd2;
+ } ipv4_offload;
+
+ u32 ipv4_offload_addr[8];
+ u32 reserved[8];
+
+ struct __packed {
+ u8 ns_responder:1;
+ u8 echo_responder:1;
+ u8 mld_client:1;
+ u8 echo_truncate:1;
+ u8 address_guard:1;
+ u8 rsvd:3;
+
+ u16 echo_max_len;
+ u8 rsvd2;
+ } ipv6_offload;
+
+ u32 ipv6_offload_addr[16][4];
+
+ struct {
+ u16 port[16];
+ } tcp_port_offload;
+
+ struct {
+ u16 port[16];
+ } udp_port_offload;
+
+ struct {
+ u32 retry_count;
+ u32 retry_interval;
+ } ka4_offload;
+
+ struct {
+ u32 timeout;
+ u16 local_port;
+ u16 remote_port;
+ u8 remote_mac_addr[6];
+ u16 rsvd;
+ u32 rsvd2;
+ u32 rsvd3;
+ u16 rsvd4;
+ u16 win_size;
+ u32 seq_num;
+ u32 ack_num;
+ u32 local_ip;
+ u32 remote_ip;
+ } ka4_connection[16];
+
+ struct {
+ u32 retry_count;
+ u32 retry_interval;
+ } ka6_offload;
+
+ struct {
+ u32 timeout;
+ u16 local_port;
+ u16 remote_port;
+ u8 remote_mac_addr[6];
+ u16 rsvd;
+ u32 rsvd2;
+ u32 rsvd3;
+ u16 rsvd4;
+ u16 win_size;
+ u32 seq_num;
+ u32 ack_num;
+ u32 local_ip[4];
+ u32 remote_ip[4];
+ } ka6_connection[16];
+
+ struct {
+ u32 rr_count;
+ u32 rr_buf_len;
+ u32 idx_offset;
+ u32 rr__offset;
+ } mdns_offload;
+};
+
+struct pause_quanta_s {
+ u16 quanta_10M;
+ u16 threshold_10M;
+ u16 quanta_100M;
+ u16 threshold_100M;
+ u16 quanta_1G;
+ u16 threshold_1G;
+ u16 quanta_2P5G;
+ u16 threshold_2P5G;
+ u16 quanta_5G;
+ u16 threshold_5G;
+ u16 quanta_10G;
+ u16 threshold_10G;
+};
+
+struct data_buffer_status_s {
+ u32 data_offset;
+ u32 data_length;
+};
+
+struct device_caps_s {
+ u8 finite_flashless:1;
+ u8 cable_diag:1;
+ u8 ncsi:1;
+ u8 avb:1;
+ u8 rsvd:4;
+
+ u8 rsvd2;
+ u16 rsvd3;
+ u32 rsvd4;
+};
+
+struct version_s {
+ struct bundle_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } bundle;
+ struct mac_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } mac;
+ struct phy_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } phy;
+ u32 rsvd;
+};
+
+struct link_status_s {
+ u8 link_state:4;
+ u8 link_rate:4;
+
+ u8 pause_tx:1;
+ u8 pause_rx:1;
+ u8 eee:1;
+ u8 duplex:1;
+ u8 rsvd:4;
+
+ u16 rsvd2;
+};
+
+struct wol_status_s {
+ u8 wake_count;
+ u8 wake_reason;
+
+ u16 wake_up_packet_length :12;
+ u16 wake_up_pattern_number :3;
+ u16 rsvd:1;
+
+ u32 wake_up_packet[379];
+};
+
+struct mac_health_monitor_s {
+ u8 mac_ready:1;
+ u8 mac_fault:1;
+ u8 mac_flashless_finished:1;
+ u8 rsvd:5;
+
+ u8 mac_temperature;
+ u16 mac_heart_beat;
+ u16 mac_fault_code;
+ u16 rsvd2;
+};
+
+struct phy_health_monitor_s {
+ u8 phy_ready:1;
+ u8 phy_fault:1;
+ u8 phy_hot_warning:1;
+ u8 rsvd:5;
+
+ u8 phy_temperature;
+ u16 phy_heart_beat;
+ u16 phy_fault_code;
+ u16 rsvd2;
+};
+
+struct device_link_caps_s {
+ u8 rsvd:3;
+ u8 internal_loopback:1;
+ u8 external_loopback:1;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 rsvd3:1;
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 rsvd4:1;
+ u8 eee_5G:1;
+ u8 rsvd5:1;
+ u8 eee_10G:1;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 pfc:1;
+ u8 downshift:1;
+ u8 downshift_retry:4;
+};
+
+struct sleep_proxy_caps_s {
+ u8 ipv4_offload:1;
+ u8 ipv6_offload:1;
+ u8 tcp_port_offload:1;
+ u8 udp_port_offload:1;
+ u8 ka4_offload:1;
+ u8 ka6_offload:1;
+ u8 mdns_offload:1;
+ u8 wake_on_ping:1;
+
+ u8 wake_on_magic_packet:1;
+ u8 wake_on_pattern:1;
+ u8 wake_on_timer:1;
+ u8 wake_on_link:1;
+ u8 wake_patterns_count:4;
+
+ u8 ipv4_count;
+ u8 ipv6_count;
+
+ u8 tcp_port_offload_count;
+ u8 udp_port_offload_count;
+
+ u8 tcp4_ka_count;
+ u8 tcp6_ka_count;
+
+ u8 igmp_offload:1;
+ u8 mld_offload:1;
+ u8 rsvd:6;
+
+ u8 rsvd2;
+ u16 rsvd3;
+};
+
+struct lkp_link_caps_s {
+ u8 rsvd:5;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 rsvd2:1;
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 rsvd3:1;
+ u8 eee_5G:1;
+ u8 rsvd4:1;
+ u8 eee_10G:1;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 rsvd5:6;
+};
+
+struct core_dump_s {
+ u32 reg0;
+ u32 reg1;
+ u32 reg2;
+
+ u32 hi;
+ u32 lo;
+
+ u32 regs[32];
+};
+
+struct trace_s {
+ u32 sync_counter;
+ u32 mem_buffer[0x1ff];
+};
+
+struct cable_diag_control_s {
+ u8 toggle :1;
+ u8 rsvd:7;
+
+ u8 wait_timeout_sec;
+ u16 rsvd2;
+};
+
+struct cable_diag_lane_data_s {
+ u8 result_code;
+ u8 dist;
+ u8 far_dist;
+ u8 rsvd;
+};
+
+struct cable_diag_status_s {
+ struct cable_diag_lane_data_s lane_data[4];
+ u8 transact_id;
+ u8 status:4;
+ u8 rsvd:4;
+ u16 rsvd2;
+};
+
+struct statistics_s {
+ struct {
+ u32 link_up;
+ u32 link_down;
+ } link;
+
+ struct {
+ u64 tx_unicast_octets;
+ u64 tx_multicast_octets;
+ u64 tx_broadcast_octets;
+ u64 rx_unicast_octets;
+ u64 rx_multicast_octets;
+ u64 rx_broadcast_octets;
+
+ u32 tx_unicast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_errors;
+
+ u32 rx_unicast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_dropped_frames;
+ u32 rx_error_frames;
+
+ u32 tx_good_frames;
+ u32 rx_good_frames;
+ u32 reserve_fw_gap;
+ } msm;
+ u32 main_loop_cycles;
+ u32 reserve_fw_gap;
+};
+
+struct filter_caps_s {
+ u8 l2_filters_base_index:6;
+ u8 flexible_filter_mask:2;
+ u8 l2_filter_count;
+ u8 ethertype_filter_base_index;
+ u8 ethertype_filter_count;
+
+ u8 vlan_filter_base_index;
+ u8 vlan_filter_count;
+ u8 l3_ip4_filter_base_index:4;
+ u8 l3_ip4_filter_count:4;
+ u8 l3_ip6_filter_base_index:4;
+ u8 l3_ip6_filter_count:4;
+
+ u8 l4_filter_base_index:4;
+ u8 l4_filter_count:4;
+ u8 l4_flex_filter_base_index:4;
+ u8 l4_flex_filter_count:4;
+ u8 rslv_tbl_base_index;
+ u8 rslv_tbl_count;
+};
+
+struct request_policy_s {
+ struct {
+ u8 all:1;
+ u8 mcast:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } promisc;
+
+ struct {
+ u8 accept:1;
+ u8 rsvd:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } bcast;
+
+ struct {
+ u8 accept:1;
+ u8 rsvd:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } mcast;
+
+ u8 rsvd:8;
+};
+
+struct fw_interface_in {
+ u32 mtu;
+ u32 rsvd1;
+ struct mac_address_aligned_s mac_address;
+ struct link_control_s link_control;
+ u32 rsvd2;
+ struct link_options_s link_options;
+ u32 rsvd3;
+ struct thermal_shutdown_s thermal_shutdown;
+ u32 rsvd4;
+ struct sleep_proxy_s sleep_proxy;
+ u32 rsvd5;
+ struct pause_quanta_s pause_quanta[8];
+ struct cable_diag_control_s cable_diag_control;
+ u32 rsvd6;
+ struct data_buffer_status_s data_buffer_status;
+ u32 rsvd7;
+ struct request_policy_s request_policy;
+};
+
+struct transaction_counter_s {
+ u16 transaction_cnt_a;
+ u16 transaction_cnt_b;
+};
+
+struct management_status_s {
+ struct mac_address_s mac_address;
+ u16 vlan;
+
+ struct{
+ u32 enable : 1;
+ u32 rsvd:31;
+ } flags;
+
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+};
+
+struct fw_interface_out {
+ struct transaction_counter_s transaction_id;
+ struct version_s version;
+ struct link_status_s link_status;
+ struct wol_status_s wol_status;
+ u32 rsvd;
+ u32 rsvd2;
+ struct mac_health_monitor_s mac_health_monitor;
+ u32 rsvd3;
+ u32 rsvd4;
+ struct phy_health_monitor_s phy_health_monitor;
+ u32 rsvd5;
+ u32 rsvd6;
+ struct cable_diag_status_s cable_diag_status;
+ u32 rsvd7;
+ struct device_link_caps_s device_link_caps;
+ u32 rsvd8;
+ struct sleep_proxy_caps_s sleep_proxy_caps;
+ u32 rsvd9;
+ struct lkp_link_caps_s lkp_link_caps;
+ u32 rsvd10;
+ struct core_dump_s core_dump;
+ u32 rsvd11;
+ struct statistics_s stats;
+ u32 rsvd12;
+ struct filter_caps_s filter_caps;
+ struct device_caps_s device_caps;
+ u32 rsvd13;
+ struct management_status_s management_status;
+ u32 reserve[21];
+ struct trace_s trace;
+};
+
+#define AQ_A2_FW_LINK_RATE_INVALID 0
+#define AQ_A2_FW_LINK_RATE_10M 1
+#define AQ_A2_FW_LINK_RATE_100M 2
+#define AQ_A2_FW_LINK_RATE_1G 3
+#define AQ_A2_FW_LINK_RATE_2G5 4
+#define AQ_A2_FW_LINK_RATE_5G 5
+#define AQ_A2_FW_LINK_RATE_10G 6
+
+#define AQ_HOST_MODE_INVALID 0U
+#define AQ_HOST_MODE_ACTIVE 1U
+#define AQ_HOST_MODE_SLEEP_PROXY 2U
+#define AQ_HOST_MODE_LOW_POWER 3U
+#define AQ_HOST_MODE_SHUTDOWN 4U
+
+int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
+
+int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
+
+u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self);
+
+int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
+ u8 *base_index, u8 *count);
+
+extern const struct aq_fw_ops aq_a2_fw_ops;
+
+#endif /* HW_ATL2_UTILS_H */
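
The fw_interface_in/fw_interface_out structures above describe a mailbox shared with firmware; the driver reaches individual fields through the MIF shared buffer as an array of dwords, deriving each field's dword offset with offsetof() (see the hw_atl2_shared_buffer_write()/get() macros below in hw_atl2_utils_fw.c). A trimmed userspace sketch of that offset calculation; the struct here is a stand-in, not the full layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_link_control { uint32_t word; };

struct demo_fw_interface_in {
	uint32_t mtu;
	uint32_t rsvd1;
	uint8_t  mac_address[8];
	struct demo_link_control link_control;
};

int main(void)
{
	size_t dword_off = offsetof(struct demo_fw_interface_in, link_control)
			   / sizeof(uint32_t);

	/* Each field is written at (its dword offset) into the MIF window. */
	printf("link_control starts at dword %zu of the shared buffer\n",
	       dword_off);
	return 0;
}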
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
new file mode 100644
index 000000000000..0ffc33bd67d0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/iopoll.h>
+
+#include "aq_hw.h"
+#include "aq_hw_utils.h"
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_internal.h"
+
+#define AQ_A2_FW_READ_TRY_MAX 1000
+
+#define hw_atl2_shared_buffer_write(HW, ITEM, VARIABLE) \
+ hw_atl2_mif_shared_buf_write(HW,\
+ (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32))
+
+#define hw_atl2_shared_buffer_get(HW, ITEM, VARIABLE) \
+ hw_atl2_mif_shared_buf_get(HW, \
+ (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), \
+ sizeof(VARIABLE) / sizeof(u32))
+
+/* This should never be used on non atomic fields,
+ * treat any > u32 read as non atomic.
+ */
+#define hw_atl2_shared_buffer_read(HW, ITEM, VARIABLE) \
+{\
+ BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \
+ sizeof(u32)) != 0,\
+ "Non aligned read " # ITEM);\
+ BUILD_BUG_ON_MSG(sizeof(VARIABLE) > sizeof(u32),\
+ "Non atomic read " # ITEM);\
+ hw_atl2_mif_shared_buf_read(HW, \
+ (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\
+}
+
+#define hw_atl2_shared_buffer_read_safe(HW, ITEM, DATA) \
+ hw_atl2_shared_buffer_read_block((HW), \
+ (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
+ sizeof(((struct fw_interface_out *)0)->ITEM) / sizeof(u32),\
+ (DATA))
+
+static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
+ u32 offset, u32 dwords, void *data)
+{
+ struct transaction_counter_s tid1, tid2;
+ int cnt = 0;
+
+ do {
+ do {
+ hw_atl2_shared_buffer_read(self, transaction_id, tid1);
+ cnt++;
+ if (cnt > AQ_A2_FW_READ_TRY_MAX)
+ return -ETIME;
+ if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
+ udelay(1);
+ } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
+
+ hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
+
+ hw_atl2_shared_buffer_read(self, transaction_id, tid2);
+
+ cnt++;
+ if (cnt > AQ_A2_FW_READ_TRY_MAX)
+ return -ETIME;
+ } while (tid2.transaction_cnt_a != tid2.transaction_cnt_b ||
+ tid1.transaction_cnt_a != tid2.transaction_cnt_a);
+
+ return 0;
+}
+
+static inline int hw_atl2_shared_buffer_finish_ack(struct aq_hw_s *self)
+{
+ u32 val;
+ int err;
+
+ hw_atl2_mif_host_finished_write_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
+ self, val, val == 0U,
+ 100, 100000U);
+ WARN(err, "hw_atl2_shared_buffer_finish_ack");
+
+ return err;
+}
+
+static int aq_a2_fw_init(struct aq_hw_s *self)
+{
+ struct link_control_s link_control;
+ u32 mtu;
+ u32 val;
+ int err;
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_ACTIVE;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ hw_atl2_shared_buffer_get(self, mtu, mtu);
+ mtu = HW_ATL2_MTU_JUMBO;
+ hw_atl2_shared_buffer_write(self, mtu, mtu);
+
+ hw_atl2_mif_host_finished_write_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
+ self, val, val == 0U,
+ 100, 5000000U);
+ WARN(err, "hw_atl2_shared_buffer_finish_ack");
+
+ return err;
+}
+
+static int aq_a2_fw_deinit(struct aq_hw_s *self)
+{
+ struct link_control_s link_control;
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_SHUTDOWN;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static void a2_link_speed_mask2fw(u32 speed,
+ struct link_options_s *link_options)
+{
+ link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
+ link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
+ link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
+ link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
+ link_options->rate_N2P5G = link_options->rate_2P5G;
+ link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
+ link_options->rate_100M = !!(speed & AQ_NIC_RATE_100M);
+ link_options->rate_10M = !!(speed & AQ_NIC_RATE_10M);
+}
+
+static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+ link_options.link_up = 1U;
+ a2_link_speed_mask2fw(speed, &link_options);
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+
+ switch (state) {
+ case MPI_INIT:
+ link_options.link_up = 1U;
+ break;
+ case MPI_DEINIT:
+ link_options.link_up = 0U;
+ break;
+ case MPI_RESET:
+ case MPI_POWER:
+ /* No actions */
+ break;
+ }
+
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_update_link_status(struct aq_hw_s *self)
+{
+ struct link_status_s link_status;
+
+ hw_atl2_shared_buffer_read(self, link_status, link_status);
+
+ switch (link_status.link_rate) {
+ case AQ_A2_FW_LINK_RATE_10G:
+ self->aq_link_status.mbps = 10000;
+ break;
+ case AQ_A2_FW_LINK_RATE_5G:
+ self->aq_link_status.mbps = 5000;
+ break;
+ case AQ_A2_FW_LINK_RATE_2G5:
+ self->aq_link_status.mbps = 2500;
+ break;
+ case AQ_A2_FW_LINK_RATE_1G:
+ self->aq_link_status.mbps = 1000;
+ break;
+ case AQ_A2_FW_LINK_RATE_100M:
+ self->aq_link_status.mbps = 100;
+ break;
+ case AQ_A2_FW_LINK_RATE_10M:
+ self->aq_link_status.mbps = 10;
+ break;
+ default:
+ self->aq_link_status.mbps = 0;
+ }
+
+ return 0;
+}
+
+static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
+{
+ struct mac_address_aligned_s mac_address;
+
+ hw_atl2_shared_buffer_get(self, mac_address, mac_address);
+ ether_addr_copy(mac, (u8 *)mac_address.aligned.mac_address);
+
+ return 0;
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct statistics_s stats;
+
+ hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+
+#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
+ stats.msm._F_ - priv->last_stats.msm._F_)
+
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc, rx_unicast_frames);
+ AQ_SDELTA(mprc, rx_multicast_frames);
+ AQ_SDELTA(bprc, rx_broadcast_frames);
+ AQ_SDELTA(erpr, rx_error_frames);
+
+ AQ_SDELTA(uptc, tx_unicast_frames);
+ AQ_SDELTA(mptc, tx_multicast_frames);
+ AQ_SDELTA(bptc, tx_broadcast_frames);
+ AQ_SDELTA(erpt, tx_errors);
+
+ AQ_SDELTA(ubrc, rx_unicast_octets);
+ AQ_SDELTA(ubtc, tx_unicast_octets);
+ AQ_SDELTA(mbrc, rx_multicast_octets);
+ AQ_SDELTA(mbtc, tx_multicast_octets);
+ AQ_SDELTA(bbrc, rx_broadcast_octets);
+ AQ_SDELTA(bbtc, tx_broadcast_octets);
+ }
+#undef AQ_SDELTA
+ self->curr_stats.dma_pkt_rc =
+ hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+ self->curr_stats.dma_pkt_tc =
+ hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+ self->curr_stats.dma_oct_rc =
+ hw_atl_stats_rx_dma_good_octet_counter_get(self);
+ self->curr_stats.dma_oct_tc =
+ hw_atl_stats_tx_dma_good_octet_counter_get(self);
+ self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+ memcpy(&priv->last_stats, &stats, sizeof(stats));
+
+ return 0;
+}
+
+static int aq_a2_fw_renegotiate(struct aq_hw_s *self)
+{
+ struct link_options_s link_options;
+ int err;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+ link_options.link_renegotiate = 1U;
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ err = hw_atl2_shared_buffer_finish_ack(self);
+
+ /* We should put renegotiate status back to zero
+ * after command completes
+ */
+ link_options.link_renegotiate = 0U;
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return err;
+}
+
+u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
+{
+ struct version_s version;
+
+ hw_atl2_shared_buffer_read_safe(self, version, &version);
+
+ /* A2 FW version is stored in reverse order */
+ return version.mac.major << 24 |
+ version.mac.minor << 16 |
+ version.mac.build;
+}
+
+int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
+ u8 *base_index, u8 *count)
+{
+ struct filter_caps_s filter_caps;
+ int err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, filter_caps, &filter_caps);
+ if (err)
+ return err;
+
+ *base_index = filter_caps.rslv_tbl_base_index;
+ *count = filter_caps.rslv_tbl_count;
+ return 0;
+}
+
+const struct aq_fw_ops aq_a2_fw_ops = {
+ .init = aq_a2_fw_init,
+ .deinit = aq_a2_fw_deinit,
+ .reset = NULL,
+ .renegotiate = aq_a2_fw_renegotiate,
+ .get_mac_permanent = aq_a2_fw_get_mac_permanent,
+ .set_link_speed = aq_a2_fw_set_link_speed,
+ .set_state = aq_a2_fw_set_state,
+ .update_link_status = aq_a2_fw_update_link_status,
+ .update_stats = aq_a2_fw_update_stats,
+};
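
hw_atl2_shared_buffer_read_block() above guards against firmware rewriting the output mailbox mid-copy: it reads the transaction counter, waits until its two halves match, copies the block, reads the counter again, and retries if anything moved. A userspace sketch of that seqlock-style retry loop; the "firmware" side is simulated and all names here are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct tcounter { uint16_t a, b; };

static struct tcounter tid;
static uint32_t mailbox[4];

static void firmware_update(uint32_t seed)
{
	tid.a++;                                /* update in progress */
	for (int i = 0; i < 4; i++)
		mailbox[i] = seed + i;
	tid.b = tid.a;                          /* update complete */
}

static int read_block(uint32_t *out)
{
	struct tcounter t1, t2;
	int tries = 0;

	do {
		do {
			t1 = tid;
			if (++tries > 1000)    /* bounded, like AQ_A2_FW_READ_TRY_MAX */
				return -1;
		} while (t1.a != t1.b);         /* wait out an in-flight update */

		memcpy(out, mailbox, sizeof(mailbox));
		t2 = tid;
	} while (t2.a != t2.b || t1.a != t2.a); /* retry if it changed under us */

	return 0;
}

int main(void)
{
	uint32_t snap[4];

	firmware_update(100);
	if (!read_block(snap))
		printf("consistent snapshot: %u %u %u %u\n",
		       snap[0], snap[1], snap[2], snap[3]);
	return 0;
}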
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index fbe9d88b13c7..36c7cf05630a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -846,8 +846,7 @@ static int get_ingress_sakey_record(struct aq_hw_s *hw,
rec->key[7] = packed_record[14];
rec->key[7] |= packed_record[15] << 16;
- rec->key_len = (rec->key_len & 0xFFFFFFFC) |
- (packed_record[16] & 0x3);
+ rec->key_len = packed_record[16] & 0x3;
return 0;
}
@@ -1158,6 +1157,7 @@ static int set_egress_ctlf_record(struct aq_hw_s *hw,
packed_record[0] = rec->sa_da[0] & 0xFFFF;
packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+
packed_record[2] = rec->sa_da[1] & 0xFFFF;
packed_record[3] = rec->eth_type & 0xFFFF;
@@ -1552,7 +1552,7 @@ static int set_egress_sc_record(struct aq_hw_s *hw,
packed_record[5] |= (rec->sak_len & 0x3) << 4;
- packed_record[7] |= (rec->valid & 0x1) << 15;
+ packed_record[7] = (rec->valid & 0x1) << 15;
return set_raw_egress_record(hw, packed_record, 8, 2,
ROWOFFSET_EGRESSSCRECORD + table_index);
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 02b7705393ca..112edbd30823 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -871,13 +871,40 @@ static void ag71xx_mac_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_MII) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- return;
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_NA:
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
+ ag71xx_is(ag, AR9340) ||
+ ag71xx_is(ag, QCA9530) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_GMII:
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_RMII:
+ if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_RGMII:
+ if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ break;
+ goto unsupported;
+ default:
+ goto unsupported;
}
phylink_set(mask, MII);
@@ -889,6 +916,8 @@ static void ag71xx_mac_validate(struct phylink_config *config,
phylink_set(mask, 100baseT_Full);
if (state->interface == PHY_INTERFACE_MODE_NA ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_RGMII ||
state->interface == PHY_INTERFACE_MODE_GMII) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
@@ -898,6 +927,10 @@ static void ag71xx_mac_validate(struct phylink_config *config,
__ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ return;
+unsupported:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 00bd7bd55794..decab9a8e4a8 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1186,7 +1186,7 @@ static void atl1c_start_mac(struct atl1c_adapter *adapter)
struct atl1c_hw *hw = &adapter->hw;
u32 mac, txq, rxq;
- hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX ? true : false;
+ hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX;
hw->mac_speed = adapter->link_speed == SPEED_1000 ?
atl1c_mac_speed_1000 : atl1c_mac_speed_10_100;
@@ -2449,12 +2449,6 @@ static int atl1c_resume(struct device *dev)
atl1c_reset_mac(&adapter->hw);
atl1c_phy_init(&adapter->hw);
-#if 0
- AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
- pm_data &= ~PM_CTRLSTAT_PME_EN;
- AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
-#endif
-
netif_device_attach(netdev);
if (netif_running(netdev))
atl1c_up(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 271e7034fa70..b35fcfcd692d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1042,7 +1042,7 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
* each ring/block may need up to 8 bytes for alignment, hence the
* additional 40 bytes tacked onto the end.
*/
- ring_header->size = size =
+ ring_header->size =
sizeof(struct tx_packet_desc) * tpd_ring->count
+ sizeof(struct rx_free_desc) * rfd_ring->count
+ sizeof(struct rx_return_desc) * rrd_ring->count
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 2c6ba046d2a8..17ae6df90723 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1145,7 +1145,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
break;
}
}
- if (false == pg_found) {
+ if (!pg_found) {
data[help_data->num_of_pg].pg = add_pg;
data[help_data->num_of_pg].pg_priority =
(1 << ttp[add_traf_type]);
@@ -1155,7 +1155,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
}
DP(BNX2X_MSG_DCB,
"add_traf_type %d pg_found %s num_of_pg %d\n",
- add_traf_type, (false == pg_found) ? "NO" : "YES",
+ add_traf_type, !pg_found ? "NO" : "YES",
help_data->num_of_pg);
}
}
@@ -1544,8 +1544,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
entry = 0;
- if (i == (num_of_pri-1) &&
- false == b_found_strict)
+ if (i == (num_of_pri-1) && !b_found_strict)
/* last entry will be handled separately
* If no priority is strict than last
* entry goes to last queue.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5097a44686b3..b4476f44e386 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -331,27 +331,6 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
BP_VFDB(bp)->vf_sbs_pool++;
}
-static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *obj,
- atomic_t *counter)
-{
- struct list_head *pos;
- int read_lock;
- int cnt = 0;
-
- read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
- if (read_lock)
- DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
-
- list_for_each(pos, &obj->head)
- cnt++;
-
- if (!read_lock)
- bnx2x_vlan_mac_h_read_unlock(bp, obj);
-
- atomic_set(counter, cnt);
-}
-
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid, bool drv_only, int type)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d1a83716d934..f86b6217f829 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1766,7 +1766,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- bnapi->cp_ring.rx_buf_errors++;
+ bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
netdev_warn(bp->dev, "RX buffer error %x\n",
rx_err);
@@ -1849,7 +1849,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
- bnapi->cp_ring.rx_l4_csum_errors++;
+ bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
}
}
@@ -5045,8 +5045,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req.lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN);
+ req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
@@ -5356,9 +5355,9 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
if (BNXT_PF(bp))
- db->doorbell = bp->bar1 + 0x10000;
+ db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
else
- db->doorbell = bp->bar1 + 0x4000;
+ db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
switch (ring_type) {
case HWRM_RING_ALLOC_TX:
db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
@@ -6365,6 +6364,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u32 min_db_offset = 0;
u16 flags;
int rc;
@@ -6413,6 +6413,21 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (!bp->max_mtu)
bp->max_mtu = BNXT_MAX_MTU;
+ if (bp->db_size)
+ goto func_qcfg_exit;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_PF(bp))
+ min_db_offset = DB_PF_OFFSET_P5;
+ else
+ min_db_offset = DB_VF_OFFSET_P5;
+ }
+ bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+ 1024);
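+ /* Fall back to mapping the whole doorbell BAR if firmware reports no
+ * size, a size larger than the BAR, or one below the minimum offset.
+ */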
+ if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
+ bp->db_size <= min_db_offset)
+ bp->db_size = pci_resource_len(bp->pdev, 2);
+
func_qcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -6434,23 +6449,13 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
if (!rc) {
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
- int i;
+ int i, tqm_rings;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
rc = -ENOMEM;
goto ctx_err;
}
- ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
- if (!ctx_pg) {
- kfree(ctx);
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
-
- bp->ctx = ctx;
ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
@@ -6483,6 +6488,20 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+ ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
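+ /* Fall back to bp->max_q fast-path TQM rings if the firmware does
+ * not report a count.
+ */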
+ if (!ctx->tqm_fp_rings_count)
+ ctx->tqm_fp_rings_count = bp->max_q;
+
+ tqm_rings = ctx->tqm_fp_rings_count + 1;
+ ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
+ if (!ctx_pg) {
+ kfree(ctx);
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ for (i = 0; i < tqm_rings; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+ bp->ctx = ctx;
} else {
rc = 0;
}
@@ -6735,7 +6754,7 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
return;
if (ctx->tqm_mem[0]) {
- for (i = 0; i < bp->max_q + 1; i++)
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
kfree(ctx->tqm_mem[0]);
ctx->tqm_mem[0] = NULL;
@@ -6756,6 +6775,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
u32 mem_size, ena, entries;
+ u32 entries_sp, min;
u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
@@ -6845,14 +6865,17 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
- entries = ctx->qp_max_l2_entries + extra_qps;
+ min = ctx->tqm_min_entries_per_ring;
+ entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
+ 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
+ entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
+ entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
entries = roundup(entries, ctx->tqm_entries_multiple);
- entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
- ctx->tqm_max_entries_per_ring);
- for (i = 0; i < bp->max_q + 1; i++) {
+ entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
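+ /* TQM ring 0 is the slow-path ring and is sized with entries_sp;
+ * the fast-path rings use entries.
+ */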
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
- ctx_pg->entries = entries;
- mem_size = ctx->tqm_entry_size * entries;
+ ctx_pg->entries = i ? entries : entries_sp;
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
if (rc)
return rc;
@@ -10265,7 +10288,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
bnxt_dbg_hwrm_ring_info_get(bp,
DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
fw_ring_id, &val[0], &val[1]);
- cpr->missed_irqs++;
+ cpr->sw_stats.cmn.missed_irqs++;
}
}
}
@@ -10894,6 +10917,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->dev = dev;
bp->pdev = pdev;
+ /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
+ * determines the BAR size.
+ */
bp->bar0 = pci_ioremap_bar(pdev, 0);
if (!bp->bar0) {
dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
@@ -10901,13 +10927,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
goto init_err_release;
}
- bp->bar1 = pci_ioremap_bar(pdev, 2);
- if (!bp->bar1) {
- dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
- rc = -ENOMEM;
- goto init_err_release;
- }
-
bp->bar2 = pci_ioremap_bar(pdev, 4);
if (!bp->bar2) {
dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
@@ -11829,6 +11848,16 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
return 0;
}
+static int bnxt_map_db_bar(struct bnxt *bp)
+{
+ if (!bp->db_size)
+ return -ENODEV;
+ bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
+ if (!bp->bar1)
+ return -ENOMEM;
+ return 0;
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
@@ -11889,6 +11918,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ rc = bnxt_map_db_bar(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
+ rc);
+ goto init_err_pci_clean;
+ }
+
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f6a3250ef1c5..c04ac4a36005 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -537,6 +537,9 @@ struct nqe_cn {
#define DBR_TYPE_NQ_ARM (0xbULL << 60)
#define DBR_TYPE_NULL (0xfULL << 60)
+#define DB_PF_OFFSET_P5 0x10000
+#define DB_VF_OFFSET_P5 0x4000
+
#define INVALID_HW_RING_ID ((u16)-1)
/* The hardware supports certain page sizes. Use the supported page sizes
@@ -907,6 +910,20 @@ struct bnxt_rx_ring_info {
struct page_pool *page_pool;
};
+struct bnxt_rx_sw_stats {
+ u64 rx_l4_csum_errors;
+ u64 rx_buf_errors;
+};
+
+struct bnxt_cmn_sw_stats {
+ u64 missed_irqs;
+};
+
+struct bnxt_sw_stats {
+ struct bnxt_rx_sw_stats rx;
+ struct bnxt_cmn_sw_stats cmn;
+};
+
struct bnxt_cp_ring_info {
struct bnxt_napi *bnapi;
u32 cp_raw_cons;
@@ -934,9 +951,8 @@ struct bnxt_cp_ring_info {
struct ctx_hw_stats *hw_stats;
dma_addr_t hw_stats_map;
u32 hw_stats_ctx_id;
- u64 rx_l4_csum_errors;
- u64 rx_buf_errors;
- u64 missed_irqs;
+
+ struct bnxt_sw_stats sw_stats;
struct bnxt_ring_struct cp_ring_struct;
@@ -1356,6 +1372,7 @@ struct bnxt_ctx_mem_info {
u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u8 ctx_kind_initializer;
+ u8 tqm_fp_rings_count;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
@@ -1815,6 +1832,7 @@ struct bnxt {
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
+ int db_size;
#define BNXT_NTP_FLTR_MAX_FLTR 4096
#define BNXT_NTP_FLTR_HASH_SIZE 512
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 34046a6286e8..dd0c3f227009 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,7 @@ reset_coalesce:
return rc;
}
-static const char * const bnxt_ring_stats_str[] = {
+static const char * const bnxt_ring_rx_stats_str[] = {
"rx_ucast_packets",
"rx_mcast_packets",
"rx_bcast_packets",
@@ -146,6 +146,9 @@ static const char * const bnxt_ring_stats_str[] = {
"rx_ucast_bytes",
"rx_mcast_bytes",
"rx_bcast_bytes",
+};
+
+static const char * const bnxt_ring_tx_stats_str[] = {
"tx_ucast_packets",
"tx_mcast_packets",
"tx_bcast_packets",
@@ -171,9 +174,12 @@ static const char * const bnxt_ring_tpa2_stats_str[] = {
"rx_tpa_errors",
};
-static const char * const bnxt_ring_sw_stats_str[] = {
+static const char * const bnxt_rx_sw_stats_str[] = {
"rx_l4_csum_errors",
"rx_buf_errors",
+};
+
+static const char * const bnxt_cmn_sw_stats_str[] = {
"missed_irqs",
};
@@ -303,6 +309,11 @@ static struct {
{0, "tx_total_discard_pkts"},
};
+#define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
+#define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str)
+#define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str)
+#define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str)
+
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
@@ -482,12 +493,21 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
- int num_stats;
+ int rx, tx, cmn;
+ bool sh = false;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ sh = true;
- num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
- ARRAY_SIZE(bnxt_ring_sw_stats_str) +
- bnxt_get_num_tpa_ring_stats(bp);
- return num_stats * bp->cp_nr_rings;
+ rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
+ bnxt_get_num_tpa_ring_stats(bp);
+ tx = NUM_RING_TX_HW_STATS;
+ cmn = NUM_RING_CMN_SW_STATS;
+ if (sh)
+ return (rx + tx + cmn) * bp->cp_nr_rings;
+ else
+ return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+ cmn * bp->cp_nr_rings;
}
static int bnxt_get_num_stats(struct bnxt *bp)
@@ -528,13 +548,29 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset)
}
}
+static bool is_rx_ring(struct bnxt *bp, int ring_num)
+{
+ return ring_num < bp->rx_nr_rings;
+}
+
+static bool is_tx_ring(struct bnxt *bp, int ring_num)
+{
+ int tx_base = 0;
+
+ if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+ tx_base = bp->rx_nr_rings;
+
+ if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
+ return true;
+ return false;
+}
+
static void bnxt_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *buf)
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
- u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
- bnxt_get_num_tpa_ring_stats(bp);
+ u32 tpa_stats;
if (!bp->bnapi) {
j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
@@ -544,17 +580,42 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
bnxt_sw_func_stats[i].counter = 0;
+ tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
__le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ u64 *sw;
int k;
- for (k = 0; k < stat_fields; j++, k++)
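+ /* struct ctx_hw_stats lays out the RX counters, then the TX
+ * counters, then the TPA counters, so k walks them in that order.
+ */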
+ if (is_rx_ring(bp, i)) {
+ for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ }
+ if (is_tx_ring(bp, i)) {
+ k = NUM_RING_RX_HW_STATS;
+ for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
+ j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ }
+ if (!tpa_stats || !is_rx_ring(bp, i))
+ goto skip_tpa_ring_stats;
+
+ k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
+ for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
+ tpa_stats; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
- buf[j++] = cpr->rx_l4_csum_errors;
- buf[j++] = cpr->rx_buf_errors;
- buf[j++] = cpr->missed_irqs;
+
+skip_tpa_ring_stats:
+ sw = (u64 *)&cpr->sw_stats.rx;
+ if (is_rx_ring(bp, i)) {
+ for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
+ buf[j] = sw[k];
+ }
+
+ sw = (u64 *)&cpr->sw_stats.cmn;
+ for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
+ buf[j] = sw[k];
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
@@ -632,31 +693,48 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- num_str = ARRAY_SIZE(bnxt_ring_stats_str);
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_rx_ring(bp, i)) {
+ num_str = NUM_RING_RX_HW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_rx_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
}
- if (!BNXT_SUPPORTS_TPA(bp))
+ if (is_tx_ring(bp, i)) {
+ num_str = NUM_RING_TX_HW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_tx_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
+ num_str = bnxt_get_num_tpa_ring_stats(bp);
+ if (!num_str || !is_rx_ring(bp, i))
goto skip_tpa_stats;
- if (bp->max_tpa_v2) {
- num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ if (bp->max_tpa_v2)
str = bnxt_ring_tpa2_stats_str;
- } else {
- num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ else
str = bnxt_ring_tpa_stats_str;
- }
+
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i, str[j]);
buf += ETH_GSTRING_LEN;
}
skip_tpa_stats:
- num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
+ if (is_rx_ring(bp, i)) {
+ num_str = NUM_RING_RX_SW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_rx_sw_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
+ num_str = NUM_RING_CMN_SW_STATS;
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i,
- bnxt_ring_sw_stats_str[j]);
+ bnxt_cmn_sw_stats_str[j]);
buf += ETH_GSTRING_LEN;
}
}
@@ -1749,8 +1827,8 @@ static int bnxt_flash_nvram(struct net_device *dev,
return rc;
}
-static int bnxt_firmware_reset(struct net_device *dev,
- u16 dir_type)
+static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags)
{
struct hwrm_fw_reset_input req = {0};
struct bnxt *bp = netdev_priv(dev);
@@ -1758,48 +1836,77 @@ static int bnxt_firmware_reset(struct net_device *dev,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
+ req.embedded_proc_type = proc_type;
+ req.selfrst_status = self_reset;
+ req.flags = flags;
+
+ if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ } else {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == -EACCES)
+ bnxt_print_admin_err(bp);
+ }
+ return rc;
+}
+
+static int bnxt_firmware_reset(struct net_device *dev,
+ enum bnxt_nvm_directory_type dir_type)
+{
+ u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
+ u8 proc_type, flags = 0;
+
/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
/* (e.g. when firmware isn't already running) */
switch (dir_type) {
case BNX_DIR_TYPE_CHIMP_PATCH:
case BNX_DIR_TYPE_BOOTCODE:
case BNX_DIR_TYPE_BOOTCODE_2:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
/* Self-reset ChiMP upon next PCIe reset: */
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+ self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_APE_FW:
case BNX_DIR_TYPE_APE_PATCH:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
/* Self-reset APE upon next PCIe reset: */
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+ self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_KONG_FW:
case BNX_DIR_TYPE_KONG_PATCH:
- req.embedded_proc_type =
- FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
break;
case BNX_DIR_TYPE_BONO_FW:
case BNX_DIR_TYPE_BONO_PATCH:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
- break;
- case BNXT_FW_RESET_CHIP:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
- if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
- req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
- break;
- case BNXT_FW_RESET_AP:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
break;
default:
return -EINVAL;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc == -EACCES)
- bnxt_print_admin_err(bp);
- return rc;
+ return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
+}
+
+static int bnxt_firmware_reset_chip(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 flags = 0;
+
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+ flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+
+ return bnxt_hwrm_firmware_reset(dev,
+ FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
+ flags);
+}
+
+static int bnxt_firmware_reset_ap(struct net_device *dev)
+{
+ return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
+ 0);
}
static int bnxt_flash_firmware(struct net_device *dev,
@@ -1988,9 +2095,9 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
rc, filename);
return rc;
}
- if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
+ if (bnxt_dir_type_is_ape_bin_format(dir_type))
rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
- else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
+ else if (bnxt_dir_type_is_other_exec_format(dir_type))
rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
else
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
@@ -2377,7 +2484,7 @@ static int bnxt_set_eeprom(struct net_device *dev,
}
/* Create or re-write an NVM item: */
- if (bnxt_dir_type_is_executable(type) == true)
+ if (bnxt_dir_type_is_executable(type))
return -EOPNOTSUPP;
ext = eeprom->magic & 0xffff;
ordinal = eeprom->offset >> 16;
@@ -2975,7 +3082,11 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = 0;
+ bool reload = false;
+ u32 req = *flags;
+
+ if (!req)
+ return -EINVAL;
if (!BNXT_PF(bp)) {
netdev_err(dev, "Reset is not supported from a VF\n");
@@ -2989,33 +3100,37 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return -EBUSY;
}
- if (*flags == ETH_RESET_ALL) {
+ if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
/* This feature is not supported in older firmware versions */
- if (bp->hwrm_spec_code < 0x10803)
- return -EOPNOTSUPP;
-
- rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
- if (!rc) {
- netdev_info(dev, "Reset request successful.\n");
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
- netdev_info(dev, "Reload driver to complete reset\n");
- *flags = 0;
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (!bnxt_firmware_reset_chip(dev)) {
+ netdev_info(dev, "Firmware reset request successful.\n");
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
+ reload = true;
+ *flags &= ~BNXT_FW_RESET_CHIP;
+ }
+ } else if (req == BNXT_FW_RESET_CHIP) {
+ return -EOPNOTSUPP; /* only request, fail hard */
}
- } else if (*flags == ETH_RESET_AP) {
- /* This feature is not supported in older firmware versions */
- if (bp->hwrm_spec_code < 0x10803)
- return -EOPNOTSUPP;
+ }
- rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
- if (!rc) {
- netdev_info(dev, "Reset Application Processor request successful.\n");
- *flags = 0;
+ if (req & BNXT_FW_RESET_AP) {
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (!bnxt_firmware_reset_ap(dev)) {
+ netdev_info(dev, "Reset application processor successful.\n");
+ reload = true;
+ *flags &= ~BNXT_FW_RESET_AP;
+ }
+ } else if (req == BNXT_FW_RESET_AP) {
+ return -EOPNOTSUPP; /* only request, fail hard */
}
- } else {
- rc = -EINVAL;
}
- return rc;
+ if (reload)
+ netdev_info(dev, "Reload driver to complete reset\n");
+
+ return 0;
}
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 3576d951727b..ce7585ff9e4d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -77,8 +77,12 @@ struct hwrm_dbg_cmn_output {
#define BNXT_LED_DFLT_ENABLES(x) \
cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
-#define BNXT_FW_RESET_AP 0xfffe
-#define BNXT_FW_RESET_CHIP 0xffff
+#define BNXT_FW_RESET_AP (ETH_RESET_AP << ETH_RESET_SHARED_SHIFT)
+#define BNXT_FW_RESET_CHIP ((ETH_RESET_MGMT | ETH_RESET_IRQ | \
+ ETH_RESET_DMA | ETH_RESET_FILTER | \
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC | \
+ ETH_RESET_PHY | ETH_RESET_RAM) \
+ << ETH_RESET_SHARED_SHIFT)
extern const struct ethtool_ops bnxt_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 7cf27dffadb5..7e9235c8d21e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2019 Broadcom Inc.
+ * Copyright (c) 2018-2020 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -207,6 +207,8 @@ struct cmd_nums {
#define HWRM_PORT_PHY_MDIO_READ 0xb6UL
#define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
#define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
+ #define HWRM_PORT_ECN_QSTATS 0xbaUL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -220,6 +222,8 @@ struct cmd_nums {
#define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
#define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
#define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_FW_ECN_CFG 0xcdUL
+ #define HWRM_FW_ECN_QCFG 0xceUL
#define HWRM_EXEC_FWD_RESP 0xd0UL
#define HWRM_REJECT_FWD_RESP 0xd1UL
#define HWRM_FWD_RESP 0xd2UL
@@ -233,6 +237,7 @@ struct cmd_nums {
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
+ #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
@@ -331,6 +336,7 @@ struct cmd_nums {
#define HWRM_FUNC_VF_BW_CFG 0x195UL
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
+ #define HWRM_FUNC_QSTATS_EXT 0x198UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -341,6 +347,31 @@ struct cmd_nums {
#define HWRM_MFG_OTP_CFG 0x207UL
#define HWRM_MFG_OTP_QCFG 0x208UL
#define HWRM_MFG_HDMA_TEST 0x209UL
+ #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
+ #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_TF 0x2bcUL
+ #define HWRM_TF_VERSION_GET 0x2bdUL
+ #define HWRM_TF_SESSION_OPEN 0x2c6UL
+ #define HWRM_TF_SESSION_ATTACH 0x2c7UL
+ #define HWRM_TF_SESSION_CLOSE 0x2c8UL
+ #define HWRM_TF_SESSION_QCFG 0x2c9UL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2caUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cdUL
+ #define HWRM_TF_TBL_TYPE_GET 0x2d0UL
+ #define HWRM_TF_TBL_TYPE_SET 0x2d1UL
+ #define HWRM_TF_CTXT_MEM_RGTR 0x2daUL
+ #define HWRM_TF_CTXT_MEM_UNRGTR 0x2dbUL
+ #define HWRM_TF_EXT_EM_QCAPS 0x2dcUL
+ #define HWRM_TF_EXT_EM_OP 0x2ddUL
+ #define HWRM_TF_EXT_EM_CFG 0x2deUL
+ #define HWRM_TF_EXT_EM_QCFG 0x2dfUL
+ #define HWRM_TF_TCAM_SET 0x2eeUL
+ #define HWRM_TF_TCAM_GET 0x2efUL
+ #define HWRM_TF_TCAM_MOVE 0x2f0UL
+ #define HWRM_TF_TCAM_FREE 0x2f1UL
+ #define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -356,6 +387,10 @@ struct cmd_nums {
#define HWRM_DBG_RING_INFO_GET 0xff1cUL
#define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
#define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
+ #define HWRM_DBG_DRV_TRACE 0xff1fUL
+ #define HWRM_DBG_QCAPS 0xff20UL
+ #define HWRM_DBG_QCFG 0xff21UL
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -429,8 +464,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 12
-#define HWRM_VERSION_STR "1.10.1.12"
+#define HWRM_VERSION_RSVD 33
+#define HWRM_VERSION_STR "1.10.1.33"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -482,6 +517,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -647,6 +683,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1089,7 +1126,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:640b/80B) */
+/* hwrm_func_qcaps_output (size:704b/88B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1126,6 +1163,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
#define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
#define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1146,7 +1187,12 @@ struct hwrm_func_qcaps_output {
__le32 max_flow_id;
__le32 max_hw_ring_grps;
__le16 max_sp_tx_rings;
- u8 unused_0;
+ u8 unused_0[2];
+ __le32 flags_ext;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ u8 unused_1[3];
u8 valid;
};
@@ -1161,7 +1207,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:704b/88B) */
+/* hwrm_func_qcfg_output (size:768b/96B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1267,7 +1313,11 @@ struct hwrm_func_qcfg_output {
u8 always_1;
__le32 reset_addr_poll;
__le16 legacy_l2_db_size_kb;
- u8 unused_2[1];
+ __le16 svif_info;
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
+ u8 unused_2[7];
u8 valid;
};
@@ -1420,9 +1470,10 @@ struct hwrm_func_qstats_input {
__le64 resp_addr;
__le16 fid;
u8 flags;
- #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY
+ #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK
u8 unused_0[5];
};
@@ -1456,6 +1507,53 @@ struct hwrm_func_qstats_output {
u8 valid;
};
+/* hwrm_func_qstats_ext_input (size:192b/24B) */
+struct hwrm_func_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
+};
+
+/* hwrm_func_qstats_ext_output (size:1472b/184B) */
+struct hwrm_func_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_func_clr_stats_input (size:192b/24B) */
struct hwrm_func_clr_stats_input {
__le16 req_type;
@@ -1808,7 +1906,7 @@ struct hwrm_func_backing_store_qcaps_output {
u8 ctx_kind_initializer;
__le32 rsvd;
__le16 rsvd1;
- u8 rsvd2;
+ u8 tqm_fp_rings_count;
u8 valid;
};
@@ -2231,7 +2329,17 @@ struct hwrm_error_recovery_qcfg_output {
#define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
__le32 reset_reg_val[16];
u8 delay_after_reset[16];
- u8 unused_1[7];
+ __le32 err_recovery_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
+ u8 unused_1[3];
u8 valid;
};
@@ -2934,7 +3042,11 @@ struct hwrm_port_qstats_input {
__le16 target_id;
__le64 resp_addr;
__le16 port_id;
- u8 unused_0[6];
+ u8 flags;
+ #define PORT_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_QSTATS_REQ_FLAGS_LAST PORT_QSTATS_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
};
@@ -3058,7 +3170,11 @@ struct hwrm_port_qstats_ext_input {
__le16 port_id;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[2];
+ u8 flags;
+ #define PORT_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_QSTATS_EXT_REQ_FLAGS_LAST PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0;
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
};
@@ -3840,14 +3956,22 @@ struct hwrm_queue_pfcenable_qcfg_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
u8 unused_0[3];
u8 valid;
};
@@ -3860,14 +3984,22 @@ struct hwrm_queue_pfcenable_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
__le16 port_id;
u8 unused_0[2];
};
@@ -5287,7 +5419,11 @@ struct hwrm_ring_cmpl_ring_qaggint_params_input {
__le16 target_id;
__le64 resp_addr;
__le16 ring_id;
- u8 unused_0[6];
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ u8 unused_0[4];
};
/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
@@ -7618,7 +7754,9 @@ struct hwrm_nvm_modify_input {
__le64 resp_addr;
__le64 host_src_addr;
__le16 dir_idx;
- u8 unused_0[2];
+ __le16 flags;
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
__le32 offset;
__le32 len;
u8 unused_1[4];
@@ -8027,4 +8165,18 @@ struct hwrm_selftest_irq_output {
u8 valid;
};
+/* fw_status_reg (size:32b/4B) */
+struct fw_status_reg {
+ u32 fw_status;
+ #define FW_STATUS_REG_CODE_MASK 0xffffUL
+ #define FW_STATUS_REG_CODE_SFT 0
+ #define FW_STATUS_REG_CODE_READY 0x8000UL
+ #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
+ #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
+ #define FW_STATUS_REG_RECOVERABLE 0x20000UL
+ #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
+ #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
+ #define FW_STATUS_REG_SHUTDOWN 0x100000UL
+};
+
#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index cea2f9958a1d..3a9a51f7063a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -645,7 +645,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
- mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
req.mru = cpu_to_le16(mtu);
req.mtu = cpu_to_le16(mtu);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 4a316c4b3fa8..8c8368c2f335 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -104,7 +104,13 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
- ent[i].db_offset = (idx + i) * 0x80;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ ent[i].db_offset = DB_PF_OFFSET_P5;
+ if (BNXT_VF(bp))
+ ent[i].db_offset = DB_VF_OFFSET_P5;
+ } else {
+ ent[i].db_offset = (idx + i) * 0x80;
+ }
}
}
@@ -475,6 +481,8 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
edev->net = dev;
edev->pdev = bp->pdev;
+ edev->l2_db_size = bp->db_size;
+ edev->l2_db_size_nc = bp->db_size;
bp->edev = edev;
}
return bp->edev;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 9895406b9830..6b4d2556a6df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -67,6 +67,14 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ int l2_db_size; /* Doorbell BAR size in
+ * bytes mapped by L2
+ * driver.
+ */
+ int l2_db_size_nc; /* Doorbell BAR size in
+ * bytes mapped as non-
+ * cacheable.
+ */
};
struct bnxt_en_ops {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c6f6f2033880..5e3b4a3b69ea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -138,6 +138,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = *data_ptr + *len;
xdp.rxq = &rxr->xdp_rxq;
+ xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
orig_data = xdp.data;
rcu_read_lock();
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 61ab7d21f6bd..c5cca63b8571 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1918,7 +1918,6 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
if (ret) {
atomic_dec(&cp->iscsi_conn);
- ret = 0;
goto done;
}
ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 79636c78127c..ff31da0ed846 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2019 Broadcom
+ * Copyright (c) 2014-2020 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -23,11 +23,6 @@
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_net.h>
-#include <linux/of_platform.h>
#include <net/arp.h>
#include <linux/mii.h>
@@ -70,6 +65,9 @@
#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
TOTAL_DESC * DMA_DESC_SIZE)
+/* Forward declarations */
+static void bcmgenet_set_rx_mode(struct net_device *dev);
+
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
/* MIPS chips strapped for BE will automagically configure the
@@ -461,6 +459,384 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
+static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
+ u32 f_index)
+{
+ u32 offset;
+ u32 reg;
+
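+ /* Enable bits for filters 0-31 live in the second HFB_FLT_ENABLE
+ * register (base + 4); filters 32 and up use the first register.
+ */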
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ return !!(reg & (1 << (f_index % 32)));
+}
+
+static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg |= (1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+}
+
+static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+ u32 offset, reg, reg1;
+
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ if (f_index < 32) {
+ reg1 &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
+ } else {
+ reg &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ }
+ if (!reg && !reg1) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
+}
+
+static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
+ u32 f_index, u32 rx_queue)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = f_index / 8;
+ reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
+ reg &= ~(0xF << (4 * (f_index % 8)));
+ reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
+ bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
+}
+
+static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
+ u32 f_index, u32 f_length)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = HFB_FLT_LEN_V3PLUS +
+ ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
+ sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg &= ~(0xFF << (8 * (f_index % 4)));
+ reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+}
+
+static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
+{
+ u32 f_index;
+
+ /* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */
+ for (f_index = MAX_NUM_OF_FS_RULES;
+ f_index < priv->hw_params->hfb_filter_cnt; f_index++)
+ if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
+ return f_index;
+
+ return -ENOMEM;
+}
+
+static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
+{
+ while (size) {
+ switch (*(unsigned char *)mask++) {
+ case 0x00:
+ case 0x0f:
+ case 0xf0:
+ case 0xff:
+ size--;
+ continue;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+#define VALIDATE_MASK(x) \
+ bcmgenet_hfb_validate_mask(&(x), sizeof(x))
+
+static int bcmgenet_hfb_insert_data(u32 *f, int offset,
+ void *val, void *mask, size_t size)
+{
+ int index;
+ u32 tmp;
+
+ index = offset / 2;
+ tmp = f[index];
+
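+ /* Each 32-bit filter word holds two frame bytes: even offsets land in
+ * bits 15:8 with nibble-enable bits 19:18, odd offsets in bits 7:0 with
+ * enable bits 17:16; see bcmgenet_hfb_add_filter() for the full layout.
+ */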
+ while (size--) {
+ if (offset++ & 1) {
+ tmp &= ~0x300FF;
+ tmp |= (*(unsigned char *)val++);
+ switch ((*(unsigned char *)mask++)) {
+ case 0xFF:
+ tmp |= 0x30000;
+ break;
+ case 0xF0:
+ tmp |= 0x20000;
+ break;
+ case 0x0F:
+ tmp |= 0x10000;
+ break;
+ }
+ f[index++] = tmp;
+ if (size)
+ tmp = f[index];
+ } else {
+ tmp &= ~0xCFF00;
+ tmp |= (*(unsigned char *)val++) << 8;
+ switch ((*(unsigned char *)mask++)) {
+ case 0xFF:
+ tmp |= 0xC0000;
+ break;
+ case 0xF0:
+ tmp |= 0x80000;
+ break;
+ case 0x0F:
+ tmp |= 0x40000;
+ break;
+ }
+ if (!size)
+ f[index] = tmp;
+ }
+ }
+
+ return 0;
+}
+
+static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data,
+ u32 f_length, u32 rx_queue, int f_index)
+{
+ u32 base = f_index * priv->hw_params->hfb_filter_size;
+ int i;
+
+ for (i = 0; i < f_length; i++)
+ bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32));
+
+ bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
+}
+
+static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
+ struct bcmgenet_rxnfc_rule *rule)
+{
+ struct ethtool_rx_flow_spec *fs = &rule->fs;
+ int err = 0, offset = 0, f_length = 0;
+ u16 val_16, mask_16;
+ u8 val_8, mask_8;
+ size_t size;
+ u32 *f_data;
+
+ f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32),
+ GFP_KERNEL);
+ if (!f_data)
+ return -ENOMEM;
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ bcmgenet_hfb_insert_data(f_data, 0,
+ &fs->h_ext.h_dest, &fs->m_ext.h_dest,
+ sizeof(fs->h_ext.h_dest));
+ }
+
+ if (fs->flow_type & FLOW_EXT) {
+ if (fs->m_ext.vlan_etype ||
+ fs->m_ext.vlan_tci) {
+ bcmgenet_hfb_insert_data(f_data, 12,
+ &fs->h_ext.vlan_etype,
+ &fs->m_ext.vlan_etype,
+ sizeof(fs->h_ext.vlan_etype));
+ bcmgenet_hfb_insert_data(f_data, 14,
+ &fs->h_ext.vlan_tci,
+ &fs->m_ext.vlan_tci,
+ sizeof(fs->h_ext.vlan_tci));
+ offset += VLAN_HLEN;
+ f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
+ }
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ f_length += DIV_ROUND_UP(ETH_HLEN, 2);
+ bcmgenet_hfb_insert_data(f_data, 0,
+ &fs->h_u.ether_spec.h_dest,
+ &fs->m_u.ether_spec.h_dest,
+ sizeof(fs->h_u.ether_spec.h_dest));
+ bcmgenet_hfb_insert_data(f_data, ETH_ALEN,
+ &fs->h_u.ether_spec.h_source,
+ &fs->m_u.ether_spec.h_source,
+ sizeof(fs->h_u.ether_spec.h_source));
+ bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+ &fs->h_u.ether_spec.h_proto,
+ &fs->m_u.ether_spec.h_proto,
+ sizeof(fs->h_u.ether_spec.h_proto));
+ break;
+ case IP_USER_FLOW:
+ f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
+ /* Specify IP Ether Type */
+ val_16 = htons(ETH_P_IP);
+ mask_16 = 0xFFFF;
+ bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmgenet_hfb_insert_data(f_data, 15 + offset,
+ &fs->h_u.usr_ip4_spec.tos,
+ &fs->m_u.usr_ip4_spec.tos,
+ sizeof(fs->h_u.usr_ip4_spec.tos));
+ bcmgenet_hfb_insert_data(f_data, 23 + offset,
+ &fs->h_u.usr_ip4_spec.proto,
+ &fs->m_u.usr_ip4_spec.proto,
+ sizeof(fs->h_u.usr_ip4_spec.proto));
+ bcmgenet_hfb_insert_data(f_data, 26 + offset,
+ &fs->h_u.usr_ip4_spec.ip4src,
+ &fs->m_u.usr_ip4_spec.ip4src,
+ sizeof(fs->h_u.usr_ip4_spec.ip4src));
+ bcmgenet_hfb_insert_data(f_data, 30 + offset,
+ &fs->h_u.usr_ip4_spec.ip4dst,
+ &fs->m_u.usr_ip4_spec.ip4dst,
+ sizeof(fs->h_u.usr_ip4_spec.ip4dst));
+ if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
+ break;
+
+ /* Only supports 20 byte IPv4 header */
+ val_8 = 0x45;
+ mask_8 = 0xFF;
+ bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset,
+ &val_8, &mask_8,
+ sizeof(val_8));
+ size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
+ bcmgenet_hfb_insert_data(f_data,
+ ETH_HLEN + 20 + offset,
+ &fs->h_u.usr_ip4_spec.l4_4_bytes,
+ &fs->m_u.usr_ip4_spec.l4_4_bytes,
+ size);
+ f_length += DIV_ROUND_UP(size, 2);
+ break;
+ }
+
+ if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
+ /* Ring 0 flows can be handled by the default Descriptor Ring
+ * We'll map them to ring 0, but don't enable the filter
+ */
+ bcmgenet_hfb_set_filter(priv, f_data, f_length, 0,
+ fs->location);
+ rule->state = BCMGENET_RXNFC_STATE_DISABLED;
+ } else {
+ /* Other Rx rings are direct mapped here */
+ bcmgenet_hfb_set_filter(priv, f_data, f_length,
+ fs->ring_cookie, fs->location);
+ bcmgenet_hfb_enable_filter(priv, fs->location);
+ rule->state = BCMGENET_RXNFC_STATE_ENABLED;
+ }
+
+ kfree(f_data);
+
+ return err;
+}
+
+/* bcmgenet_hfb_add_filter
+ *
+ * Add new filter to Hardware Filter Block to match and direct Rx traffic to
+ * desired Rx queue.
+ *
+ * f_data is an array of unsigned 32-bit integers where each 32-bit integer
+ * provides filter data for 2 bytes (4 nibbles) of Rx frame:
+ *
+ * bits 31:20 - unused
+ * bit 19 - nibble 0 match enable
+ * bit 18 - nibble 1 match enable
+ * bit 17 - nibble 2 match enable
+ * bit 16 - nibble 3 match enable
+ * bits 15:12 - nibble 0 data
+ * bits 11:8 - nibble 1 data
+ * bits 7:4 - nibble 2 data
+ * bits 3:0 - nibble 3 data
+ *
+ * Example:
+ * In order to match:
+ * - Ethernet frame type = 0x0800 (IP)
+ * - IP version field = 4
+ * - IP protocol field = 0x11 (UDP)
+ *
+ * The following filter is needed:
+ * u32 hfb_filter_ipv4_udp[] = {
+ * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
+ * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
+ * };
+ *
+ * To add the filter to HFB and direct the traffic to Rx queue 0, call:
+ * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
+ * ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
+ */
+int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
+ u32 f_length, u32 rx_queue)
+{
+ int f_index;
+
+ f_index = bcmgenet_hfb_find_unused_filter(priv);
+ if (f_index < 0)
+ return -ENOMEM;
+
+ if (f_length > priv->hw_params->hfb_filter_size)
+ return -EINVAL;
+
+ bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index);
+ bcmgenet_hfb_enable_filter(priv, f_index);
+
+ return 0;
+}
+
+/* bcmgenet_hfb_clear
+ *
+ * Clear Hardware Filter Block and disable all filtering.
+ */
+static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
+{
+ u32 i;
+
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
+
+ for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+ bcmgenet_rdma_writel(priv, 0x0, i);
+
+ for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
+ bcmgenet_hfb_reg_writel(priv, 0x0,
+ HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+
+ for (i = 0; i < priv->hw_params->hfb_filter_cnt *
+ priv->hw_params->hfb_filter_size; i++)
+ bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
+}
+
+static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
+{
+ int i;
+
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
+ INIT_LIST_HEAD(&priv->rxnfc_list);
+ for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+ INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
+ priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
+ }
+
+ bcmgenet_hfb_clear(priv);
+}
+
static int bcmgenet_begin(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -1045,6 +1421,229 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
return phy_ethtool_set_eee(dev->phydev, e);
}
+static int bcmgenet_validate_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_usrip4_spec *l4_mask;
+ struct ethhdr *eth_mask;
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
+ netdev_err(dev, "rxnfc: Invalid location (%d)\n",
+ cmd->fs.location);
+ return -EINVAL;
+ }
+
+ switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case IP_USER_FLOW:
+ l4_mask = &cmd->fs.m_u.usr_ip4_spec;
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(l4_mask->ip4src) ||
+ VALIDATE_MASK(l4_mask->ip4dst) ||
+ VALIDATE_MASK(l4_mask->l4_4_bytes) ||
+ VALIDATE_MASK(l4_mask->proto) ||
+ VALIDATE_MASK(l4_mask->ip_ver) ||
+ VALIDATE_MASK(l4_mask->tos)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ break;
+ case ETHER_FLOW:
+ eth_mask = &cmd->fs.m_u.ether_spec;
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(eth_mask->h_source) ||
+ VALIDATE_MASK(eth_mask->h_source) ||
+ VALIDATE_MASK(eth_mask->h_proto)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
+ cmd->fs.flow_type);
+ return -EINVAL;
+ }
+
+ if ((cmd->fs.flow_type & FLOW_EXT)) {
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
+ VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
+ netdev_err(dev, "rxnfc: user-def not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int bcmgenet_insert_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *loc_rule;
+ int err;
+
+ if (priv->hw_params->hfb_filter_size < 128) {
+ netdev_err(dev, "rxnfc: Not supported by this device\n");
+ return -EINVAL;
+ }
+
+ if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
+ cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
+ netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
+ cmd->fs.ring_cookie);
+ return -EINVAL;
+ }
+
+ err = bcmgenet_validate_flow(dev, cmd);
+ if (err)
+ return err;
+
+ loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+ if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ list_del(&loc_rule->list);
+ loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
+ memcpy(&loc_rule->fs, &cmd->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+
+ err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
+ if (err) {
+ netdev_err(dev, "rxnfc: Could not install rule (%d)\n",
+ err);
+ return err;
+ }
+
+ list_add_tail(&loc_rule->list, &priv->rxnfc_list);
+
+ return 0;
+}
+
+static int bcmgenet_delete_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ int err = 0;
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->rxnfc_rules[cmd->fs.location];
+ if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ list_del(&rule->list);
+ rule->state = BCMGENET_RXNFC_STATE_UNUSED;
+ memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
+
+out:
+ return err;
+}
+
+static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = bcmgenet_insert_flow(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = bcmgenet_delete_flow(dev, cmd);
+ break;
+ default:
+ netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
+ cmd->cmd);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ int loc)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ int err = 0;
+
+ if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->rxnfc_rules[loc];
+ if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
+ err = -ENOENT;
+ else
+ memcpy(&cmd->fs, &rule->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+
+ return err;
+}
+
+static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
+{
+ struct list_head *pos;
+ int res = 0;
+
+ list_for_each(pos, &priv->rxnfc_list)
+ res++;
+
+ return res;
+}
+
+static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ int err = 0;
+ int i = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->hw_params->rx_queues ?: 1;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bcmgenet_get_num_flows(priv);
+ cmd->data = MAX_NUM_OF_FS_RULES;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (i < cmd->rule_cnt)
+ rule_locs[i++] = rule->fs.location;
+ cmd->rule_cnt = i;
+ cmd->data = MAX_NUM_OF_FS_RULES;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
@@ -1069,6 +1668,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_link_ksettings = bcmgenet_get_link_ksettings,
.set_link_ksettings = bcmgenet_set_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_rxnfc = bcmgenet_get_rxnfc,
+ .set_rxnfc = bcmgenet_set_rxnfc,
};
/* Power down the unimac, based on mode. */
@@ -2669,10 +3270,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
- struct bcmgenet_priv *priv = dev_id;
-
- pm_wakeup_event(&priv->pdev->dev, 0);
-
+ /* Acknowledge the interrupt */
return IRQ_HANDLED;
}
@@ -2710,9 +3308,8 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
unsigned char *addr)
{
- bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
- (addr[2] << 8) | addr[3], UMAC_MAC0);
- bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+ bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
+ bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
}
static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
@@ -2721,13 +3318,9 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
u32 addr_tmp;
addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
- addr[0] = addr_tmp >> 24;
- addr[1] = (addr_tmp >> 16) & 0xff;
- addr[2] = (addr_tmp >> 8) & 0xff;
- addr[3] = addr_tmp & 0xff;
+ put_unaligned_be32(addr_tmp, &addr[0]);
addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
- addr[4] = (addr_tmp >> 8) & 0xff;
- addr[5] = addr_tmp & 0xff;
+ put_unaligned_be16(addr_tmp, &addr[4]);
}
/* Returns a reusable dma control register value */
@@ -2766,43 +3359,12 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
-/* bcmgenet_hfb_clear
- *
- * Clear Hardware Filter Block and disable all filtering.
- */
-static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
-{
- u32 i;
-
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-
- for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
- bcmgenet_rdma_writel(priv, 0x0, i);
-
- for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
- bcmgenet_hfb_reg_writel(priv, 0x0,
- HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
-
- for (i = 0; i < priv->hw_params->hfb_filter_cnt *
- priv->hw_params->hfb_filter_size; i++)
- bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
-}
-
-static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
-{
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
- bcmgenet_hfb_clear(priv);
-}
-
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ bcmgenet_set_rx_mode(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3421,8 +3983,6 @@ MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
- struct device_node *dn = pdev->dev.of_node;
- const struct of_device_id *of_id = NULL;
const struct bcmgenet_plat_data *pdata;
struct bcmgenet_priv *priv;
struct net_device *dev;
@@ -3437,12 +3997,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
return -ENOMEM;
}
- if (dn) {
- of_id = of_match_node(bcmgenet_match, dn);
- if (!of_id)
- return -EINVAL;
- }
-
priv = netdev_priv(dev);
priv->irq0 = platform_get_irq(pdev, 0);
if (priv->irq0 < 0) {
@@ -3504,13 +4058,16 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
}
- priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
+ priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
if (IS_ERR(priv->clk)) {
dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
- priv->clk = NULL;
+ err = PTR_ERR(priv->clk);
+ goto err;
}
- clk_prepare_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ goto err;
bcmgenet_set_hw_params(priv);
@@ -3528,16 +4085,18 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->rx_buf_len = RX_BUF_LENGTH;
INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
- priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
+ priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
if (IS_ERR(priv->clk_wol)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
- priv->clk_wol = NULL;
+ err = PTR_ERR(priv->clk_wol);
+ goto err;
}
- priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
+ priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
- priv->clk_eee = NULL;
+ err = PTR_ERR(priv->clk_eee);
+ goto err;
}
/* If this is an internal GPHY, power it on now, before UniMAC is
@@ -3546,7 +4105,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
- if ((pd) && (!IS_ERR_OR_NULL(pd->mac_address)))
+ if (pd && !IS_ERR_OR_NULL(pd->mac_address))
ether_addr_copy(dev->dev_addr, pd->mac_address);
else
if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN))
@@ -3612,11 +4171,10 @@ static void bcmgenet_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int bcmgenet_resume(struct device *d)
+static int bcmgenet_resume_noirq(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long dma_ctrl;
int ret;
u32 reg;
@@ -3628,6 +4186,38 @@ static int bcmgenet_resume(struct device *d)
if (ret)
return ret;
+ if (device_may_wakeup(d) && priv->wolopts) {
+ /* Account for Wake-on-LAN events and clear those events
+ * (some devices need more time between enabling the clocks
+ * and the interrupt register reflecting the wake event, so
+ * read the register twice)
+ */
+ reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
+ reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
+ if (reg & UMAC_IRQ_WAKE_EVENT)
+ pm_wakeup_event(&priv->pdev->dev, 0);
+ }
+
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
+
+ return 0;
+}
+
+static int bcmgenet_resume(struct device *d)
+{
+ struct net_device *dev = dev_get_drvdata(d);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long dma_ctrl;
+ u32 offset, reg;
+ int ret;
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ if (device_may_wakeup(d) && priv->wolopts)
+ bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+
/* If this is an internal GPHY, power it back on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
@@ -3638,10 +4228,6 @@ static int bcmgenet_resume(struct device *d)
init_umac(priv);
- /* From WOL-enabled suspend, switch to regular clock */
- if (priv->wolopts)
- clk_disable_unprepare(priv->clk_wol);
-
phy_init_hw(dev->phydev);
/* Speed settings must be restored */
@@ -3653,15 +4239,17 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset);
+ bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32));
+ bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL);
+
if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
- if (priv->wolopts)
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
-
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
@@ -3698,7 +4286,7 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
- int ret = 0;
+ u32 offset;
if (!netif_running(dev))
return 0;
@@ -3710,25 +4298,53 @@ static int bcmgenet_suspend(struct device *d)
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
+ /* Preserve filter state and disable filtering */
+ priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset);
+ priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
+
+ return 0;
+}
+
+static int bcmgenet_suspend_noirq(struct device *d)
+{
+ struct net_device *dev = dev_get_drvdata(d);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
/* Prepare the device for Wake-on-LAN and switch to the slow clock */
- if (device_may_wakeup(d) && priv->wolopts) {
+ if (device_may_wakeup(d) && priv->wolopts)
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
- clk_prepare_enable(priv->clk_wol);
- } else if (priv->internal_phy) {
+ else if (priv->internal_phy)
ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
- }
+
+ /* Let the framework handle resumption and leave the clocks on */
+ if (ret)
+ return ret;
/* Turn off the clocks */
clk_disable_unprepare(priv->clk);
- if (ret)
- bcmgenet_resume(d);
-
- return ret;
+ return 0;
}
+#else
+#define bcmgenet_suspend NULL
+#define bcmgenet_suspend_noirq NULL
+#define bcmgenet_resume NULL
+#define bcmgenet_resume_noirq NULL
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
+static const struct dev_pm_ops bcmgenet_pm_ops = {
+ .suspend = bcmgenet_suspend,
+ .suspend_noirq = bcmgenet_suspend_noirq,
+ .resume = bcmgenet_resume,
+ .resume_noirq = bcmgenet_resume_noirq,
+};
static const struct acpi_device_id genet_acpi_match[] = {
{ "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
@@ -3744,7 +4360,7 @@ static struct platform_driver bcmgenet_driver = {
.name = "bcmgenet",
.of_match_table = bcmgenet_match,
.pm = &bcmgenet_pm_ops,
- .acpi_match_table = ACPI_PTR(genet_acpi_match),
+ .acpi_match_table = genet_acpi_match,
},
};
module_platform_driver(bcmgenet_driver);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index daf8fb2c39b6..a12cb59298f4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2020 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -14,6 +14,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/dim.h>
+#include <linux/ethtool.h>
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -31,6 +32,7 @@
#define DMA_MAX_BURST_LENGTH 0x10
/* misc. configuration */
+#define MAX_NUM_OF_FS_RULES 16
#define CLEAR_ALL_HFB 0xFF
#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4)
#define DMA_FC_THRESH_LO 5
@@ -310,6 +312,8 @@ struct bcmgenet_mib_counters {
#define UMAC_IRQ_HFB_SM (1 << 10)
#define UMAC_IRQ_HFB_MM (1 << 11)
#define UMAC_IRQ_MPD_R (1 << 12)
+#define UMAC_IRQ_WAKE_EVENT (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | \
+ UMAC_IRQ_MPD_R)
#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
@@ -608,6 +612,18 @@ struct bcmgenet_rx_ring {
struct bcmgenet_priv *priv;
};
+enum bcmgenet_rxnfc_state {
+ BCMGENET_RXNFC_STATE_UNUSED = 0,
+ BCMGENET_RXNFC_STATE_DISABLED,
+ BCMGENET_RXNFC_STATE_ENABLED
+};
+
+struct bcmgenet_rxnfc_rule {
+ struct list_head list;
+ struct ethtool_rx_flow_spec fs;
+ enum bcmgenet_rxnfc_state state;
+};
+
/* device context */
struct bcmgenet_priv {
void __iomem *base;
@@ -626,6 +642,8 @@ struct bcmgenet_priv {
struct enet_cb *rx_cbs;
unsigned int num_rx_bds;
unsigned int rx_buf_len;
+ struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES];
+ struct list_head rxnfc_list;
struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
@@ -676,6 +694,9 @@ struct bcmgenet_priv {
/* WOL */
struct clk *clk_wol;
u32 wolopts;
+ u8 sopass[SOPASS_MAX];
+ bool wol_active;
+ u32 hfb_en[3];
struct bcmgenet_mib_counters mib;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index c9a43695b182..4ea6a26b04f7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2020 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -41,18 +41,13 @@
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 reg;
- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+ wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
memset(wol->sopass, 0, sizeof(wol->sopass));
- if (wol->wolopts & WAKE_MAGICSECURE) {
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS);
- put_unaligned_be16(reg, &wol->sopass[0]);
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS);
- put_unaligned_be32(reg, &wol->sopass[2]);
- }
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
/* ethtool function - set WOL (Wake on LAN) settings.
@@ -62,25 +57,15 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
- u32 reg;
if (!device_can_wakeup(kdev))
return -ENOTSUPP;
- if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
+ if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER))
return -EINVAL;
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- if (wol->wolopts & WAKE_MAGICSECURE) {
- bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
- UMAC_MPD_PW_MS);
- bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
- UMAC_MPD_PW_LS);
- reg |= MPD_PW_EN;
- } else {
- reg &= ~MPD_PW_EN;
- }
- bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
@@ -120,12 +105,21 @@ static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv)
return retries;
}
+static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv)
+{
+ bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
+ UMAC_MPD_PW_MS);
+ bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
+ UMAC_MPD_PW_LS);
+}
+
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
+ struct bcmgenet_rxnfc_rule *rule;
+ u32 reg, hfb_ctrl_reg, hfb_enable = 0;
int retries = 0;
- u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
netif_err(priv, wol, dev, "unsupported mode: %d\n", mode);
@@ -142,22 +136,48 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
mdelay(10);
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- reg |= MPD_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+ reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+ reg |= MPD_EN;
+ if (priv->wolopts & WAKE_MAGICSECURE) {
+ bcmgenet_set_mpd_password(priv);
+ reg |= MPD_PW_EN;
+ }
+ bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ }
+
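+ /* Snapshot HFB control; for WAKE_FILTER, enter ACPI (wake) mode with
+ * matching disabled until the wake filters are programmed below.
+ */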
+ hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (priv->wolopts & WAKE_FILTER) {
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE)
+ hfb_enable |= (1 << rule->fs.location);
+ reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
/* Do not leave UniMAC in MPD mode only */
retries = bcmgenet_poll_wol_status(priv);
if (retries < 0) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~MPD_EN;
+ reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
return retries;
}
netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
retries);
+ clk_prepare_enable(priv->clk_wol);
+ priv->wol_active = 1;
+
+ if (hfb_enable) {
+ bcmgenet_hfb_reg_writel(priv, hfb_enable,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
+ }
+
/* Enable CRC forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
@@ -173,6 +193,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
+ reg = UMAC_IRQ_MPD_R;
+ if (hfb_enable)
+ reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
+
+ bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR);
+
return 0;
}
@@ -186,12 +212,22 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
return;
}
+ if (!priv->wol_active)
+ return; /* failed to suspend so skip the rest */
+
+ priv->wol_active = 0;
+ clk_disable_unprepare(priv->clk_wol);
+
+ /* Disable Magic Packet Detection */
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- if (!(reg & MPD_EN))
- return; /* already powered up so skip the rest */
- reg &= ~MPD_EN;
+ reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ /* Disable WAKE_FILTER Detection */
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+
/* Disable CRC Forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ff98a82b7bc4..7a3b22b35238 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10797,17 +10797,15 @@ static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
+ u32 off, len = TG3_OCIR_LEN;
int i;
- for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
- u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
-
+ for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
- off += len;
if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
!(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
- memset(ocir, 0, TG3_OCIR_LEN);
+ memset(ocir, 0, len);
}
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index d7e805749a5b..e40c64b79f66 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -782,7 +782,6 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
if ((work_done < budget && tx_done) ||
(iq && iq->pkt_in_done >= MAX_REG_CNT) ||
(droq->pkt_count >= MAX_REG_CNT)) {
- tx_done = 1;
napi_complete_done(napi, work_done);
octeon_enable_irq(droq->oct_dev, droq->q_no);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 3d01d3602d8f..fb380b4f3e02 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -712,18 +712,6 @@ struct octeon_device *lio_get_device(u32 octeon_id);
*/
int lio_get_device_id(void *dev);
-static inline u16 OCTEON_MAJOR_REV(struct octeon_device *oct)
-{
- u16 rev = (oct->rev_id & 0xC) >> 2;
-
- return (rev == 0) ? 1 : rev;
-}
-
-static inline u16 OCTEON_MINOR_REV(struct octeon_device *oct)
-{
- return oct->rev_id & 0x3;
-}
-
/** Read windowed register.
* @param oct - pointer to the Octeon device.
* @param addr - Address of the register to read.
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index b4b33368698f..2ba0ce115e63 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -552,6 +552,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
xdp.rxq = &rq->xdp_rxq;
+ xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM;
orig_data = xdp.data;
rcu_read_lock();
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 9909bfda167e..82cdfa51ce37 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -26,7 +26,7 @@ config CHELSIO_T1
This driver supports Chelsio gigabit and 10-gigabit
Ethernet cards. More information about adapter features and
performance tuning is in
- <file:Documentation/networking/device_drivers/chelsio/cxgb.txt>.
+ <file:Documentation/networking/device_drivers/chelsio/cxgb.rst>.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index e46a14f44a6f..fc1405a8ed74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -466,8 +466,6 @@ static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
return &((struct mbox_cmd *)&(log)[1])[entry_idx];
}
-#include "t4fw_api.h"
-
#define FW_VERSION(chip) ( \
FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
@@ -1127,19 +1125,20 @@ struct adapter {
* programmed with various parameters.
*/
struct ch_sched_params {
- s8 type; /* packet or flow */
+ u8 type; /* packet or flow */
union {
struct {
- s8 level; /* scheduler hierarchy level */
- s8 mode; /* per-class or per-flow */
- s8 rateunit; /* bit or packet rate */
- s8 ratemode; /* %port relative or kbps absolute */
- s8 channel; /* scheduler channel [0..N] */
- s8 class; /* scheduler class [0..N] */
- s32 minrate; /* minimum rate */
- s32 maxrate; /* maximum rate */
- s16 weight; /* percent weight */
- s16 pktsize; /* average packet size */
+ u8 level; /* scheduler hierarchy level */
+ u8 mode; /* per-class or per-flow */
+ u8 rateunit; /* bit or packet rate */
+ u8 ratemode; /* %port relative or kbps absolute */
+ u8 channel; /* scheduler channel [0..N] */
+ u8 class; /* scheduler class [0..N] */
+ u32 minrate; /* minimum rate */
+ u32 maxrate; /* maximum rate */
+ u16 weight; /* percent weight */
+ u16 pktsize; /* average packet size */
+ u16 burstsize; /* burst buffer size */
} params;
} u;
};
@@ -1954,9 +1953,10 @@ int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
enum ctxt_type ctype, u32 *data);
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
enum ctxt_type ctype, u32 *data);
-int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
- int rateunit, int ratemode, int channel, int class,
- int minrate, int maxrate, int weight, int pktsize);
+int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
+ u8 rateunit, u8 ratemode, u8 channel, u8 class,
+ u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
+ u16 burstsize);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_idma_monitor_init(struct adapter *adapter,
struct sge_idma_monitor_state *idma);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index ebed99f3d4cf..7818c392da50 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -49,6 +49,7 @@
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
+#include "cxgb4_tc_mqprio.h"
/* generic seq_file support for showing a table of size rows x width. */
static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -2657,32 +2658,19 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
- int eth_entries, ctrl_entries, eo_entries = 0;
+ int eth_entries, ctrl_entries, eohw_entries = 0, eosw_entries = 0;
int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
const struct sge_uld_txq_info *utxq_info;
const struct sge_uld_rxq_info *urxq_info;
+ struct cxgb4_tc_port_mqprio *port_mqprio;
struct adapter *adap = seq->private;
- int i, n, r = (uintptr_t)v - 1;
+ int i, j, n, r = (uintptr_t)v - 1;
struct sge *s = &adap->sge;
eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
- if (adap->sge.eohw_txq)
- eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
-
- mutex_lock(&uld_mutex);
- if (s->uld_txq_info)
- for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
- uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
-
- if (s->uld_rxq_info) {
- for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
- uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
- uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
- }
- }
if (r)
seq_putc(seq, '\n');
@@ -2759,11 +2747,21 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- goto unlock;
+ goto out;
}
r -= eth_entries;
- if (r < eo_entries) {
+ if (!adap->tc_mqprio)
+ goto skip_mqprio;
+
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+ if (!refcount_read(&adap->tc_mqprio->refcnt)) {
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto skip_mqprio;
+ }
+
+ eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
+ if (r < eohw_entries) {
int base_qset = r * 4;
const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
@@ -2808,10 +2806,71 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- goto unlock;
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto out;
+ }
+
+ r -= eohw_entries;
+ for (j = 0; j < adap->params.nports; j++) {
+ int entries;
+ u8 tc;
+
+ port_mqprio = &adap->tc_mqprio->port_mqprio[j];
+ entries = 0;
+ for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
+ entries += port_mqprio->mqprio.qopt.count[tc];
+
+ if (!entries)
+ continue;
+
+ eosw_entries = DIV_ROUND_UP(entries, 4);
+ if (r < eosw_entries) {
+ const struct sge_eosw_txq *tx;
+
+ n = min(4, entries - 4 * r);
+ tx = &port_mqprio->eosw_txq[4 * r];
+
+ S("QType:", "EOSW-TXQ");
+ S("Interface:",
+ adap->port[j] ? adap->port[j]->name : "N/A");
+ T("EOTID:", hwtid);
+ T("HWQID:", hwqid);
+ T("State:", state);
+ T("Size:", ndesc);
+ T("In-Use:", inuse);
+ T("Credits:", cred);
+ T("Compl:", ncompl);
+ T("Last-Compl:", last_compl);
+ T("PIDX:", pidx);
+ T("Last-PIDX:", last_pidx);
+ T("CIDX:", cidx);
+ T("Last-CIDX:", last_cidx);
+ T("FLOWC-IDX:", flowc_idx);
+
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto out;
+ }
+
+ r -= eosw_entries;
+ }
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+
+skip_mqprio:
+ if (!is_uld(adap))
+ goto skip_uld;
+
+ mutex_lock(&uld_mutex);
+ if (s->uld_txq_info)
+ for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
+ uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
+
+ if (s->uld_rxq_info) {
+ for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
+ uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
+ uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
+ }
}
- r -= eo_entries;
if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
const struct sge_uld_txq *tx;
@@ -2994,6 +3053,9 @@ do { \
}
r -= uld_txq_entries[CXGB4_TX_CRYPTO];
+ mutex_unlock(&uld_mutex);
+
+skip_uld:
if (r < ctrl_entries) {
const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];
@@ -3008,7 +3070,7 @@ do { \
TL("TxQFull:", q.stops);
TL("TxQRestarts:", q.restarts);
- goto unlock;
+ goto out;
}
r -= ctrl_entries;
@@ -3026,11 +3088,9 @@ do { \
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
s->counter_val[evtq->pktcnt_idx]);
- goto unlock;
+ goto out;
}
-unlock:
- mutex_unlock(&uld_mutex);
#undef R
#undef RL
#undef T
@@ -3039,13 +3099,38 @@ unlock:
#undef R3
#undef T3
#undef S3
+out:
+ return 0;
+
+unlock:
+ mutex_unlock(&uld_mutex);
return 0;
}
static int sge_queue_entries(const struct adapter *adap)
{
- int tot_uld_entries = 0;
- int i;
+ int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0;
+
+ if (adap->tc_mqprio) {
+ struct cxgb4_tc_port_mqprio *port_mqprio;
+ u8 tc;
+
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+ if (adap->sge.eohw_txq)
+ eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
+
+ for (i = 0; i < adap->params.nports; i++) {
+ u32 entries = 0;
+
+ port_mqprio = &adap->tc_mqprio->port_mqprio[i];
+ for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
+ entries += port_mqprio->mqprio.qopt.count[tc];
+
+ if (entries)
+ eosw_entries += DIV_ROUND_UP(entries, 4);
+ }
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ }
if (!is_uld(adap))
goto lld_only;
@@ -3062,8 +3147,7 @@ static int sge_queue_entries(const struct adapter *adap)
lld_only:
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
- tot_uld_entries +
+ eohw_entries + eosw_entries + tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
@@ -3244,6 +3328,10 @@ static int tid_info_show(struct seq_file *seq, void *v)
if (t->nhpftids)
seq_printf(seq, "HPFTID range: %u..%u\n", t->hpftid_base,
t->hpftid_base + t->nhpftids - 1);
+ if (t->neotids)
+ seq_printf(seq, "EOTID range: %u..%u, in use: %u\n",
+ t->eotid_base, t->eotid_base + t->neotids - 1,
+ atomic_read(&t->eotids_in_use));
if (t->ntids)
seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a70018f067aa..d05c2371d8c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1579,6 +1579,7 @@ static int tid_init(struct tid_info *t)
atomic_set(&t->tids_in_use, 0);
atomic_set(&t->conns_in_use, 0);
atomic_set(&t->hash_tids_in_use, 0);
+ atomic_set(&t->eotids_in_use, 0);
/* Setup the free list for atid_tab and clear the stid bitmap. */
if (natids) {
@@ -3021,7 +3022,7 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
SCHED_CLASS_RATEUNIT_BITS,
SCHED_CLASS_RATEMODE_ABS,
pi->tx_chan, class_id, 0,
- max_tx_rate * 1000, 0, pktsize);
+ max_tx_rate * 1000, 0, pktsize, 0);
if (ret) {
dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
ret);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index e6af4906d674..ae7123a9de8e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -342,6 +342,13 @@ static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);
+ /* Request larger burst buffer for smaller MTU, so
+ * that hardware can work on more data per burst
+ * cycle.
+ */
+ if (dev->mtu <= ETH_DATA_LEN)
+ p.u.params.burstsize = 8 * dev->mtu;
+
e = cxgb4_sched_class_alloc(dev, &p);
if (!e) {
ret = -ENOMEM;
@@ -567,6 +574,7 @@ static void cxgb4_mqprio_disable_offload(struct net_device *dev)
int cxgb4_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt_offload *mqprio)
{
+ struct adapter *adap = netdev2adap(dev);
bool needs_bring_up = false;
int ret;
@@ -574,6 +582,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
if (ret)
return ret;
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+
/* To configure tc params, the current allocated EOTIDs must
* be freed up. However, they can't be freed up if there's
* traffic running on the interface. So, ensure interface is
@@ -609,6 +619,7 @@ out:
if (needs_bring_up)
cxgb_open(dev);
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
return ret;
}
@@ -621,6 +632,7 @@ void cxgb4_mqprio_stop_offload(struct adapter *adap)
if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
return;
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
for_each_port(adap, i) {
dev = adap->port[i];
if (!dev)
@@ -632,6 +644,7 @@ void cxgb4_mqprio_stop_offload(struct adapter *adap)
cxgb4_mqprio_disable_offload(dev);
}
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
}
int cxgb4_init_tc_mqprio(struct adapter *adap)
@@ -653,6 +666,8 @@ int cxgb4_init_tc_mqprio(struct adapter *adap)
goto out_free_mqprio;
}
+ mutex_init(&tc_mqprio->mqprio_mutex);
+
tc_mqprio->port_mqprio = tc_port_mqprio;
for (i = 0; i < adap->params.nports; i++) {
port_mqprio = &tc_mqprio->port_mqprio[i];
@@ -687,6 +702,7 @@ void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
u8 i;
if (adap->tc_mqprio) {
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
if (adap->tc_mqprio->port_mqprio) {
for (i = 0; i < adap->params.nports; i++) {
struct net_device *dev = adap->port[i];
@@ -698,6 +714,7 @@ void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
}
kfree(adap->tc_mqprio->port_mqprio);
}
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
kfree(adap->tc_mqprio);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
index ff8794132b22..be96f1dc0372 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -33,6 +33,7 @@ struct cxgb4_tc_port_mqprio {
struct cxgb4_tc_mqprio {
refcount_t refcnt; /* Refcount for adapter-wide resources */
+ struct mutex mqprio_mutex; /* Lock for accessing MQPRIO info */
struct cxgb4_tc_port_mqprio *port_mqprio; /* Per port MQPRIO info */
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index be831317520a..16796785eea3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -147,6 +147,9 @@ struct tid_info {
/* TIDs in the HASH */
atomic_t hash_tids_in_use;
atomic_t conns_in_use;
+ /* ETHOFLD TIDs used for rate limiting */
+ atomic_t eotids_in_use;
+
/* lock for setting/clearing filter bitmap */
spinlock_t ftid_lock;
@@ -221,12 +224,14 @@ static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data)
{
set_bit(eotid, t->eotid_bmap);
t->eotid_tab[eotid].data = data;
+ atomic_inc(&t->eotids_in_use);
}
static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid)
{
clear_bit(eotid, t->eotid_bmap);
t->eotid_tab[eotid].data = NULL;
+ atomic_dec(&t->eotids_in_use);
}
int cxgb4_alloc_atid(struct tid_info *t, void *data);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index cebe1412d960..fde93c50cfec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -57,7 +57,8 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
p->u.params.ratemode,
p->u.params.channel, e->idx,
p->u.params.minrate, p->u.params.maxrate,
- p->u.params.weight, p->u.params.pktsize);
+ p->u.params.weight, p->u.params.pktsize,
+ p->u.params.burstsize);
break;
default:
err = -ENOTSUPP;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6516c45864b3..1359158652b7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2091,10 +2091,9 @@ static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
return flits + nsgl;
}
-static inline void *write_eo_wr(struct adapter *adap,
- struct sge_eosw_txq *eosw_txq,
- struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
- u32 hdr_len, u32 wrlen)
+static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
+ struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+ u32 hdr_len, u32 wrlen)
{
const struct skb_shared_info *ssi = skb_shinfo(skb);
struct cpl_tx_pkt_core *cpl;
@@ -2113,7 +2112,8 @@ static inline void *write_eo_wr(struct adapter *adap,
immd_len += hdr_len;
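+ /* Request a credit-update completion if none has been sent yet or if
+ * the WR credits used since the last one would reach half the budget.
+ */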
if (!eosw_txq->ncompl ||
- eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) {
+ (eosw_txq->last_compl + wrlen16) >=
+ (adap->params.ofldq_wr_cred / 2)) {
compl = true;
eosw_txq->ncompl++;
eosw_txq->last_compl = 0;
@@ -2153,8 +2153,8 @@ static inline void *write_eo_wr(struct adapter *adap,
return cpl;
}
-static void ethofld_hard_xmit(struct net_device *dev,
- struct sge_eosw_txq *eosw_txq)
+static int ethofld_hard_xmit(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
@@ -2167,8 +2167,8 @@ static void ethofld_hard_xmit(struct net_device *dev,
bool skip_eotx_wr = false;
struct tx_sw_desc *d;
struct sk_buff *skb;
+ int left, ret = 0;
u8 flits, ndesc;
- int left;
eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
spin_lock(&eohw_txq->lock);
@@ -2198,11 +2198,19 @@ static void ethofld_hard_xmit(struct net_device *dev,
wrlen = flits * 8;
wrlen16 = DIV_ROUND_UP(wrlen, 16);
- /* If there are no CPL credits, then wait for credits
- * to come back and retry again
+ left = txq_avail(&eohw_txq->q) - ndesc;
+
+ /* If there are no descriptors left in the hardware queues or no
+ * CPL credits left in the software queues, then wait for them
+ * to come back and retry. Note that a credit update is always
+ * requested via interrupt once half of the credits have been
+ * consumed, so the interrupt will eventually restore the
+ * credits and invoke the Tx path again.
+ */
- if (unlikely(wrlen16 > eosw_txq->cred))
+ if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) {
+ ret = -ENOMEM;
goto out_unlock;
+ }
if (unlikely(skip_eotx_wr)) {
start = (u64 *)wr;
@@ -2231,7 +2239,8 @@ write_wr_headers:
sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
hdr_len);
if (data_len) {
- if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) {
+ ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
+ if (unlikely(ret)) {
memset(d->addr, 0, sizeof(d->addr));
eohw_txq->mapping_err++;
goto out_unlock;
@@ -2277,12 +2286,13 @@ write_wr_headers:
out_unlock:
spin_unlock(&eohw_txq->lock);
+ return ret;
}
static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
{
struct sk_buff *skb;
- int pktcount;
+ int pktcount, ret;
switch (eosw_txq->state) {
case CXGB4_EO_STATE_ACTIVE:
@@ -2307,7 +2317,9 @@ static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
continue;
}
- ethofld_hard_xmit(dev, eosw_txq);
+ ret = ethofld_hard_xmit(dev, eosw_txq);
+ if (ret)
+ break;
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2a3480fc1d91..1c8068c02728 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -10361,9 +10361,10 @@ int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
return ret;
}
-int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
- int rateunit, int ratemode, int channel, int class,
- int minrate, int maxrate, int weight, int pktsize)
+int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
+ u8 rateunit, u8 ratemode, u8 channel, u8 class,
+ u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
+ u16 burstsize)
{
struct fw_sched_cmd cmd;
@@ -10385,6 +10386,7 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
cmd.u.params.max = cpu_to_be32(maxrate);
cmd.u.params.weight = cpu_to_be16(weight);
cmd.u.params.pktsize = cpu_to_be16(pktsize);
+ cmd.u.params.burstsize = cpu_to_be16(burstsize);
return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
NULL, 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 9cc3541a7e1c..cec865a97464 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2480,7 +2480,7 @@ static int setup_debugfs(struct adapter *adapter)
for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
debugfs_create_file(debugfs_files[i].name,
debugfs_files[i].mode,
- adapter->debugfs_root, (void *)adapter,
+ adapter->debugfs_root, adapter,
debugfs_files[i].fops);
return 0;
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 48f3198381bc..8d845f5ee0c5 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -24,7 +24,7 @@ config CS89x0
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file
- <file:Documentation/networking/device_drivers/cirrus/cs89x0.txt>.
+ <file:Documentation/networking/device_drivers/cirrus/cs89x0.rst>.
To compile this driver as a module, choose M here. The module
will be called cs89x0.
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 5bff5c2be88b..8d13ea370db1 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1224,7 +1224,8 @@ map_error:
return -ENOMEM;
}
-static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t gmac_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
unsigned short m = (1 << port->txq_order) - 1;
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 8ce6888ea722..177f36f4b89d 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -114,7 +114,7 @@ config DE4X5
These include the DE425, DE434, DE435, DE450 and DE500 models. If
you have a network card of this type, say Y. More specific
information is contained in
- <file:Documentation/networking/device_drivers/dec/de4x5.txt>.
+ <file:Documentation/networking/device_drivers/dec/de4x5.rst>.
To compile this driver as a module, choose M here. The module will
be called de4x5.
@@ -138,7 +138,7 @@ config DM9102
This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
Davicom (<http://www.davicom.com.tw/>). If you have such a network
(Ethernet) card, say Y. Some information is contained in the file
- <file:Documentation/networking/device_drivers/dec/dmfe.txt>.
+ <file:Documentation/networking/device_drivers/dec/dmfe.rst>.
To compile this driver as a module, choose M here. The module will
be called dmfe.
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index f16853c3c851..0ccd9994ad45 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -951,7 +951,7 @@ static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 si
static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
static int test_tp(struct net_device *dev, s32 msec);
static int EISA_signature(char *name, struct device *device);
-static int PCI_signature(char *name, struct de4x5_private *lp);
+static void PCI_signature(char *name, struct de4x5_private *lp);
static void DevicePresent(struct net_device *dev, u_long iobase);
static void enet_addr_rst(u_long aprom_addr);
static int de4x5_bad_srom(struct de4x5_private *lp);
@@ -3902,14 +3902,14 @@ EISA_signature(char *name, struct device *device)
/*
** Look for a particular board name in the PCI configuration space
*/
-static int
+static void
PCI_signature(char *name, struct de4x5_private *lp)
{
- int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
+ int i, siglen = ARRAY_SIZE(de4x5_signatures);
if (lp->chipset == DC21040) {
strcpy(name, "DE434/5");
- return status;
+ return;
} else { /* Search for a DEC name in the SROM */
int tmp = *((char *)&lp->srom + 19) * 3;
strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
@@ -3935,8 +3935,6 @@ PCI_signature(char *name, struct de4x5_private *lp)
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
lp->useSROM = true;
}
-
- return status;
}
/*
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 643090555cc7..5143722c4419 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1869,7 +1869,7 @@ Compile command:
gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
-Read Documentation/networking/device_drivers/dlink/dl2k.txt for details.
+Read Documentation/networking/device_drivers/dlink/dl2k.rst for details.
*/
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 057a508dd6e2..db98274501a0 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -776,8 +776,7 @@ static int dnet_probe(struct platform_device *pdev)
spin_lock_init(&bp->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bp->regs = devm_ioremap_resource(&pdev->dev, res);
+ bp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(bp->regs)) {
err = PTR_ERR(bp->regs);
goto err_out_free_dev;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 32cf54f0e35b..473b337b2e3b 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1057,9 +1057,6 @@ static int ftmac100_probe(struct platform_device *pdev)
struct ftmac100 *priv;
int err;
- if (!pdev)
- return -ENODEV;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 2cd1f8efdfa3..c4416a5f8816 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2107,7 +2107,7 @@ workaround:
/* Workaround for DPAA_A050385 requires data start to be aligned */
start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
- if (start - new_skb->data != 0)
+ if (start - new_skb->data)
skb_reserve(new_skb, start - new_skb->data);
skb_put(new_skb, skb->len);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index a9afe46b837f..0a31e4268dfb 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -127,16 +127,19 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
int i;
seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
- seq_printf(file, "%s%16s%16s%16s%16s\n",
- "CHID", "CPU", "Deq busy", "CDANs", "Buf count");
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
+ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
+ "Avg Frm/CDAN", "Buf count");
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- seq_printf(file, "%4d%16d%16llu%16llu%16d\n",
+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
ch->ch_id,
ch->nctx.desired_cpu,
ch->stats.dequeue_portal_busy,
+ ch->stats.frames,
ch->stats.cdan,
+ div64_u64(ch->stats.frames, ch->stats.cdan),
ch->buf_count);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index d97c320a2dc0..5fbaa51b38bc 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2020 NXP
*/
#include <linux/init.h>
#include <linux/module.h>
@@ -244,6 +244,35 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
ch->xdp.drop_cnt = 0;
}
+static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_eth_xdp_fds *xdp_fds)
+{
+ int total_enqueued = 0, retries = 0, enqueued;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ int num_fds, err, max_retries;
+ struct dpaa2_fd *fds;
+
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ /* try to enqueue all the FDs until the max number of retries is hit */
+ fds = xdp_fds->fds;
+ num_fds = xdp_fds->num;
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fds[total_enqueued],
+ 0, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ percpu_extras->tx_portal_busy += ++retries;
+ continue;
+ }
+ total_enqueued += enqueued;
+ }
+ xdp_fds->num = 0;
+
+ return total_enqueued;
+}
+
static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
void *buf_start, u16 queue_id)
{
@@ -268,7 +297,7 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
fq = &priv->fq[queue_id];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, fd, 0);
+ err = priv->enqueue(priv, fq, fd, 0, 1, NULL);
if (err != -EBUSY)
break;
}
@@ -302,6 +331,9 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
xdp_set_data_meta_invalid(&xdp);
xdp.rxq = &ch->xdp_rxq;
+ xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
+ (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);
+
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
/* xdp.data pointer may have changed */
@@ -337,7 +369,11 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
dma_unmap_page(priv->net_dev->dev.parent, addr,
priv->rx_buf_size, DMA_BIDIRECTIONAL);
ch->buf_count--;
+
+ /* Allow redirect use of full headroom */
xdp.data_hard_start = vaddr;
+ xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
+
err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
if (unlikely(err))
ch->stats.xdp_drop++;
@@ -493,6 +529,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
return 0;
fq->stats.frames += cleaned;
+ ch->stats.frames += cleaned;
/* A dequeue operation only pulls frames from a single queue
* into the store. Return the frame queue as an out param.
@@ -847,7 +884,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
* the Tx confirmation callback for this frame
*/
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, prio);
+ err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
if (err != -EBUSY)
break;
}
@@ -1880,20 +1917,16 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return 0;
}
-static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
- struct xdp_frame *xdpf)
+static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
+ struct xdp_frame *xdpf,
+ struct dpaa2_fd *fd)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
- struct rtnl_link_stats64 *percpu_stats;
- struct dpaa2_eth_drv_stats *percpu_extras;
unsigned int needed_headroom;
struct dpaa2_eth_swa *swa;
- struct dpaa2_eth_fq *fq;
- struct dpaa2_fd fd;
void *buffer_start, *aligned_start;
dma_addr_t addr;
- int err, i;
/* We require a minimum headroom to be able to transmit the frame.
* Otherwise return an error and let the original net_device handle it
@@ -1902,11 +1935,8 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
if (xdpf->headroom < needed_headroom)
return -EINVAL;
- percpu_stats = this_cpu_ptr(priv->percpu_stats);
- percpu_extras = this_cpu_ptr(priv->percpu_extras);
-
/* Setup the FD fields */
- memset(&fd, 0, sizeof(fd));
+ memset(fd, 0, sizeof(*fd));
/* Align FD address, if possible */
buffer_start = xdpf->data - needed_headroom;
@@ -1924,32 +1954,14 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
addr = dma_map_single(dev, buffer_start,
swa->xdp.dma_size,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, addr))) {
- percpu_stats->tx_dropped++;
+ if (unlikely(dma_mapping_error(dev, addr)))
return -ENOMEM;
- }
-
- dpaa2_fd_set_addr(&fd, addr);
- dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
- dpaa2_fd_set_len(&fd, xdpf->len);
- dpaa2_fd_set_format(&fd, dpaa2_fd_single);
- dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
-
- fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
- for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, 0);
- if (err != -EBUSY)
- break;
- }
- percpu_extras->tx_portal_busy += i;
- if (unlikely(err < 0)) {
- percpu_stats->tx_errors++;
- /* let the Rx device handle the cleanup */
- return err;
- }
- percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
+ dpaa2_fd_set_len(fd, xdpf->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
return 0;
}
@@ -1957,8 +1969,12 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
struct xdp_frame **frames, u32 flags)
{
- int drops = 0;
- int i, err;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_fd *fds;
+ int enqueued, i, err;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -1966,17 +1982,31 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
if (!netif_running(net_dev))
return -ENETDOWN;
- for (i = 0; i < n; i++) {
- struct xdp_frame *xdpf = frames[i];
+ fq = &priv->fq[smp_processor_id()];
+ xdp_redirect_fds = &fq->xdp_redirect_fds;
+ fds = xdp_redirect_fds->fds;
- err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ /* create a FD for each xdp_frame in the list received */
+ for (i = 0; i < n; i++) {
+ err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
+ if (err)
+ break;
}
+ xdp_redirect_fds->num = i;
- return n - drops;
+ /* enqueue all the frame descriptors */
+ enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
+
+ /* update statistics */
+ percpu_stats->tx_packets += enqueued;
+ for (i = 0; i < enqueued; i++)
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+ for (i = enqueued; i < n; i++)
+ xdp_return_frame_rx_napi(frames[i]);
+
+ return enqueued;
}
static int update_xps(struct dpaa2_eth_priv *priv)
@@ -2018,7 +2048,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev,
int i;
if (type != TC_SETUP_QDISC_MQPRIO)
- return -EINVAL;
+ return -EOPNOTSUPP;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
num_queues = dpaa2_eth_queue_count(priv);
@@ -2030,7 +2060,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev,
if (num_tc > dpaa2_eth_tc_count(priv)) {
netdev_err(net_dev, "Max %d traffic classes supported\n",
dpaa2_eth_tc_count(priv));
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (!num_tc) {
@@ -2528,19 +2558,38 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_fq *fq,
- struct dpaa2_fd *fd, u8 prio)
+ struct dpaa2_fd *fd, u8 prio,
+ u32 num_frames __always_unused,
+ int *frames_enqueued)
{
- return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
- priv->tx_qdid, prio,
- fq->tx_qdbin, fd);
+ int err;
+
+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, prio,
+ fq->tx_qdbin, fd);
+ if (!err && frames_enqueued)
+ *frames_enqueued = 1;
+ return err;
}
-static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq,
- struct dpaa2_fd *fd, u8 prio)
+static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd,
+ u8 prio, u32 num_frames,
+ int *frames_enqueued)
{
- return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
- fq->tx_fqid[prio], fd);
+ int err;
+
+ err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
+ fq->tx_fqid[prio],
+ fd, num_frames);
+
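+ /* The multi-frame enqueue returns the number of FDs accepted; zero
+ * means the portal was busy and nothing was enqueued.
+ */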
+ if (err == 0)
+ return -EBUSY;
+
+ if (frames_enqueued)
+ *frames_enqueued = err;
+ return 0;
}
static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
@@ -2549,7 +2598,7 @@ static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
priv->enqueue = dpaa2_eth_enqueue_qd;
else
- priv->enqueue = dpaa2_eth_enqueue_fq;
+ priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
}
static int set_pause(struct dpaa2_eth_priv *priv)
@@ -2610,7 +2659,7 @@ static void update_tx_fqids(struct dpaa2_eth_priv *priv)
}
}
- priv->enqueue = dpaa2_eth_enqueue_fq;
+ priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
return;
@@ -2684,8 +2733,10 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
dpaa2_eth_fs_count(priv), GFP_KERNEL);
- if (!priv->cls_rules)
+ if (!priv->cls_rules) {
+ err = -ENOMEM;
goto close;
+ }
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 13242bf5b427..42f0a7a80afa 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2020 NXP
*/
#ifndef __DPAA2_ETH_H
@@ -288,6 +288,8 @@ struct dpaa2_eth_ch_stats {
__u64 xdp_tx;
__u64 xdp_tx_err;
__u64 xdp_redirect;
+ /* Must be last, does not show up in ethtool stats */
+ __u64 frames;
};
/* Maximum number of queues associated with a DPNI */
@@ -308,6 +310,11 @@ enum dpaa2_eth_fq_type {
struct dpaa2_eth_priv;
+struct dpaa2_eth_xdp_fds {
+ struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
+ ssize_t num;
+};
+
struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
@@ -325,6 +332,8 @@ struct dpaa2_eth_fq {
const struct dpaa2_fd *fd,
struct dpaa2_eth_fq *fq);
struct dpaa2_eth_fq_stats stats;
+
+ struct dpaa2_eth_xdp_fds xdp_redirect_fds;
};
struct dpaa2_eth_ch_xdp {
@@ -371,7 +380,9 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
int (*enqueue)(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_fq *fq,
- struct dpaa2_fd *fd, u8 prio);
+ struct dpaa2_fd *fd, u8 prio,
+ u32 num_frames,
+ int *frames_enqueued);
u8 num_channels;
struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index b7141fdc279e..049afd1d6252 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -277,7 +277,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
/* Per-channel stats */
for (k = 0; k < priv->num_channels; k++) {
ch_stats = &priv->channel[k]->stats;
- for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
+ for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
i += j;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index ccf2611f4a20..298c55786fd9 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -756,6 +756,9 @@ void enetc_get_si_caps(struct enetc_si *si)
if (val & ENETC_SIPCAPR0_QBV)
si->hw_features |= ENETC_SI_F_QBV;
+
+ if (val & ENETC_SIPCAPR0_PSFP)
+ si->hw_features |= ENETC_SI_F_PSFP;
}
static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
@@ -1518,6 +1521,8 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
return enetc_setup_tc_cbs(ndev, type_data);
case TC_SETUP_QDISC_ETF:
return enetc_setup_tc_txtime(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return enetc_setup_tc_psfp(ndev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -1567,15 +1572,42 @@ static int enetc_set_rss(struct net_device *ndev, int en)
return 0;
}
+static int enetc_set_psfp(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
int enetc_set_features(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
+ int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
- return 0;
+ if (changed & NETIF_F_HW_TC)
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+
+ return err;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 56c43f35b633..b705464f6882 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -151,6 +151,7 @@ enum enetc_errata {
};
#define ENETC_SI_F_QBV BIT(0)
+#define ENETC_SI_F_PSFP BIT(1)
/* PCI IEP device data */
struct enetc_si {
@@ -203,12 +204,20 @@ struct enetc_cls_rule {
};
#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+struct psfp_cap {
+ u32 max_streamid;
+ u32 max_psfp_filter;
+ u32 max_psfp_gate;
+ u32 max_psfp_gatelist;
+ u32 max_psfp_meter;
+};
/* TODO: more hardware offloads */
enum enetc_active_offloads {
ENETC_F_RX_TSTAMP = BIT(0),
ENETC_F_TX_TSTAMP = BIT(1),
ENETC_F_QBV = BIT(2),
+ ENETC_F_QCI = BIT(3),
};
struct enetc_ndev_priv {
@@ -231,6 +240,8 @@ struct enetc_ndev_priv {
struct enetc_cls_rule *cls_rules;
+ struct psfp_cap psfp_cap;
+
struct device_node *phy_node;
phy_interface_t if_mode;
};
@@ -289,9 +300,84 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct net_device *ndev);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
+int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
+int enetc_psfp_init(struct enetc_ndev_priv *priv);
+int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+
+static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
+{
+ u32 reg;
+
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
+ priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
+ /* Port stream filter capability */
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
+ priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
+ /* Port stream gate capability */
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
+ priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
+ priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
+ /* Port flow meter capability */
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
+ priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
+}
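Each capability register packs one or two limits into a 32-bit word, extracted above with an explicit mask and shift. FIELD_GET() from <linux/bitfield.h> expresses the same thing; a hypothetical helper for the gate-control-list depth (bits 18:16 of ENETC_PSGCAPR):

#include <linux/bitfield.h>

static u32 example_gcl_depth(u32 reg)
{
	/* ENETC_PSGCAPR_GCL_MSK is GENMASK(18, 16), so this is
	 * identical to the (reg & GENMASK(18, 16)) >> 16 used above.
	 */
	return FIELD_GET(ENETC_PSGCAPR_GCL_MSK, reg);
}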
+
+static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int err;
+
+ enetc_get_max_cap(priv);
+
+ err = enetc_psfp_init(priv);
+ if (err)
+ return err;
+
+ enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) |
+ ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS |
+ ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);
+
+ return 0;
+}
+
+static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int err;
+
+ err = enetc_psfp_clean(priv);
+ if (err)
+ return err;
+
+ enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) &
+ ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS &
+ ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);
+
+ memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap));
+
+ return 0;
+}
+
#else
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(ndev) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_block_cb NULL
+
+#define enetc_get_max_cap(p) \
+ memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap))
+
+static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
+{
+ return 0;
+}
+
+static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 2a6523136947..6314051bc6c1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -19,6 +19,7 @@
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
#define ENETC_SIPCAPR0_QBV BIT(4)
+#define ENETC_SIPCAPR0_PSFP BIT(9)
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
@@ -228,6 +229,15 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11))
#define ENETC_PM0_IFM_RGAUTO (BIT(15) | ENETC_PMO_IFM_RG | BIT(1))
#define ENETC_PM0_IFM_XGMII BIT(12)
+#define ENETC_PSIDCAPR 0x1b08
+#define ENETC_PSIDCAPR_MSK GENMASK(15, 0)
+#define ENETC_PSFCAPR 0x1b18
+#define ENETC_PSFCAPR_MSK GENMASK(15, 0)
+#define ENETC_PSGCAPR 0x1b28
+#define ENETC_PSGCAPR_GCL_MSK GENMASK(18, 16)
+#define ENETC_PSGCAPR_SGIT_MSK GENMASK(15, 0)
+#define ENETC_PFMCAPR 0x1b38
+#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
/* MAC counters */
#define ENETC_PM0_REOCT 0x8100
@@ -557,6 +567,9 @@ enum bdcr_cmd_class {
BDCR_CMD_RFS,
BDCR_CMD_PORT_GCL,
BDCR_CMD_RECV_CLASSIFIER,
+ BDCR_CMD_STREAM_IDENTIFY,
+ BDCR_CMD_STREAM_FILTER,
+ BDCR_CMD_STREAM_GCL,
__BDCR_CMD_MAX_LEN,
BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
};
@@ -588,13 +601,152 @@ struct tgs_gcl_data {
struct gce entry[];
};
+/* class 7, command 0, Stream Identity Entry Configuration */
+struct streamid_conf {
+ __le32 stream_handle; /* init gate value */
+ __le32 iports;
+ u8 id_type;
+ u8 oui[3];
+ u8 res[3];
+ u8 en;
+};
+
+#define ENETC_CBDR_SID_VID_MASK 0xfff
+#define ENETC_CBDR_SID_VIDM BIT(12)
+#define ENETC_CBDR_SID_TG_MASK 0xc000
+/* streamid_conf address points to this data space */
+struct streamid_data {
+ union {
+ u8 dmac[6];
+ u8 smac[6];
+ };
+ u16 vid_vidm_tg;
+};
+
+#define ENETC_CBDR_SFI_PRI_MASK 0x7
+#define ENETC_CBDR_SFI_PRIM BIT(3)
+#define ENETC_CBDR_SFI_BLOV BIT(4)
+#define ENETC_CBDR_SFI_BLEN BIT(5)
+#define ENETC_CBDR_SFI_MSDUEN BIT(6)
+#define ENETC_CBDR_SFI_FMITEN BIT(7)
+#define ENETC_CBDR_SFI_ENABLE BIT(7)
+/* class 8, command 0, Stream Filter Instance, Short Format */
+struct sfi_conf {
+ __le32 stream_handle;
+ u8 multi;
+ u8 res[2];
+ u8 sthm;
+ /* Max Service Data Unit or Flow Meter Instance Table index.
+ * Depending on the value of FLT this represents either Max
+ * Service Data Unit (max frame size) allowed by the filter
+ * entry or is an index into the Flow Meter Instance table
+ * index identifying the policer which will be used to police
+ * it.
+ */
+ __le16 fm_inst_table_index;
+ __le16 msdu;
+ __le16 sg_inst_table_index;
+ u8 res1[2];
+ __le32 input_ports;
+ u8 res2[3];
+ u8 en;
+};
+
+/* class 8, command 2 stream Filter Instance status query short format
+ * the command itself needs no structure definition
+ * Stream Filter Instance Query Statistics Response data
+ */
+struct sfi_counter_data {
+ u32 matchl;
+ u32 matchh;
+ u32 msdu_dropl;
+ u32 msdu_droph;
+ u32 stream_gate_dropl;
+ u32 stream_gate_droph;
+ u32 flow_meter_dropl;
+ u32 flow_meter_droph;
+};
+
+#define ENETC_CBDR_SGI_OIPV_MASK 0x7
+#define ENETC_CBDR_SGI_OIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_CGTST BIT(6)
+#define ENETC_CBDR_SGI_OGTST BIT(7)
+#define ENETC_CBDR_SGI_CFG_CHG BIT(1)
+#define ENETC_CBDR_SGI_CFG_PND BIT(2)
+#define ENETC_CBDR_SGI_OEX BIT(4)
+#define ENETC_CBDR_SGI_OEXEN BIT(5)
+#define ENETC_CBDR_SGI_IRX BIT(6)
+#define ENETC_CBDR_SGI_IRXEN BIT(7)
+#define ENETC_CBDR_SGI_ACLLEN_MASK 0x3
+#define ENETC_CBDR_SGI_OCLLEN_MASK 0xc
+#define ENETC_CBDR_SGI_EN BIT(7)
+/* class 9, command 0, Stream Gate Instance Table, Short Format
+ * class 9, command 2, Stream Gate Instance Table entry query write back
+ * Short Format
+ */
+struct sgi_table {
+ u8 res[8];
+ u8 oipv;
+ u8 res0[2];
+ u8 ocgtst;
+ u8 res1[7];
+ u8 gset;
+ u8 oacl_len;
+ u8 res2[2];
+ u8 en;
+};
+
+#define ENETC_CBDR_SGI_AIPV_MASK 0x7
+#define ENETC_CBDR_SGI_AIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_AGTST BIT(7)
+
+/* class 9, command 1, Stream Gate Control List, Long Format */
+struct sgcl_conf {
+ u8 aipv;
+ u8 res[2];
+ u8 agtst;
+ u8 res1[4];
+ union {
+ struct {
+ u8 res2[4];
+ u8 acl_len;
+ u8 res3[3];
+ };
+ u8 cct[8]; /* Config change time */
+ };
+};
+
+#define ENETC_CBDR_SGL_IOMEN BIT(0)
+#define ENETC_CBDR_SGL_IPVEN BIT(3)
+#define ENETC_CBDR_SGL_GTST BIT(4)
+#define ENETC_CBDR_SGL_IPV_MASK 0xe
+/* Stream Gate Control List Entry */
+struct sgce {
+ u32 interval;
+ u8 msdu[3];
+ u8 multi;
+};
+
+/* stream control list, class 9, cmd 1 data buffer */
+struct sgcl_data {
+ u32 btl;
+ u32 bth;
+ u32 ct;
+ u32 cte;
+ struct sgce sgcl[0];
+};
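The gate control list buffer defined above is later sized with struct_size() in enetc_qos.c. A quick check of that arithmetic, assuming no extra padding:

/* struct sgcl_data header: btl + bth + ct + cte       = 4 * 4 = 16 bytes
 * struct sgce entry:       interval + msdu[3] + multi =          8 bytes
 * struct_size(sgcl_data, sgcl, 4)                     = 16 + 4 * 8 = 48 bytes
 */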
+
struct enetc_cbd {
union{
+ struct sfi_conf sfi_conf;
+ struct sgi_table sgi_table;
struct {
__le32 addr[2];
union {
__le32 opt[4];
struct tgs_gcl_conf gcl_conf;
+ struct streamid_conf sid_set;
+ struct sgcl_conf sgcl_conf;
};
}; /* Long format */
__le32 data[6];
@@ -621,3 +773,10 @@ struct enetc_cbd {
/* Port time specific departure */
#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
#define ENETC_TSDE BIT(31)
+
+/* PSFP setting */
+#define ENETC_PPSFPMR 0x11b00
+#define ENETC_PPSFPMR_PSFPEN BIT(0)
+#define ENETC_PPSFPMR_VS BIT(1)
+#define ENETC_PPSFPMR_PVC BIT(2)
+#define ENETC_PPSFPMR_PVZC BIT(3)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 85e2b741df41..824d211ec00f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -50,21 +50,6 @@ static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val);
}
-static bool enetc_si_vlan_promisc_is_on(struct enetc_pf *pf, int si_idx)
-{
- return pf->vlan_promisc_simap & BIT(si_idx);
-}
-
-static bool enetc_vlan_filter_is_on(struct enetc_pf *pf)
-{
- int i;
-
- for_each_set_bit(i, pf->active_vlans, VLAN_N_VID)
- return true;
-
- return false;
-}
-
static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
{
pf->vlan_promisc_simap |= BIT(si_idx);
@@ -204,6 +189,7 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_pf *pf = enetc_si_priv(priv->si);
+ char vlan_promisc_simap = pf->vlan_promisc_simap;
struct enetc_hw *hw = &priv->si->hw;
bool uprom = false, mprom = false;
struct enetc_mac_filter *filter;
@@ -216,16 +202,16 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
uprom = true;
mprom = true;
- /* enable VLAN promisc mode for SI0 */
- if (!enetc_si_vlan_promisc_is_on(pf, 0))
- enetc_enable_si_vlan_promisc(pf, 0);
-
+ /* Enable VLAN promiscuous mode for SI0 (PF) */
+ vlan_promisc_simap |= BIT(0);
} else if (ndev->flags & IFF_ALLMULTI) {
/* enable multi cast promisc mode for SI0 (PF) */
psipmr = ENETC_PSIPMR_SET_MP(0);
mprom = true;
}
+ enetc_set_vlan_promisc(&pf->si->hw, vlan_promisc_simap);
+
/* first 2 filter entries belong to PF */
if (!uprom) {
/* Update unicast filters */
@@ -306,9 +292,6 @@ static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
struct enetc_pf *pf = enetc_si_priv(priv->si);
int idx;
- if (enetc_si_vlan_promisc_is_on(pf, 0))
- enetc_disable_si_vlan_promisc(pf, 0);
-
__set_bit(vid, pf->active_vlans);
idx = enetc_vid_hash_idx(vid);
@@ -326,9 +309,6 @@ static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
__clear_bit(vid, pf->active_vlans);
enetc_sync_vlan_ht_filter(pf, true);
- if (!enetc_vlan_filter_is_on(pf))
- enetc_enable_si_vlan_promisc(pf, 0);
-
return 0;
}
@@ -677,6 +657,15 @@ static int enetc_pf_set_features(struct net_device *ndev,
enetc_enable_txvlan(&priv->si->hw, 0,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (!!(features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ enetc_disable_si_vlan_promisc(pf, 0);
+ else
+ enetc_enable_si_vlan_promisc(pf, 0);
+ }
+
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
@@ -719,12 +708,11 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_LOOPBACK;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
@@ -739,6 +727,12 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
if (si->hw_features & ENETC_SI_F_QBV)
priv->active_offloads |= ENETC_F_QBV;
+ if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
+ priv->active_offloads |= ENETC_F_QCI;
+ ndev->features |= NETIF_F_HW_TC;
+ ndev->hw_features |= NETIF_F_HW_TC;
+ }
+
/* pick up primary MAC address from SI */
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 0c6bf3a55a9a..fd3df19eaa32 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -5,6 +5,9 @@
#include <net/pkt_sched.h>
#include <linux/math64.h>
+#include <linux/refcount.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gate.h>
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
@@ -331,3 +334,1103 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
return 0;
}
+
+enum streamid_type {
+ STREAMID_TYPE_RESERVED = 0,
+ STREAMID_TYPE_NULL,
+ STREAMID_TYPE_SMAC,
+};
+
+enum streamid_vlan_tagged {
+ STREAMID_VLAN_RESERVED = 0,
+ STREAMID_VLAN_TAGGED,
+ STREAMID_VLAN_UNTAGGED,
+ STREAMID_VLAN_ALL,
+};
+
+#define ENETC_PSFP_WILDCARD -1
+#define HANDLE_OFFSET 100
+
+enum forward_type {
+ FILTER_ACTION_TYPE_PSFP = BIT(0),
+ FILTER_ACTION_TYPE_ACL = BIT(1),
+ FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
+};
+
+/* Limits the permitted output type for a given set of input actions */
+struct actions_fwd {
+ u64 actions;
+ u64 keys; /* includes the keys that must be present */
+ enum forward_type output;
+};
+
+struct psfp_streamfilter_counters {
+ u64 matching_frames_count;
+ u64 passing_frames_count;
+ u64 not_passing_frames_count;
+ u64 passing_sdu_count;
+ u64 not_passing_sdu_count;
+ u64 red_frames_count;
+};
+
+struct enetc_streamid {
+ u32 index;
+ union {
+ u8 src_mac[6];
+ u8 dst_mac[6];
+ };
+ u8 filtertype;
+ u16 vid;
+ u8 tagged;
+ s32 handle;
+};
+
+struct enetc_psfp_filter {
+ u32 index;
+ s32 handle;
+ s8 prio;
+ u32 gate_id;
+ s32 meter_id;
+ refcount_t refcount;
+ struct hlist_node node;
+};
+
+struct enetc_psfp_gate {
+ u32 index;
+ s8 init_ipv;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletimext;
+ u32 num_entries;
+ refcount_t refcount;
+ struct hlist_node node;
+ struct action_gate_entry entries[0];
+};
+
+struct enetc_stream_filter {
+ struct enetc_streamid sid;
+ u32 sfi_index;
+ u32 sgi_index;
+ struct flow_stats stats;
+ struct hlist_node node;
+};
+
+struct enetc_psfp {
+ unsigned long dev_bitmap;
+ unsigned long *psfp_sfi_bitmap;
+ struct hlist_head stream_list;
+ struct hlist_head psfp_filter_list;
+ struct hlist_head psfp_gate_list;
+ spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
+};
+
+static struct actions_fwd enetc_act_fwd[] = {
+ {
+ BIT(FLOW_ACTION_GATE),
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+ FILTER_ACTION_TYPE_PSFP
+ },
+ /* example for ACL actions */
+ {
+ BIT(FLOW_ACTION_DROP),
+ 0,
+ FILTER_ACTION_TYPE_ACL
+ }
+};
+
+static struct enetc_psfp epsfp = {
+ .psfp_sfi_bitmap = NULL,
+};
+
+static LIST_HEAD(enetc_block_cb_list);
+
+static inline int enetc_get_port(struct enetc_ndev_priv *priv)
+{
+ return priv->si->pdev->devfn & 0x7;
+}
+
+/* Stream Identity Entry Set Descriptor */
+static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_streamid *sid,
+ u8 enable)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct streamid_data *si_data;
+ struct streamid_conf *si_conf;
+ u16 data_size;
+ dma_addr_t dma;
+ int err;
+
+ if (sid->index >= priv->psfp_cap.max_streamid)
+ return -EINVAL;
+
+ if (sid->filtertype != STREAMID_TYPE_NULL &&
+ sid->filtertype != STREAMID_TYPE_SMAC)
+ return -EOPNOTSUPP;
+
+ /* Disable operation before enable */
+ cbd.index = cpu_to_le16((u16)sid->index);
+ cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
+ cbd.status_flags = 0;
+
+ data_size = sizeof(struct streamid_data);
+ si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ cbd.length = cpu_to_le16(data_size);
+
+ dma = dma_map_single(&priv->si->pdev->dev, si_data,
+ data_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ kfree(si_data);
+ return -ENOMEM;
+ }
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+ memset(si_data->dmac, 0xff, ETH_ALEN);
+ si_data->vid_vidm_tg =
+ cpu_to_le16(ENETC_CBDR_SID_VID_MASK
+ + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
+
+ si_conf = &cbd.sid_set;
+ /* Only one port is supported per entry; set this port's own bit */
+ si_conf->iports = 1 << enetc_get_port(priv);
+ si_conf->id_type = 1;
+ si_conf->oui[2] = 0x0;
+ si_conf->oui[1] = 0x80;
+ si_conf->oui[0] = 0xC2;
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ return -EINVAL;
+
+ if (!enable) {
+ kfree(si_data);
+ return 0;
+ }
+
+ /* Enable the entry overwrite again in case the space was flushed by hardware */
+ memset(&cbd, 0, sizeof(cbd));
+
+ cbd.index = cpu_to_le16((u16)sid->index);
+ cbd.cmd = 0;
+ cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
+ cbd.status_flags = 0;
+
+ si_conf->en = 0x80;
+ si_conf->stream_handle = cpu_to_le32(sid->handle);
+ si_conf->iports = 1 << enetc_get_port(priv);
+ si_conf->id_type = sid->filtertype;
+ si_conf->oui[2] = 0x0;
+ si_conf->oui[1] = 0x80;
+ si_conf->oui[0] = 0xC2;
+
+ memset(si_data, 0, data_size);
+
+ cbd.length = cpu_to_le16(data_size);
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+
+ /* VIDM defaults to 1.
+ * VID Match. If set (b1) then the VID must match, otherwise
+ * any VID is considered a match. VIDM setting is only used
+ * when TG is set to b01.
+ */
+ if (si_conf->id_type == STREAMID_TYPE_NULL) {
+ ether_addr_copy(si_data->dmac, sid->dst_mac);
+ si_data->vid_vidm_tg =
+ cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
+ ((((u16)(sid->tagged) & 0x3) << 14)
+ | ENETC_CBDR_SID_VIDM));
+ } else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
+ ether_addr_copy(si_data->smac, sid->src_mac);
+ si_data->vid_vidm_tg =
+ cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
+ ((((u16)(sid->tagged) & 0x3) << 14)
+ | ENETC_CBDR_SID_VIDM));
+ }
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ kfree(si_data);
+
+ return err;
+}
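Every PSFP table update follows the same control-BD pattern: allocate a DMA-able buffer, map it, point cbd.addr at it and push the descriptor through enetc_send_cmd(). A condensed, hypothetical skeleton of that sequence (the unmap is added here for completeness; the helpers above rely on the buffer being short-lived):

static int example_cbd_xfer(struct enetc_ndev_priv *priv, u16 data_size)
{
	struct enetc_cbd cbd = { .cls = BDCR_CMD_STREAM_IDENTIFY };
	dma_addr_t dma;
	void *buf;
	int err;

	buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dma = dma_map_single(&priv->si->pdev->dev, buf, data_size,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
		kfree(buf);
		return -ENOMEM;
	}

	cbd.addr[0] = lower_32_bits(dma);
	cbd.addr[1] = upper_32_bits(dma);
	cbd.length = cpu_to_le16(data_size);

	err = enetc_send_cmd(priv->si, &cbd);	/* hardware reads/fills buf */

	dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
	kfree(buf);

	return err;
}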
+
+/* Stream Filter Instance Set Descriptor */
+static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_psfp_filter *sfi,
+ u8 enable)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct sfi_conf *sfi_config;
+
+ cbd.index = cpu_to_le16(sfi->index);
+ cbd.cls = BDCR_CMD_STREAM_FILTER;
+ cbd.status_flags = 0x80;
+ cbd.length = cpu_to_le16(1);
+
+ sfi_config = &cbd.sfi_conf;
+ if (!enable)
+ goto exit;
+
+ sfi_config->en = 0x80;
+
+ if (sfi->handle >= 0) {
+ sfi_config->stream_handle =
+ cpu_to_le32(sfi->handle);
+ sfi_config->sthm |= 0x80;
+ }
+
+ sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
+ sfi_config->input_ports = 1 << enetc_get_port(priv);
+
+ /* The priority value which may be matched against the
+ * frame’s priority value to determine a match for this entry.
+ */
+ if (sfi->prio >= 0)
+ sfi_config->multi |= (sfi->prio & 0x7) | 0x8;
+
+ /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
+ * field as being either an MSDU value or an index into the Flow
+ * Meter Instance table.
+ * TODO: the max SDU is not limited yet
+ */
+
+ if (sfi->meter_id >= 0) {
+ sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
+ sfi_config->multi |= 0x80;
+ }
+
+exit:
+ return enetc_send_cmd(priv->si, &cbd);
+}
+
+static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
+ u32 index,
+ struct psfp_streamfilter_counters *cnt)
+{
+ struct enetc_cbd cbd = { .cmd = 2 };
+ struct sfi_counter_data *data_buf;
+ dma_addr_t dma;
+ u16 data_size;
+ int err;
+
+ cbd.index = cpu_to_le16((u16)index);
+ cbd.cmd = 2;
+ cbd.cls = BDCR_CMD_STREAM_FILTER;
+ cbd.status_flags = 0;
+
+ data_size = sizeof(struct sfi_counter_data);
+ data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ dma = dma_map_single(&priv->si->pdev->dev, data_buf,
+ data_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+
+ cbd.length = cpu_to_le16(data_size);
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ goto exit;
+
+ cnt->matching_frames_count =
+ ((u64)le32_to_cpu(data_buf->matchh) << 32)
+ + data_buf->matchl;
+
+ cnt->not_passing_sdu_count =
+ ((u64)le32_to_cpu(data_buf->msdu_droph) << 32)
+ + data_buf->msdu_dropl;
+
+ cnt->passing_sdu_count = cnt->matching_frames_count
+ - cnt->not_passing_sdu_count;
+
+ cnt->not_passing_frames_count =
+ ((u64)le32_to_cpu(data_buf->stream_gate_droph) << 32)
+ + le32_to_cpu(data_buf->stream_gate_dropl);
+
+ cnt->passing_frames_count = cnt->matching_frames_count
+ - cnt->not_passing_sdu_count
+ - cnt->not_passing_frames_count;
+
+ cnt->red_frames_count =
+ ((u64)le32_to_cpu(data_buf->flow_meter_droph) << 32)
+ + le32_to_cpu(data_buf->flow_meter_dropl);
+
+exit:
+ kfree(data_buf);
+ return err;
+}
+
+static u64 get_ptp_now(struct enetc_hw *hw)
+{
+ u64 now_lo, now_hi, now;
+
+ now_lo = enetc_rd(hw, ENETC_SICTR0);
+ now_hi = enetc_rd(hw, ENETC_SICTR1);
+ now = now_lo | now_hi << 32;
+
+ return now;
+}
+
+static int get_start_ns(u64 now, u64 cycle, u64 *start)
+{
+ u64 n;
+
+ if (!cycle)
+ return -EFAULT;
+
+ n = div64_u64(now, cycle);
+
+ *start = (n + 1) * cycle;
+
+ return 0;
+}
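get_start_ns() just rounds the current PTP time up to the next full gate cycle so the control list starts on a clean boundary. A worked example with made-up numbers:

/* now   = 1000000250 ns, cycle = 200000 ns
 * n     = div64_u64(now, cycle) = 5000
 * start = (n + 1) * cycle       = 1000200000 ns,
 * i.e. the first cycle boundary strictly after "now".
 */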
+
+/* Stream Gate Instance Set Descriptor */
+static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_psfp_gate *sgi,
+ u8 enable)
+{
+ struct enetc_cbd cbd = { .cmd = 0 };
+ struct sgi_table *sgi_config;
+ struct sgcl_conf *sgcl_config;
+ struct sgcl_data *sgcl_data;
+ struct sgce *sgce;
+ dma_addr_t dma;
+ u16 data_size;
+ int err, i;
+ u64 now;
+
+ cbd.index = cpu_to_le16(sgi->index);
+ cbd.cmd = 0;
+ cbd.cls = BDCR_CMD_STREAM_GCL;
+ cbd.status_flags = 0x80;
+
+ /* disable */
+ if (!enable)
+ return enetc_send_cmd(priv->si, &cbd);
+
+ if (!sgi->num_entries)
+ return 0;
+
+ if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
+ !sgi->cycletime)
+ return -EINVAL;
+
+ /* enable */
+ sgi_config = &cbd.sgi_table;
+
+ /* Keep open before gate list start */
+ sgi_config->ocgtst = 0x80;
+
+ sgi_config->oipv = (sgi->init_ipv < 0) ?
+ 0x0 : ((sgi->init_ipv & 0x7) | 0x8);
+
+ sgi_config->en = 0x80;
+
+ /* Basic config */
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ return -EINVAL;
+
+ memset(&cbd, 0, sizeof(cbd));
+
+ cbd.index = cpu_to_le16(sgi->index);
+ cbd.cmd = 1;
+ cbd.cls = BDCR_CMD_STREAM_GCL;
+ cbd.status_flags = 0;
+
+ sgcl_config = &cbd.sgcl_conf;
+
+ sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
+
+ data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
+
+ sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!sgcl_data)
+ return -ENOMEM;
+
+ cbd.length = cpu_to_le16(data_size);
+
+ dma = dma_map_single(&priv->si->pdev->dev,
+ sgcl_data, data_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ kfree(sgcl_data);
+ return -ENOMEM;
+ }
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+
+ sgce = &sgcl_data->sgcl[0];
+
+ sgcl_config->agtst = 0x80;
+
+ sgcl_data->ct = cpu_to_le32(sgi->cycletime);
+ sgcl_data->cte = cpu_to_le32(sgi->cycletimext);
+
+ if (sgi->init_ipv >= 0)
+ sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;
+
+ for (i = 0; i < sgi->num_entries; i++) {
+ struct action_gate_entry *from = &sgi->entries[i];
+ struct sgce *to = &sgce[i];
+
+ if (from->gate_state)
+ to->multi |= 0x10;
+
+ if (from->ipv >= 0)
+ to->multi |= ((from->ipv & 0x7) << 5) | 0x08;
+
+ if (from->maxoctets >= 0) {
+ to->multi |= 0x01;
+ to->msdu[0] = from->maxoctets & 0xFF;
+ to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
+ to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
+ }
+
+ to->interval = cpu_to_le32(from->interval);
+ }
+
+ /* If basetime is less than now, calculate start time */
+ now = get_ptp_now(&priv->si->hw);
+
+ if (sgi->basetime < now) {
+ u64 start;
+
+ err = get_start_ns(now, sgi->cycletime, &start);
+ if (err)
+ goto exit;
+ sgcl_data->btl = cpu_to_le32(lower_32_bits(start));
+ sgcl_data->bth = cpu_to_le32(upper_32_bits(start));
+ } else {
+ u32 hi, lo;
+
+ hi = upper_32_bits(sgi->basetime);
+ lo = lower_32_bits(sgi->basetime);
+ sgcl_data->bth = cpu_to_le32(hi);
+ sgcl_data->btl = cpu_to_le32(lo);
+ }
+
+ err = enetc_send_cmd(priv->si, &cbd);
+
+exit:
+ kfree(sgcl_data);
+
+ return err;
+}
+
+static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
+{
+ struct enetc_stream_filter *f;
+
+ hlist_for_each_entry(f, &epsfp.stream_list, node)
+ if (f->sid.index == index)
+ return f;
+
+ return NULL;
+}
+
+static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
+{
+ struct enetc_psfp_gate *g;
+
+ hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
+ if (g->index == index)
+ return g;
+
+ return NULL;
+}
+
+static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
+{
+ struct enetc_psfp_filter *s;
+
+ hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
+ if (s->index == index)
+ return s;
+
+ return NULL;
+}
+
+static struct enetc_psfp_filter
+ *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
+{
+ struct enetc_psfp_filter *s;
+
+ hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
+ if (s->gate_id == sfi->gate_id &&
+ s->prio == sfi->prio &&
+ s->meter_id == sfi->meter_id)
+ return s;
+
+ return NULL;
+}
+
+static int enetc_get_free_index(struct enetc_ndev_priv *priv)
+{
+ u32 max_size = priv->psfp_cap.max_psfp_filter;
+ unsigned long index;
+
+ index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
+ if (index == max_size)
+ return -1;
+
+ return index;
+}
+
+static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+ struct enetc_psfp_filter *sfi;
+ u8 z;
+
+ sfi = enetc_get_filter_by_index(index);
+ WARN_ON(!sfi);
+ z = refcount_dec_and_test(&sfi->refcount);
+
+ if (z) {
+ enetc_streamfilter_hw_set(priv, sfi, false);
+ hlist_del(&sfi->node);
+ kfree(sfi);
+ clear_bit(index, epsfp.psfp_sfi_bitmap);
+ }
+}
+
+static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+ struct enetc_psfp_gate *sgi;
+ u8 z;
+
+ sgi = enetc_get_gate_by_index(index);
+ WARN_ON(!sgi);
+ z = refcount_dec_and_test(&sgi->refcount);
+ if (z) {
+ enetc_streamgate_hw_set(priv, sgi, false);
+ hlist_del(&sgi->node);
+ kfree(sgi);
+ }
+}
+
+static void remove_one_chain(struct enetc_ndev_priv *priv,
+ struct enetc_stream_filter *filter)
+{
+ stream_gate_unref(priv, filter->sgi_index);
+ stream_filter_unref(priv, filter->sfi_index);
+
+ hlist_del(&filter->node);
+ kfree(filter);
+}
+
+static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_streamid *sid,
+ struct enetc_psfp_filter *sfi,
+ struct enetc_psfp_gate *sgi)
+{
+ int err;
+
+ err = enetc_streamid_hw_set(priv, sid, true);
+ if (err)
+ return err;
+
+ if (sfi) {
+ err = enetc_streamfilter_hw_set(priv, sfi, true);
+ if (err)
+ goto revert_sid;
+ }
+
+ err = enetc_streamgate_hw_set(priv, sgi, true);
+ if (err)
+ goto revert_sfi;
+
+ return 0;
+
+revert_sfi:
+ if (sfi)
+ enetc_streamfilter_hw_set(priv, sfi, false);
+revert_sid:
+ enetc_streamid_hw_set(priv, sid, false);
+ return err;
+}
+
+static struct actions_fwd *enetc_check_flow_actions(u64 acts,
+ unsigned int inputkeys)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
+ if (acts == enetc_act_fwd[i].actions &&
+ inputkeys & enetc_act_fwd[i].keys)
+ return &enetc_act_fwd[i];
+
+ return NULL;
+}
+
+static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct enetc_stream_filter *filter, *old_filter;
+ struct enetc_psfp_filter *sfi, *old_sfi;
+ struct enetc_psfp_gate *sgi, *old_sgi;
+ struct flow_action_entry *entry;
+ struct action_gate_entry *e;
+ u8 sfi_overwrite = 0;
+ int entries_size;
+ int i, err;
+
+ if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
+ return -ENOSPC;
+ }
+
+ flow_action_for_each(i, entry, &rule->action)
+ if (entry->id == FLOW_ACTION_GATE)
+ break;
+
+ if (entry->id != FLOW_ACTION_GATE)
+ return -EINVAL;
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->sid.index = f->common.chain_index;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->dst) &&
+ !is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot match on both source and destination MAC");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ if (!is_broadcast_ether_addr(match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on destination MAC not supported");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ ether_addr_copy(filter->sid.dst_mac, match.key->dst);
+ filter->sid.filtertype = STREAMID_TYPE_NULL;
+ }
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ if (!is_broadcast_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on source MAC not supported");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ ether_addr_copy(filter->sid.src_mac, match.key->src);
+ filter->sid.filtertype = STREAMID_TYPE_SMAC;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority) {
+ if (match.mask->vlan_priority !=
+ (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ }
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id != VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ filter->sid.vid = match.key->vlan_id;
+ if (!filter->sid.vid)
+ filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
+ else
+ filter->sid.tagged = STREAMID_VLAN_TAGGED;
+ }
+ } else {
+ filter->sid.tagged = STREAMID_VLAN_ALL;
+ }
+
+ /* parsing gate action */
+ if (entry->gate.index >= priv->psfp_cap.max_psfp_gate) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
+ err = -ENOSPC;
+ goto free_filter;
+ }
+
+ if (entry->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
+ err = -ENOSPC;
+ goto free_filter;
+ }
+
+ entries_size = struct_size(sgi, entries, entry->gate.num_entries);
+ sgi = kzalloc(entries_size, GFP_KERNEL);
+ if (!sgi) {
+ err = -ENOMEM;
+ goto free_filter;
+ }
+
+ refcount_set(&sgi->refcount, 1);
+ sgi->index = entry->gate.index;
+ sgi->init_ipv = entry->gate.prio;
+ sgi->basetime = entry->gate.basetime;
+ sgi->cycletime = entry->gate.cycletime;
+ sgi->num_entries = entry->gate.num_entries;
+
+ e = sgi->entries;
+ for (i = 0; i < entry->gate.num_entries; i++) {
+ e[i].gate_state = entry->gate.entries[i].gate_state;
+ e[i].interval = entry->gate.entries[i].interval;
+ e[i].ipv = entry->gate.entries[i].ipv;
+ e[i].maxoctets = entry->gate.entries[i].maxoctets;
+ }
+
+ filter->sgi_index = sgi->index;
+
+ sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
+ if (!sfi) {
+ err = -ENOMEM;
+ goto free_gate;
+ }
+
+ refcount_set(&sfi->refcount, 1);
+ sfi->gate_id = sgi->index;
+
+ /* flow meter not supported yet */
+ sfi->meter_id = ENETC_PSFP_WILDCARD;
+
+ /* prio is taken from the tc filter priority */
+ if (f->common.prio && f->common.prio <= BIT(3))
+ sfi->prio = f->common.prio - 1;
+ else
+ sfi->prio = ENETC_PSFP_WILDCARD;
+
+ old_sfi = enetc_psfp_check_sfi(sfi);
+ if (!old_sfi) {
+ int index;
+
+ index = enetc_get_free_index(priv);
+ if (index < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
+ err = -ENOSPC;
+ goto free_sfi;
+ }
+
+ sfi->index = index;
+ sfi->handle = index + HANDLE_OFFSET;
+ /* Update the stream filter handle also */
+ filter->sid.handle = sfi->handle;
+ filter->sfi_index = sfi->index;
+ sfi_overwrite = 0;
+ } else {
+ filter->sfi_index = old_sfi->index;
+ filter->sid.handle = old_sfi->handle;
+ sfi_overwrite = 1;
+ }
+
+ err = enetc_psfp_hw_set(priv, &filter->sid,
+ sfi_overwrite ? NULL : sfi, sgi);
+ if (err)
+ goto free_sfi;
+
+ spin_lock(&epsfp.psfp_lock);
+ /* Remove the old node if it exists and replace it with the new one */
+ old_sgi = enetc_get_gate_by_index(filter->sgi_index);
+ if (old_sgi) {
+ refcount_set(&sgi->refcount,
+ refcount_read(&old_sgi->refcount) + 1);
+ hlist_del(&old_sgi->node);
+ kfree(old_sgi);
+ }
+
+ hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);
+
+ if (!old_sfi) {
+ hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
+ set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
+ } else {
+ kfree(sfi);
+ refcount_inc(&old_sfi->refcount);
+ }
+
+ old_filter = enetc_get_stream_by_index(filter->sid.index);
+ if (old_filter)
+ remove_one_chain(priv, old_filter);
+
+ filter->stats.lastused = jiffies;
+ hlist_add_head(&filter->node, &epsfp.stream_list);
+
+ spin_unlock(&epsfp.psfp_lock);
+
+ return 0;
+
+free_sfi:
+ kfree(sfi);
+free_gate:
+ kfree(sgi);
+free_filter:
+ kfree(filter);
+
+ return err;
+}
+
+static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_action *action = &rule->action;
+ struct flow_action_entry *entry;
+ struct actions_fwd *fwd;
+ u64 actions = 0;
+ int i, err;
+
+ if (!flow_action_has_entries(action)) {
+ NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
+ return -EINVAL;
+ }
+
+ flow_action_for_each(i, entry, action)
+ actions |= BIT(entry->id);
+
+ fwd = enetc_check_flow_actions(actions, dissector->used_keys);
+ if (!fwd) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
+ return -EOPNOTSUPP;
+ }
+
+ if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
+ err = enetc_psfp_parse_clsflower(priv, cls_flower);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
+ return err;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct enetc_stream_filter *filter;
+ struct netlink_ext_ack *extack = f->common.extack;
+ int err;
+
+ if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
+ return -ENOSPC;
+ }
+
+ filter = enetc_get_stream_by_index(f->common.chain_index);
+ if (!filter)
+ return -EINVAL;
+
+ err = enetc_streamid_hw_set(priv, &filter->sid, false);
+ if (err)
+ return err;
+
+ remove_one_chain(priv, filter);
+
+ return 0;
+}
+
+static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ return enetc_psfp_destroy_clsflower(priv, f);
+}
+
+static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct psfp_streamfilter_counters counters = {};
+ struct enetc_stream_filter *filter;
+ struct flow_stats stats = {};
+ int err;
+
+ filter = enetc_get_stream_by_index(f->common.chain_index);
+ if (!filter)
+ return -EINVAL;
+
+ err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
+ if (err)
+ return -EINVAL;
+
+ spin_lock(&epsfp.psfp_lock);
+ stats.pkts = counters.matching_frames_count - filter->stats.pkts;
+ stats.lastused = filter->stats.lastused;
+ filter->stats.pkts += stats.pkts;
+ spin_unlock(&epsfp.psfp_lock);
+
+ flow_stats_update(&f->stats, 0x0, stats.pkts, stats.lastused,
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+ return 0;
+}
+
+static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return enetc_config_clsflower(priv, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return enetc_destroy_clsflower(priv, cls_flower);
+ case FLOW_CLS_STATS:
+ return enetc_psfp_get_stats(priv, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static inline void clean_psfp_sfi_bitmap(void)
+{
+ bitmap_free(epsfp.psfp_sfi_bitmap);
+ epsfp.psfp_sfi_bitmap = NULL;
+}
+
+static void clean_stream_list(void)
+{
+ struct enetc_stream_filter *s;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
+ hlist_del(&s->node);
+ kfree(s);
+ }
+}
+
+static void clean_sfi_list(void)
+{
+ struct enetc_psfp_filter *sfi;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
+ hlist_del(&sfi->node);
+ kfree(sfi);
+ }
+}
+
+static void clean_sgi_list(void)
+{
+ struct enetc_psfp_gate *sgi;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
+ hlist_del(&sgi->node);
+ kfree(sgi);
+ }
+}
+
+static void clean_psfp_all(void)
+{
+ /* Disable all list nodes and free all memory */
+ clean_sfi_list();
+ clean_sgi_list();
+ clean_stream_list();
+ epsfp.dev_bitmap = 0;
+ clean_psfp_sfi_bitmap();
+}
+
+int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct net_device *ndev = cb_priv;
+
+ if (!tc_can_offload(ndev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int enetc_psfp_init(struct enetc_ndev_priv *priv)
+{
+ if (epsfp.psfp_sfi_bitmap)
+ return 0;
+
+ epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
+ GFP_KERNEL);
+ if (!epsfp.psfp_sfi_bitmap)
+ return -ENOMEM;
+
+ spin_lock_init(&epsfp.psfp_lock);
+
+ if (list_empty(&enetc_block_cb_list))
+ epsfp.dev_bitmap = 0;
+
+ return 0;
+}
+
+int enetc_psfp_clean(struct enetc_ndev_priv *priv)
+{
+ if (!list_empty(&enetc_block_cb_list))
+ return -EBUSY;
+
+ clean_psfp_all();
+
+ return 0;
+}
+
+int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct flow_block_offload *f = type_data;
+ int err;
+
+ err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
+ enetc_setup_tc_block_cb,
+ ndev, ndev, true);
+ if (err)
+ return err;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ set_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+ if (!epsfp.dev_bitmap)
+ clean_psfp_all();
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index e74dd1f86bba..a6cdd5b61921 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -376,8 +376,7 @@ struct bufdesc_ex {
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)
-#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
-#define FEC_NAPI_IMASK FEC_ENET_MII
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF)
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
/* ENET interrupt coalescing macro define */
@@ -543,7 +542,6 @@ struct fec_enet_private {
int link;
int full_duplex;
int speed;
- struct completion mdio_done;
int irq[FEC_IRQ_NUM];
bool bufdesc_ex;
int pause_flag;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index dc6f8763a5d4..2e209142f2d1 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -976,8 +976,8 @@ fec_restart(struct net_device *ndev)
writel((__force u32)cpu_to_be32(temp_mac[1]),
fep->hwp + FEC_ADDR_HIGH);
- /* Clear any outstanding interrupt. */
- writel(0xffffffff, fep->hwp + FEC_IEVENT);
+ /* Clear any outstanding interrupt, except MDIO. */
+ writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
fec_enet_bd_init(ndev);
@@ -1123,7 +1123,7 @@ fec_restart(struct net_device *ndev)
if (fep->link)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
else
- writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+ writel(0, fep->hwp + FEC_IMASK);
/* Init the interrupt coalescing */
fec_enet_itr_coal_init(ndev);
@@ -1652,6 +1652,10 @@ fec_enet_interrupt(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
int_events = readl(fep->hwp + FEC_IEVENT);
+
+ /* Don't clear MDIO events, we poll for those */
+ int_events &= ~FEC_ENET_MII;
+
writel(int_events, fep->hwp + FEC_IEVENT);
fec_enet_collect_events(fep, int_events);
@@ -1659,16 +1663,12 @@ fec_enet_interrupt(int irq, void *dev_id)
ret = IRQ_HANDLED;
if (napi_schedule_prep(&fep->napi)) {
- /* Disable the NAPI interrupts */
- writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
+ /* Disable interrupts */
+ writel(0, fep->hwp + FEC_IMASK);
__napi_schedule(&fep->napi);
}
}
- if (int_events & FEC_ENET_MII) {
- ret = IRQ_HANDLED;
- complete(&fep->mdio_done);
- }
return ret;
}
@@ -1818,11 +1818,24 @@ static void fec_enet_adjust_link(struct net_device *ndev)
phy_print_status(phy_dev);
}
+static int fec_enet_mdio_wait(struct fec_enet_private *fep)
+{
+ uint ievent;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
+ ievent & FEC_ENET_MII, 2, 30000);
+
+ if (!ret)
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+ return ret;
+}
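The MDIO code now busy-polls the IEVENT register for the MII-done bit instead of sleeping on a completion woken from the interrupt handler; readl_poll_timeout_atomic() reads the register every 2 us for at most 30 ms and returns 0 or -ETIMEDOUT. A minimal illustration of the same iopoll pattern (register and bit names are placeholders, not FEC definitions):

#include <linux/iopoll.h>

static int example_wait_done(void __iomem *status_reg)
{
	u32 val;

	/* poll every 2 us, give up after 30 ms */
	return readl_poll_timeout_atomic(status_reg, val,
					 val & EXAMPLE_DONE_BIT, 2, 30000);
}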
+
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
- unsigned long time_left;
int ret = 0, frame_start, frame_addr, frame_op;
bool is_c45 = !!(regnum & MII_ADDR_C45);
@@ -1830,8 +1843,6 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (ret < 0)
return ret;
- reinit_completion(&fep->mdio_done);
-
if (is_c45) {
frame_start = FEC_MMFR_ST_C45;
@@ -1843,11 +1854,9 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
netdev_err(fep->netdev, "MDIO address write timeout\n");
- ret = -ETIMEDOUT;
goto out;
}
@@ -1866,11 +1875,9 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
netdev_err(fep->netdev, "MDIO read timeout\n");
- ret = -ETIMEDOUT;
goto out;
}
@@ -1888,7 +1895,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
- unsigned long time_left;
int ret, frame_start, frame_addr;
bool is_c45 = !!(regnum & MII_ADDR_C45);
@@ -1898,8 +1904,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
else
ret = 0;
- reinit_completion(&fep->mdio_done);
-
if (is_c45) {
frame_start = FEC_MMFR_ST_C45;
@@ -1911,11 +1915,9 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
netdev_err(fep->netdev, "MDIO address write timeout\n");
- ret = -ETIMEDOUT;
goto out;
}
} else {
@@ -1931,12 +1933,9 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret)
netdev_err(fep->netdev, "MDIO write timeout\n");
- ret = -ETIMEDOUT;
- }
out:
pm_runtime_mark_last_busy(dev);
@@ -2065,9 +2064,11 @@ static int fec_enet_mii_init(struct platform_device *pdev)
static struct mii_bus *fec0_mii_bus;
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ bool suppress_preamble = false;
struct device_node *node;
int err = -ENXIO;
u32 mii_speed, holdtime;
+ u32 bus_freq;
/*
* The i.MX28 dual fec interfaces are not equal.
@@ -2095,15 +2096,23 @@ static int fec_enet_mii_init(struct platform_device *pdev)
return -ENOENT;
}
+ bus_freq = 2500000; /* 2.5MHz by default */
+ node = of_get_child_by_name(pdev->dev.of_node, "mdio");
+ if (node) {
+ of_property_read_u32(node, "clock-frequency", &bus_freq);
+ suppress_preamble = of_property_read_bool(node,
+ "suppress-preamble");
+ }
+
/*
- * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+ * Set MII speed (= clk_get_rate() / 2 * phy_speed)
*
* The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
* for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
* Reference Manual has an error on this, and gets fixed on i.MX6Q
* document.
*/
- mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
+ mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
if (fep->quirks & FEC_QUIRK_ENET_MAC)
mii_speed--;
if (mii_speed > 63) {
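With the optional "clock-frequency" MDIO bus property the MDC divider is no longer hard-wired to a 2.5 MHz target. A worked example, assuming a 66 MHz ipg clock and the default 2.5 MHz bus:

/* mii_speed = DIV_ROUND_UP(66000000, 2500000 * 2) = 14
 * ENET-MAC parts subtract one           -> MII_SPEED = 13
 * resulting MDC = 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz, i.e. <= 2.5 MHz
 */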
@@ -2130,8 +2139,24 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->phy_speed = mii_speed << 1 | holdtime << 8;
+ if (suppress_preamble)
+ fep->phy_speed |= BIT(7);
+
+ /* Clear MMFR to avoid generating an MII event when writing MSCR.
+ * MII event generation condition:
+ * - writing MSCR:
+ * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
+ * mscr_reg_data_in[7:0] != 0
+ * - writing MMFR:
+ * - mscr[7:0]_not_zero
+ */
+ writel(0, fep->hwp + FEC_MII_DATA);
+
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ /* Clear any pending transaction complete indication */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
fep->mii_bus = mdiobus_alloc();
if (fep->mii_bus == NULL) {
err = -ENOMEM;
@@ -2146,7 +2171,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
- node = of_get_child_by_name(pdev->dev.of_node, "mdio");
err = of_mdiobus_register(fep->mii_bus, node);
of_node_put(node);
if (err)
@@ -3674,7 +3698,6 @@ fec_probe(struct platform_device *pdev)
fep->irq[i] = irq;
}
- init_completion(&fep->mdio_done);
ret = fec_enet_mii_init(pdev);
if (ret)
goto failed_mii_init;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 8aace2de0cc9..9a907947ba19 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -697,9 +697,9 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
return rc;
if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII)))
- is_c45 = 1;
+ is_c45 = true;
else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII)))
- is_c45 = 0;
+ is_c45 = false;
else
return -ENODATA;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 948e67ef30fd..1ffe8fac702d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -24,7 +24,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
- HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate respone to VF */
+ HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */
HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
@@ -45,6 +45,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */
+ HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
@@ -70,6 +71,10 @@ enum hclge_mbx_vlan_cfg_subcode {
HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
};
+enum hclge_mbx_tbl_cfg_subcode {
+ HCLGE_MBX_VPORT_LIST_CLEAR,
+};
+
#define HCLGE_MBX_MAX_MSG_SIZE 14
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 5587605d6deb..7506cabaa16e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -233,7 +233,6 @@ struct hnae3_ae_dev {
struct list_head node;
u32 flag;
unsigned long hw_err_reset_req;
- enum hnae3_reset_type reset_type;
void *priv;
};
@@ -270,6 +269,8 @@ struct hnae3_ae_dev {
* Set loopback
* set_promisc_mode
* Set promisc mode
+ * request_update_promisc_mode
+ * request to hclge(vf) to update promisc mode
* set_mtu()
* set mtu
* get_pauseparam()
@@ -354,8 +355,6 @@ struct hnae3_ae_dev {
* Set vlan filter config of Ports
* set_vf_vlan_filter()
* Set vlan filter config of vf
- * restore_vlan_table()
- * Restore vlan filter entries after reset
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
* set_gro_en
@@ -375,6 +374,8 @@ struct hnae3_ae_dev {
* Set the max tx rate of specified vf.
* set_vf_mac
* Configure the default MAC for specified VF
+ * get_module_eeprom
+ * Get the optical module eeprom info.
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -408,6 +409,7 @@ struct hnae3_ae_ops {
int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc);
+ void (*request_update_promisc_mode)(struct hnae3_handle *handle);
int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
void (*get_pauseparam)(struct hnae3_handle *handle,
@@ -525,7 +527,6 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd);
int (*get_fd_all_rules)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd, u32 *rule_locs);
- int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
@@ -539,7 +540,6 @@ struct hnae3_ae_ops {
void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
int (*mac_connect_phy)(struct hnae3_handle *handle);
void (*mac_disconnect_phy)(struct hnae3_handle *handle);
- void (*restore_vlan_table)(struct hnae3_handle *handle);
int (*get_vf_config)(struct hnae3_handle *handle, int vf,
struct ifla_vf_info *ivf);
int (*set_vf_link_state)(struct hnae3_handle *handle, int vf,
@@ -550,6 +550,9 @@ struct hnae3_ae_ops {
int (*set_vf_rate)(struct hnae3_handle *handle, int vf,
int min_tx_rate, int max_tx_rate, bool force);
int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
+ int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset,
+ u32 len, u8 *data);
+ bool (*get_cmdq_stat)(struct hnae3_handle *handle);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index e1d88095a77e..fe7fb565da19 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -262,6 +262,8 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump mac tnl status\n");
dev_info(&h->pdev->dev, "dump loopback\n");
dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
+ dev_info(&h->pdev->dev, "dump uc mac list <func id>\n");
+ dev_info(&h->pdev->dev, "dump mc mac list <func id>\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
@@ -270,7 +272,7 @@ static void hns3_dbg_help(struct hnae3_handle *h)
" [igu egu <port_id>] [rpu <tc_queue_num>]",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
strncat(printf_buf + strlen(printf_buf),
- " [rtc] [ppp] [rcb] [tqp <queue_num>]]\n",
+ " [rtc] [ppp] [rcb] [tqp <queue_num>] [mac]]\n",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
dev_info(&h->pdev->dev, "%s", printf_buf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index da98fd7c8eca..9fe40c7773b4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -15,7 +15,6 @@
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
-#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
@@ -41,10 +40,8 @@
} while (0)
static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
-static void hns3_remove_hw_addr(struct net_device *netdev);
static const char hns3_driver_name[] = "hns3";
-const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
@@ -550,6 +547,13 @@ static int hns3_nic_uc_unsync(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+	/* need to ignore the request of removing the device address, because
+	 * we store the device address and other addresses of the uc list
+	 * in the function's mac filter list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
if (h->ae_algo->ops->rm_uc_addr)
return h->ae_algo->ops->rm_uc_addr(h, addr);
@@ -597,34 +601,25 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
u8 new_flags;
- int ret;
new_flags = hns3_get_netdev_flags(netdev);
- ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
- if (ret) {
- netdev_err(netdev, "sync uc address fail\n");
- if (ret == -ENOSPC)
- new_flags |= HNAE3_OVERFLOW_UPE;
- }
-
- if (netdev->flags & IFF_MULTICAST) {
- ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
- hns3_nic_mc_unsync);
- if (ret) {
- netdev_err(netdev, "sync mc address fail\n");
- if (ret == -ENOSPC)
- new_flags |= HNAE3_OVERFLOW_MPE;
- }
- }
+ __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
+ __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);
/* User mode Promisc mode enable and vlan filtering is disabled to
- * let all packets in. MAC-VLAN Table overflow Promisc enabled and
- * vlan fitering is enabled
+ * let all packets in.
*/
- hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
h->netdev_flags = new_flags;
- hns3_update_promisc_mode(netdev, new_flags);
+ hns3_request_update_promisc_mode(h);
+}
+
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (ops->request_update_promisc_mode)
+ ops->request_update_promisc_mode(handle);
}
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
@@ -1450,9 +1445,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
bd_num += ret;
- if (!skb_has_frag_list(skb))
- goto out;
-
skb_walk_frags(skb, frag_skb) {
ret = hns3_fill_skb_to_desc(ring, frag_skb,
DESC_TYPE_FRAGLIST_SKB);
@@ -1461,7 +1453,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
bd_num += ret;
}
-out:
+
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
@@ -2107,7 +2099,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
- ae_dev->reset_type = HNAE3_NONE_RESET;
hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
@@ -3909,9 +3900,11 @@ static int hns3_init_mac_addr(struct net_device *netdev)
eth_hw_addr_random(netdev);
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
- } else {
+ } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
ether_addr_copy(netdev->dev_addr, mac_addr_temp);
ether_addr_copy(netdev->perm_addr, mac_addr_temp);
+ } else {
+ return 0;
}
if (h->ae_algo->ops->set_mac_addr)
@@ -3939,17 +3932,6 @@ static void hns3_uninit_phy(struct net_device *netdev)
h->ae_algo->ops->mac_disconnect_phy(h);
}
-static int hns3_restore_fd_rules(struct net_device *netdev)
-{
- struct hnae3_handle *h = hns3_get_handle(netdev);
- int ret = 0;
-
- if (h->ae_algo->ops->restore_fd_rules)
- ret = h->ae_algo->ops->restore_fd_rules(h);
-
- return ret;
-}
-
static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -4121,8 +4103,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_remove_hw_addr(netdev);
-
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
@@ -4193,56 +4173,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
return hns3_nic_set_real_num_queue(ndev);
}
-static int hns3_recover_hw_addr(struct net_device *ndev)
-{
- struct netdev_hw_addr_list *list;
- struct netdev_hw_addr *ha, *tmp;
- int ret = 0;
-
- netif_addr_lock_bh(ndev);
- /* go through and sync uc_addr entries to the device */
- list = &ndev->uc;
- list_for_each_entry_safe(ha, tmp, &list->list, list) {
- ret = hns3_nic_uc_sync(ndev, ha->addr);
- if (ret)
- goto out;
- }
-
- /* go through and sync mc_addr entries to the device */
- list = &ndev->mc;
- list_for_each_entry_safe(ha, tmp, &list->list, list) {
- ret = hns3_nic_mc_sync(ndev, ha->addr);
- if (ret)
- goto out;
- }
-
-out:
- netif_addr_unlock_bh(ndev);
- return ret;
-}
-
-static void hns3_remove_hw_addr(struct net_device *netdev)
-{
- struct netdev_hw_addr_list *list;
- struct netdev_hw_addr *ha, *tmp;
-
- hns3_nic_uc_unsync(netdev, netdev->dev_addr);
-
- netif_addr_lock_bh(netdev);
- /* go through and unsync uc_addr entries to the device */
- list = &netdev->uc;
- list_for_each_entry_safe(ha, tmp, &list->list, list)
- hns3_nic_uc_unsync(netdev, ha->addr);
-
- /* go through and unsync mc_addr entries to the device */
- list = &netdev->mc;
- list_for_each_entry_safe(ha, tmp, &list->list, list)
- if (ha->refcount > 1)
- hns3_nic_mc_unsync(netdev, ha->addr);
-
- netif_addr_unlock_bh(netdev);
-}
-
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_clean != ring->next_to_use) {
@@ -4401,7 +4331,6 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv)
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct net_device *ndev = kinfo->netdev;
struct hns3_nic_priv *priv = netdev_priv(ndev);
@@ -4409,15 +4338,6 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return 0;
- /* it is cumbersome for hardware to pick-and-choose entries for deletion
- * from table space. Hence, for function reset software intervention is
- * required to delete the entries
- */
- if (hns3_dev_ongoing_func_reset(ae_dev)) {
- hns3_remove_hw_addr(ndev);
- hns3_del_all_fd_rules(ndev, false);
- }
-
if (!netif_running(ndev))
return 0;
@@ -4484,6 +4404,9 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
goto err_init_irq_fail;
}
+ if (!hns3_is_phys_func(handle->pdev))
+ hns3_init_mac_addr(netdev);
+
ret = hns3_client_start(handle);
if (ret) {
dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
@@ -4509,33 +4432,6 @@ err_put_ring:
return ret;
}
-static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
-{
- struct net_device *netdev = handle->kinfo.netdev;
- bool vlan_filter_enable;
- int ret;
-
- ret = hns3_init_mac_addr(netdev);
- if (ret)
- return ret;
-
- ret = hns3_recover_hw_addr(netdev);
- if (ret)
- return ret;
-
- ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
- if (ret)
- return ret;
-
- vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
- hns3_enable_vlan_filter(netdev, vlan_filter_enable);
-
- if (handle->ae_algo->ops->restore_vlan_table)
- handle->ae_algo->ops->restore_vlan_table(handle);
-
- return hns3_restore_fd_rules(netdev);
-}
-
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
@@ -4585,9 +4481,6 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
case HNAE3_UNINIT_CLIENT:
ret = hns3_reset_notify_uninit_enet(handle);
break;
- case HNAE3_RESTORE_CLIENT:
- ret = hns3_reset_notify_restore_enet(handle);
- break;
default:
break;
}
@@ -4765,4 +4658,3 @@ MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
-MODULE_VERSION(HNS3_MOD_VERSION);
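Two of the hns3_enet.c hunks above deserve a note. Dropping the skb_has_frag_list() check before the fragment walk is safe because skb_walk_frags() already degenerates to an empty loop for an skb without a frag list; in <linux/skbuff.h> it is essentially:

#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

Likewise, hns3_nic_uc_unsync() now skips netdev->dev_addr because the device address is kept in the function's MAC filter list and must not be removed by address-list syncing.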
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index abefd7a179f7..60f82ad89957 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -8,10 +8,6 @@
#include "hnae3.h"
-#define HNS3_MOD_VERSION "1.0"
-
-extern const char hns3_driver_version[];
-
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
@@ -50,23 +46,6 @@ enum hns3_nic_state {
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
#define HNS3_RING_EN_REG 0x00090
-#define HNS3_RING_T0_BE_RST 0x00094
-#define HNS3_RING_COULD_BE_RST 0x00098
-#define HNS3_RING_WRR_WEIGHT_REG 0x0009c
-
-#define HNS3_RING_INTMSK_RXWL_REG 0x000A0
-#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4
-#define HNS3_RX_RING_INT_STS_REG 0x000A8
-#define HNS3_RING_INTMSK_TXWL_REG 0x000AC
-#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0
-#define HNS3_TX_RING_INT_STS_REG 0x000B4
-#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8
-#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC
-#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4
-#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8
-
-#define HNS3_RING_MB_CTRL_REG 0x00100
-#define HNS3_RING_MB_DATA_BASE_REG 0x00200
#define HNS3_TX_REG_OFFSET 0x40
@@ -580,15 +559,6 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
writel(value, reg_addr + reg);
}
-static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
-{
- return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
- ae_dev->reset_type == HNAE3_FLR_RESET ||
- ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
- ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
- ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
-}
-
#define hns3_read_dev(a, reg) \
hns3_read_reg((a)->io_base, (reg))
@@ -662,6 +632,7 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 28b81f24afa1..6b1545f982aa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -4,6 +4,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/phy.h>
+#include <linux/sfp.h>
#include "hns3_enet.h"
@@ -12,6 +13,11 @@ struct hns3_stats {
int stats_offset;
};
+struct hns3_sfp_type {
+ u8 type;
+ u8 ext_type;
+};
+
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
.stats_string = _string, \
@@ -99,7 +105,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
h->ae_algo->ops->set_promisc_mode(h, true, true);
} else {
/* recover promisc mode before loopback test */
- hns3_update_promisc_mode(ndev, h->netdev_flags);
+ hns3_request_update_promisc_mode(h);
vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
hns3_enable_vlan_filter(ndev, vlan_filter_enable);
}
@@ -546,10 +552,6 @@ static void hns3_get_drvinfo(struct net_device *netdev,
return;
}
- strncpy(drvinfo->version, hns3_driver_version,
- sizeof(drvinfo->version));
- drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
-
strncpy(drvinfo->driver, h->pdev->driver->name,
sizeof(drvinfo->driver));
drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
@@ -771,8 +773,13 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
/* Only support ksettings_set for netdev with phy attached for now */
- if (netdev->phydev)
+ if (netdev->phydev) {
+ if (cmd->base.speed == SPEED_1000 &&
+ cmd->base.autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+ }
if (handle->pdev->revision == 0x20)
return -EOPNOTSUPP;
@@ -1390,6 +1397,73 @@ static int hns3_set_fecparam(struct net_device *netdev,
return ops->set_fec(handle, fec_mode);
}
+static int hns3_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+#define HNS3_SFF_8636_V1_3 0x03
+
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hns3_sfp_type sfp_type;
+ int ret;
+
+ if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ memset(&sfp_type, 0, sizeof(sfp_type));
+ ret = ops->get_module_eeprom(handle, 0, sizeof(sfp_type) / sizeof(u8),
+ (u8 *)&sfp_type);
+ if (ret)
+ return ret;
+
+ switch (sfp_type.type) {
+ case SFF8024_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case SFF8024_ID_QSFP_8438:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ break;
+ case SFF8024_ID_QSFP_8436_8636:
+ if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ }
+ break;
+ case SFF8024_ID_QSFP28_8636:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ break;
+ default:
+ netdev_err(netdev, "Optical module unknown: %#x\n",
+ sfp_type.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hns3_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ return ops->get_module_eeprom(handle, ee->offset, ee->len, data);
+}
+
#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
ETHTOOL_COALESCE_USE_ADAPTIVE | \
ETHTOOL_COALESCE_RX_USECS_HIGH | \
@@ -1453,6 +1527,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.set_msglevel = hns3_set_msglevel,
.get_fecparam = hns3_get_fecparam,
.set_fecparam = hns3_set_fecparam,
+ .get_module_info = hns3_get_module_info,
+ .get_module_eeprom = hns3_get_module_eeprom,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
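The two new ethtool callbacks work as a pair: get_module_info reports the module type and total EEPROM size, then the ethtool core sizes a buffer and asks get_module_eeprom for the requested window. Roughly, the core's handling of ETHTOOL_GMODULEEEPROM looks like the sketch below (helper name and exact checks are illustrative, not part of this patch):

/* illustrative approximation of the ethtool core flow, not driver code */
static int read_module_eeprom_sketch(struct net_device *dev,
				     u32 offset, u32 len, u8 *out)
{
	const struct ethtool_ops *ops = dev->ethtool_ops;
	struct ethtool_modinfo modinfo = { .cmd = ETHTOOL_GMODULEINFO };
	struct ethtool_eeprom ee = { .cmd = ETHTOOL_GMODULEEEPROM };
	int ret;

	ret = ops->get_module_info(dev, &modinfo);
	if (ret)
		return ret;

	/* the window must fit within the size advertised by get_module_info */
	if (!len || offset + len > modinfo.eeprom_len)
		return -EINVAL;

	ee.offset = offset;
	ee.len = len;
	return ops->get_module_eeprom(dev, &ee, out);
}

This is the path exercised by "ethtool -m <dev>" from userspace.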
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index 0fb61d440d3b..6c28c8f6292c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 96498d9b4754..e3bab8f3847f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -184,11 +184,11 @@ enum hclge_opcode_type {
/* TQP commands */
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
- HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
+ HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
- HCLGE_OPC_QUERY_RX_STATUS = 0x0B13,
+ HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
@@ -270,6 +270,8 @@ enum hclge_opcode_type {
HCLGE_OPC_M7_COMPAT_CFG = 0x701A,
/* SFP command */
+ HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
+ HCLGE_OPC_GET_SFP_EXIST = 0x7101,
HCLGE_OPC_GET_SFP_INFO = 0x7104,
/* Error INT commands */
@@ -733,31 +735,6 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
-struct hclge_mac_vlan_add_cmd {
- __le16 flags;
- __le16 mac_addr_hi16;
- __le32 mac_addr_lo32;
- __le32 mac_addr_msk_hi32;
- __le16 mac_addr_msk_lo16;
- __le16 vlan_tag;
- __le16 ingress_port;
- __le16 egress_port;
- u8 rsv[4];
-};
-
-#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0
-struct hclge_mac_vlan_remove_cmd {
- __le16 flags;
- __le16 mac_addr_hi16;
- __le32 mac_addr_lo32;
- __le32 mac_addr_msk_hi32;
- __le16 mac_addr_msk_lo16;
- __le16 vlan_tag;
- __le16 ingress_port;
- __le16 egress_port;
- u8 rsv[4];
-};
-
struct hclge_vlan_filter_ctrl_cmd {
u8 vlan_type;
u8 vlan_fe;
@@ -1079,6 +1056,19 @@ struct hclge_firmware_compat_cmd {
u8 rsv[20];
};
+#define HCLGE_SFP_INFO_CMD_NUM 6
+#define HCLGE_SFP_INFO_BD0_LEN 20
+#define HCLGE_SFP_INFO_BDX_LEN 24
+#define HCLGE_SFP_INFO_MAX_LEN \
+ (HCLGE_SFP_INFO_BD0_LEN + \
+ (HCLGE_SFP_INFO_CMD_NUM - 1) * HCLGE_SFP_INFO_BDX_LEN)
+
+struct hclge_sfp_info_bd0_cmd {
+ __le16 offset;
+ __le16 read_len;
+ u8 data[HCLGE_SFP_INFO_BD0_LEN];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
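The new HCLGE_SFP_INFO_* constants bound one HCLGE_OPC_GET_SFP_EEPROM transaction: BD0 carries 20 data bytes after its offset/read_len header and each of the other five descriptors carries 24, so a single command returns at most 20 + 5 * 24 = 140 bytes. A larger ethtool request therefore has to be chunked; a minimal sketch of such a loop, assuming a hypothetical hclge_get_sfp_eeprom_cmd() helper that fills exactly chunk bytes:

/* sketch: chunk an arbitrary EEPROM window into firmware-sized reads */
static int hclge_read_module_eeprom_sketch(struct hclge_dev *hdev, u32 offset,
					   u32 len, u8 *data)
{
	u32 done = 0;

	while (done < len) {
		u16 chunk = min_t(u32, len - done, HCLGE_SFP_INFO_MAX_LEN);
		int ret;

		/* hypothetical helper sending HCLGE_SFP_INFO_CMD_NUM descriptors */
		ret = hclge_get_sfp_eeprom_cmd(hdev, offset + done, chunk,
					       data + done);
		if (ret)
			return ret;

		done += chunk;
	}

	return 0;
}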
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 17228288d4df..26f6f068b01d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -143,7 +143,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return;
}
- buf_len = sizeof(struct hclge_desc) * bd_num;
+ buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
if (!desc_src)
return;
@@ -173,6 +173,114 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
kfree(desc_src);
}
+static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev)
+{
+ struct hclge_config_mac_mode_cmd *req;
+ struct hclge_desc desc;
+ u32 loop_en;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac enable status, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_mac_mode_cmd *)desc.data;
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+
+ dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
+ dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
+ dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
+ dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
+ dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
+ dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
+ dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
+ dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
+}
+
+static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev)
+{
+ struct hclge_config_max_frm_size_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac frame size, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_max_frm_size_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "max_frame_size: %u\n",
+ le16_to_cpu(req->max_frm_size));
+ dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size);
+}
+
+static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev)
+{
+#define HCLGE_MAC_SPEED_SHIFT 0
+#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
+#define HCLGE_MAC_DUPLEX_SHIFT 7
+
+ struct hclge_config_mac_speed_dup_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac speed duplex, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ dev_info(&hdev->pdev->dev, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
+}
+
+static void hclge_dbg_dump_mac(struct hclge_dev *hdev)
+{
+ hclge_dbg_dump_mac_enable_status(hdev);
+
+ hclge_dbg_dump_mac_frame_size(hdev);
+
+ hclge_dbg_dump_mac_speed_duplex(hdev);
+}
+
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
struct device *dev = &hdev->pdev->dev;
@@ -304,6 +412,11 @@ static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
}
}
+ if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) {
+ hclge_dbg_dump_mac(hdev);
+ has_dump = true;
+ }
+
if (strncmp(cmd_buf, "dcb", 3) == 0) {
hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
has_dump = true;
@@ -578,7 +691,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
enum hclge_opcode_type cmd;
struct hclge_desc desc;
int queue_id, group_id;
- u32 qset_maping[32];
+ u32 qset_mapping[32];
int tc_id, qset_id;
int pri_id, ret;
u32 i;
@@ -633,7 +746,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
if (ret)
goto err_tm_map_cmd_send;
- qset_maping[group_id] =
+ qset_mapping[group_id] =
le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
}
@@ -643,11 +756,11 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
for (group_id = 0; group_id < 4; group_id++) {
dev_info(&hdev->pdev->dev,
"%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
- group_id * 256, qset_maping[(u32)(i + 7)],
- qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)],
- qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)],
- qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)],
- qset_maping[i]);
+ group_id * 256, qset_mapping[(u32)(i + 7)],
+ qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)],
+ qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)],
+ qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)],
+ qset_mapping[i]);
i += 8;
}
@@ -1145,6 +1258,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
+#define HCLGE_NCL_CONFIG_PARAM_NUM 2
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
@@ -1154,13 +1268,17 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
int ret;
ret = sscanf(cmd_buf, "%x %x", &offset, &length);
- if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
- length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
- dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
+ if (ret != HCLGE_NCL_CONFIG_PARAM_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Too few parameters, num = %d.\n", ret);
return;
}
- if (offset < 0 || length <= 0) {
- dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
+
+ if (offset < 0 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
+ length <= 0 || length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
+ dev_err(&hdev->pdev->dev,
+ "Invalid input, offset = %d, length = %d.\n",
+ offset, length);
return;
}
@@ -1328,6 +1446,49 @@ static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
hclge_dbg_dump_qs_shaper_single(hdev, qsid);
}
+static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf,
+ bool is_unicast)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_vport *vport;
+ struct list_head *list;
+ u32 func_id;
+ int ret;
+
+ ret = kstrtouint(cmd_buf, 0, &func_id);
+ if (ret < 0) {
+ dev_err(&hdev->pdev->dev,
+ "dump mac list: bad command string, ret = %d\n", ret);
+ return -EINVAL;
+ }
+
+ if (func_id >= hdev->num_alloc_vport) {
+ dev_err(&hdev->pdev->dev,
+ "function id(%u) is out of range(0-%u)\n", func_id,
+ hdev->num_alloc_vport - 1);
+ return -EINVAL;
+ }
+
+ vport = &hdev->vport[func_id];
+
+ list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
+
+ dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n",
+ func_id, is_unicast ? "uc" : "mc");
+ dev_info(&hdev->pdev->dev, "mac address state\n");
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ dev_info(&hdev->pdev->dev, "%pM %d\n",
+ mac_node->mac_addr, mac_node->state);
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ return 0;
+}
+
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG "dump reg"
@@ -1372,6 +1533,14 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
hclge_dbg_dump_qs_shaper(hdev,
&cmd_buf[sizeof("dump qs shaper")]);
+ } else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) {
+ hclge_dbg_dump_mac_list(hdev,
+ &cmd_buf[sizeof("dump uc mac list")],
+ true);
+ } else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) {
+ hclge_dbg_dump_mac_list(hdev,
+ &cmd_buf[sizeof("dump mc mac list")],
+ false);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
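The new "dump uc/mc mac list" handler walks per-vport lists of hclge_mac_node entries under mac_list_lock. The node itself is defined in hclge_main.h, outside this excerpt; the layout assumed by the dump is roughly:

/* assumed layout, for illustration only */
enum HCLGE_MAC_NODE_STATE {
	HCLGE_MAC_TO_ADD,	/* not yet written to hardware */
	HCLGE_MAC_TO_DEL,	/* queued for removal from hardware */
	HCLGE_MAC_ACTIVE,	/* present in the hardware MAC table */
};

struct hclge_mac_node {
	struct list_head node;			/* linked into vport->uc/mc_mac_list */
	enum HCLGE_MAC_NODE_STATE state;	/* the %d printed by the dump */
	u8 mac_addr[ETH_ALEN];			/* the %pM printed by the dump */
};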
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 876fd81ad2f1..608fe26fc3fe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -16,7 +16,6 @@
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
-#define HCLGE_VECTOR0_PF_OTHER_INT_STS_REG 0x20800
#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00
#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a758f9ae32be..b796d3fb5b0b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -62,14 +62,16 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
-static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
- u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);
+static void hclge_sync_mac_table(struct hclge_dev *hdev);
+static void hclge_restore_hw_table(struct hclge_dev *hdev);
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
+
static struct hnae3_ae_algo ae_algo;
static struct workqueue_struct *hclge_wq;
@@ -550,7 +552,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -570,7 +572,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_TX_STATUS,
+ HCLGE_OPC_QUERY_TX_STATS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -1361,10 +1363,8 @@ static int hclge_configure(struct hclge_dev *hdev)
int ret;
ret = hclge_get_cfg(hdev, &cfg);
- if (ret) {
- dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
+ if (ret)
return ret;
- }
hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
@@ -1687,6 +1687,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
INIT_LIST_HEAD(&vport->vlan_list);
INIT_LIST_HEAD(&vport->uc_mac_list);
INIT_LIST_HEAD(&vport->mc_mac_list);
+ spin_lock_init(&vport->mac_list_lock);
if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -2965,13 +2966,11 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
- u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
+ u32 cmdq_src_reg, msix_src_reg;
/* fetch the events from their corresponding regs */
- rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
- msix_src_reg = hclge_read_dev(&hdev->hw,
- HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
+ msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
/* Assumption: If by any chance reset and mailbox events are reported
* together then we will only process reset event in this go and will
@@ -2981,7 +2980,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*
* check for vector0 reset event sources
*/
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
@@ -2990,7 +2989,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
@@ -3480,7 +3479,7 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
/* first, resolve any unknown reset type to the known type(s) */
if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
- HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
+ HCLGE_MISC_VECTOR_INT_STS);
/* we will intentionally ignore any errors from this function
* as we will end up in *some* reset request in any case
*/
@@ -3729,22 +3728,13 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- return ret;
-
- return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
}
static int hclge_reset_prepare(struct hclge_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
- /* Initialize ae_dev reset status as well, in case enet layer wants to
- * know if device is undergoing reset
- */
- ae_dev->reset_type = hdev->reset_type;
hdev->rst_stats.reset_cnt++;
/* perform reset of the stack & ae device for a client */
ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
@@ -3806,7 +3796,6 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hdev->last_reset_time = jiffies;
hdev->rst_stats.reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
- ae_dev->reset_type = HNAE3_NONE_RESET;
clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
/* if default_reset_request has a higher level reset request,
@@ -3973,6 +3962,8 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev)
* updated when it is triggered by mbx.
*/
hclge_update_link_status(hdev);
+ hclge_sync_mac_table(hdev);
+ hclge_sync_promisc_mode(hdev);
if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
delta = jiffies - hdev->last_serv_processed;
@@ -4722,7 +4713,8 @@ static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
- "Set promisc mode fail, status is %d.\n", ret);
+ "failed to set vport %d promisc mode, ret = %d.\n",
+ param->vf_id, ret);
return ret;
}
@@ -4772,6 +4764,14 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
en_bc_pmc);
}
+static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+}
+
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
struct hclge_get_fd_mode_cmd *req;
@@ -4822,7 +4822,8 @@ static int hclge_get_fd_allocation(struct hclge_dev *hdev,
return ret;
}
-static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
+static int hclge_set_fd_key_config(struct hclge_dev *hdev,
+ enum HCLGE_FD_STAGE stage_num)
{
struct hclge_set_fd_key_config_cmd *req;
struct hclge_fd_key_cfg *stage;
@@ -4876,9 +4877,6 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
return -EOPNOTSUPP;
}
- hdev->fd_cfg.proto_support =
- TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
- UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
@@ -4892,11 +4890,9 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* If use max 400bit key, we can support tuples for ether type */
- if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
- hdev->fd_cfg.proto_support |= ETHER_FLOW;
+ if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
key_cfg->tuple_active |=
BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
- }
/* roce_type is used to filter roce frames
* dst_vport is used to specify the rule
@@ -5006,8 +5002,6 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
return true;
switch (tuple_bit) {
- case 0:
- return false;
case BIT(INNER_DST_MAC):
for (i = 0; i < ETH_ALEN; i++) {
calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
@@ -5165,9 +5159,10 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
u8 *cur_key_x, *cur_key_y;
- unsigned int i;
- int ret, tuple_size;
u8 meta_data_region;
+ u8 tuple_size;
+ int ret;
+ u32 i;
memset(key_x, 0, sizeof(key_x));
memset(key_y, 0, sizeof(key_y));
@@ -5244,172 +5239,255 @@ static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
-static int hclge_fd_check_spec(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs, u32 *unused)
+static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
+ u32 *unused_tuple)
{
- struct ethtool_tcpip4_spec *tcp_ip4_spec;
- struct ethtool_usrip4_spec *usr_ip4_spec;
- struct ethtool_tcpip6_spec *tcp_ip6_spec;
- struct ethtool_usrip6_spec *usr_ip6_spec;
- struct ethhdr *ether_spec;
-
- if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ if (!spec || !unused_tuple)
return -EINVAL;
- if (!(fs->flow_type & hdev->fd_cfg.proto_support))
- return -EOPNOTSUPP;
-
- if ((fs->flow_type & FLOW_EXT) &&
- (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
- dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
- return -EOPNOTSUPP;
- }
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
- switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
- case SCTP_V4_FLOW:
- case TCP_V4_FLOW:
- case UDP_V4_FLOW:
- tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (!tcp_ip4_spec->ip4src)
- *unused |= BIT(INNER_SRC_IP);
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (!tcp_ip4_spec->ip4dst)
- *unused |= BIT(INNER_DST_IP);
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
- if (!tcp_ip4_spec->psrc)
- *unused |= BIT(INNER_SRC_PORT);
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
- if (!tcp_ip4_spec->pdst)
- *unused |= BIT(INNER_DST_PORT);
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
- if (!tcp_ip4_spec->tos)
- *unused |= BIT(INNER_IP_TOS);
+ return 0;
+}
- break;
- case IP_USER_FLOW:
- usr_ip4_spec = &fs->h_u.usr_ip4_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (!usr_ip4_spec->ip4src)
- *unused |= BIT(INNER_SRC_IP);
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
- if (!usr_ip4_spec->ip4dst)
- *unused |= BIT(INNER_DST_IP);
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (!usr_ip4_spec->tos)
- *unused |= BIT(INNER_IP_TOS);
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (!usr_ip4_spec->proto)
- *unused |= BIT(INNER_IP_PROTO);
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
- if (usr_ip4_spec->l4_4_bytes)
- return -EOPNOTSUPP;
+ if (!spec->proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
- if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
- return -EOPNOTSUPP;
+ if (spec->l4_4_bytes)
+ return -EOPNOTSUPP;
- break;
- case SCTP_V6_FLOW:
- case TCP_V6_FLOW:
- case UDP_V6_FLOW:
- tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_IP_TOS);
+ if (spec->ip_ver != ETH_RX_NFC_IP4)
+ return -EOPNOTSUPP;
- /* check whether src/dst ip address used */
- if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
- !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
- *unused |= BIT(INNER_SRC_IP);
+ return 0;
+}
- if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
- !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
- *unused |= BIT(INNER_DST_IP);
+static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (!tcp_ip6_spec->psrc)
- *unused |= BIT(INNER_SRC_PORT);
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS);
- if (!tcp_ip6_spec->pdst)
- *unused |= BIT(INNER_DST_PORT);
+ /* check whether src/dst ip address used */
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
+ !spec->ip6src[2] && !spec->ip6src[3])
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (tcp_ip6_spec->tclass)
- return -EOPNOTSUPP;
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
+ !spec->ip6dst[2] && !spec->ip6dst[3])
+ *unused_tuple |= BIT(INNER_DST_IP);
- break;
- case IPV6_USER_FLOW:
- usr_ip6_spec = &fs->h_u.usr_ip6_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
- BIT(INNER_DST_PORT);
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
- /* check whether src/dst ip address used */
- if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
- !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
- *unused |= BIT(INNER_SRC_IP);
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
- if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
- !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
- *unused |= BIT(INNER_DST_IP);
+ if (spec->tclass)
+ return -EOPNOTSUPP;
- if (!usr_ip6_spec->l4_proto)
- *unused |= BIT(INNER_IP_PROTO);
+ return 0;
+}
- if (usr_ip6_spec->tclass)
- return -EOPNOTSUPP;
+static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (usr_ip6_spec->l4_4_bytes)
- return -EOPNOTSUPP;
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
- break;
- case ETHER_FLOW:
- ether_spec = &fs->h_u.ether_spec;
- *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
- BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
- BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+ /* check whether src/dst ip address used */
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
+ !spec->ip6src[2] && !spec->ip6src[3])
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (is_zero_ether_addr(ether_spec->h_source))
- *unused |= BIT(INNER_SRC_MAC);
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
+ !spec->ip6dst[2] && !spec->ip6dst[3])
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (is_zero_ether_addr(ether_spec->h_dest))
- *unused |= BIT(INNER_DST_MAC);
+ if (!spec->l4_proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
- if (!ether_spec->h_proto)
- *unused |= BIT(INNER_ETH_TYPE);
+ if (spec->tclass)
+ return -EOPNOTSUPP;
- break;
- default:
+ if (spec->l4_4_bytes)
return -EOPNOTSUPP;
- }
- if ((fs->flow_type & FLOW_EXT)) {
- if (fs->h_ext.vlan_etype)
+ return 0;
+}
+
+static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
+ BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+
+ if (is_zero_ether_addr(spec->h_source))
+ *unused_tuple |= BIT(INNER_SRC_MAC);
+
+ if (is_zero_ether_addr(spec->h_dest))
+ *unused_tuple |= BIT(INNER_DST_MAC);
+
+ if (!spec->h_proto)
+ *unused_tuple |= BIT(INNER_ETH_TYPE);
+
+ return 0;
+}
+
+static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple)
+{
+ if (fs->flow_type & FLOW_EXT) {
+ if (fs->h_ext.vlan_etype) {
+ dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
return -EOPNOTSUPP;
+ }
+
if (!fs->h_ext.vlan_tci)
- *unused |= BIT(INNER_VLAN_TAG_FST);
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
- if (fs->m_ext.vlan_tci) {
- if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
- return -EINVAL;
+ if (fs->m_ext.vlan_tci &&
+ be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
+ ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
+ return -EINVAL;
}
} else {
- *unused |= BIT(INNER_VLAN_TAG_FST);
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
}
if (fs->flow_type & FLOW_MAC_EXT) {
- if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "FLOW_MAC_EXT is not supported in current fd mode!\n");
return -EOPNOTSUPP;
+ }
if (is_zero_ether_addr(fs->h_ext.h_dest))
- *unused |= BIT(INNER_DST_MAC);
+ *unused_tuple |= BIT(INNER_DST_MAC);
else
- *unused &= ~(BIT(INNER_DST_MAC));
+ *unused_tuple &= ~BIT(INNER_DST_MAC);
}
return 0;
}
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple)
+{
+ u32 flow_type;
+ int ret;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config fd rules, invalid rule location: %u, max is %u\n.",
+ fs->location,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
+ return -EINVAL;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
+ unused_tuple);
+ break;
+ case IP_USER_FLOW:
+ ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
+ unused_tuple);
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
+ unused_tuple);
+ break;
+ case IPV6_USER_FLOW:
+ ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
+ unused_tuple);
+ break;
+ case ETHER_FLOW:
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "ETHER_FLOW is not supported in current fd mode!\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
+ unused_tuple);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "unsupported protocol type, protocol type = %#x\n",
+ flow_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check flow union tuple, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
+}
+
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
struct hclge_fd_rule *rule = NULL;
@@ -5618,7 +5696,7 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
break;
}
- if ((fs->flow_type & FLOW_EXT)) {
+ if (fs->flow_type & FLOW_EXT) {
rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
}
@@ -5673,22 +5751,23 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 action;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_dev_fd_supported(hdev)) {
+ dev_err(&hdev->pdev->dev,
+ "flow table director is not supported\n");
return -EOPNOTSUPP;
+ }
if (!hdev->fd_en) {
- dev_warn(&hdev->pdev->dev,
- "Please enable flow director first\n");
+ dev_err(&hdev->pdev->dev,
+ "please enable flow director first\n");
return -EOPNOTSUPP;
}
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
ret = hclge_fd_check_spec(hdev, fs, &unused);
- if (ret) {
- dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
+ if (ret)
return ret;
- }
if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
action = HCLGE_FD_ACTION_DROP_PACKET;
@@ -5729,7 +5808,6 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
}
rule->flow_type = fs->flow_type;
-
rule->location = fs->location;
rule->unused_tuple = unused;
rule->vf_id = dst_vport_id;
@@ -5877,6 +5955,149 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
return 0;
}
+static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip4_spec *spec,
+ struct ethtool_tcpip4_spec *spec_mask)
+{
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+}
+
+static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip4_spec *spec,
+ struct ethtool_usrip4_spec *spec_mask)
+{
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ spec->proto = rule->tuples.ip_proto;
+ spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+
+ spec->ip_ver = ETH_RX_NFC_IP4;
+}
+
+static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip6_spec *spec,
+ struct ethtool_tcpip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src,
+ rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst,
+ rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
+ IPV6_SIZE);
+
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
+ IPV6_SIZE);
+
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+}
+
+static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip6_spec *spec,
+ struct ethtool_usrip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src,
+ rule->tuples_mask.src_ip, IPV6_SIZE);
+
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
+
+ spec->l4_proto = rule->tuples.ip_proto;
+ spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+}
+
+static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
+ struct ethhdr *spec,
+ struct ethhdr *spec_mask)
+{
+ ether_addr_copy(spec->h_source, rule->tuples.src_mac);
+ ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
+
+ if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+ eth_zero_addr(spec_mask->h_source);
+ else
+ ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
+
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(spec_mask->h_dest);
+ else
+ ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
+
+ spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
+ spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+ 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+}
+
+static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if (fs->flow_type & FLOW_EXT) {
+ fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
+ fs->m_ext.vlan_tci =
+ rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
+ cpu_to_be16(VLAN_VID_MASK) :
+ cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(fs->m_u.ether_spec.h_dest);
+ else
+ ether_addr_copy(fs->m_u.ether_spec.h_dest,
+ rule->tuples_mask.dst_mac);
+ }
+}
+
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -5909,162 +6130,34 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
case SCTP_V4_FLOW:
case TCP_V4_FLOW:
case UDP_V4_FLOW:
- fs->h_u.tcp_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
-
- fs->h_u.tcp_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
-
- fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
- fs->m_u.tcp_ip4_spec.psrc =
- rule->unused_tuple & BIT(INNER_SRC_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.src_port);
-
- fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
- fs->m_u.tcp_ip4_spec.pdst =
- rule->unused_tuple & BIT(INNER_DST_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.dst_port);
-
- fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
- fs->m_u.tcp_ip4_spec.tos =
- rule->unused_tuple & BIT(INNER_IP_TOS) ?
- 0 : rule->tuples_mask.ip_tos;
-
+ hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
+ &fs->m_u.tcp_ip4_spec);
break;
case IP_USER_FLOW:
- fs->h_u.usr_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
-
- fs->h_u.usr_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
- fs->m_u.usr_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
-
- fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
- fs->m_u.usr_ip4_spec.tos =
- rule->unused_tuple & BIT(INNER_IP_TOS) ?
- 0 : rule->tuples_mask.ip_tos;
-
- fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
- fs->m_u.usr_ip4_spec.proto =
- rule->unused_tuple & BIT(INNER_IP_PROTO) ?
- 0 : rule->tuples_mask.ip_proto;
-
- fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
-
+ hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec);
break;
case SCTP_V6_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
- cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
-
- cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
-
- fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
- fs->m_u.tcp_ip6_spec.psrc =
- rule->unused_tuple & BIT(INNER_SRC_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.src_port);
-
- fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
- fs->m_u.tcp_ip6_spec.pdst =
- rule->unused_tuple & BIT(INNER_DST_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.dst_port);
-
+ hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
+ &fs->m_u.tcp_ip6_spec);
break;
case IPV6_USER_FLOW:
- cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.usr_ip6_spec.ip6src, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
-
- cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
-
- fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
- fs->m_u.usr_ip6_spec.l4_proto =
- rule->unused_tuple & BIT(INNER_IP_PROTO) ?
- 0 : rule->tuples_mask.ip_proto;
-
- break;
- case ETHER_FLOW:
- ether_addr_copy(fs->h_u.ether_spec.h_source,
- rule->tuples.src_mac);
- if (rule->unused_tuple & BIT(INNER_SRC_MAC))
- eth_zero_addr(fs->m_u.ether_spec.h_source);
- else
- ether_addr_copy(fs->m_u.ether_spec.h_source,
- rule->tuples_mask.src_mac);
-
- ether_addr_copy(fs->h_u.ether_spec.h_dest,
- rule->tuples.dst_mac);
- if (rule->unused_tuple & BIT(INNER_DST_MAC))
- eth_zero_addr(fs->m_u.ether_spec.h_dest);
- else
- ether_addr_copy(fs->m_u.ether_spec.h_dest,
- rule->tuples_mask.dst_mac);
-
- fs->h_u.ether_spec.h_proto =
- cpu_to_be16(rule->tuples.ether_proto);
- fs->m_u.ether_spec.h_proto =
- rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
- 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
-
+ hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec);
break;
+ /* The flow type of fd rule has been checked before adding into the
+ * rule list. As the other flow types have been handled, it must be
+ * ETHER_FLOW for the default case.
+ */
default:
- spin_unlock_bh(&hdev->fd_rule_lock);
- return -EOPNOTSUPP;
- }
-
- if (fs->flow_type & FLOW_EXT) {
- fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
- fs->m_ext.vlan_tci =
- rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
- cpu_to_be16(VLAN_VID_MASK) :
- cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
+ &fs->m_u.ether_spec);
+ break;
}
- if (fs->flow_type & FLOW_MAC_EXT) {
- ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
- if (rule->unused_tuple & BIT(INNER_DST_MAC))
- eth_zero_addr(fs->m_u.ether_spec.h_dest);
- else
- ether_addr_copy(fs->m_u.ether_spec.h_dest,
- rule->tuples_mask.dst_mac);
- }
+ hclge_fd_get_ext_info(fs, rule);
if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
fs->ring_cookie = RX_CLS_FLOW_DISC;
@@ -6202,7 +6295,6 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
*/
if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -EOPNOTSUPP;
}
@@ -6216,14 +6308,12 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOSPC;
}
rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOMEM;
}
@@ -6310,6 +6400,14 @@ static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}
+static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+}
+
static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -6834,8 +6932,22 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
int hclge_vport_start(struct hclge_vport *vport)
{
+ struct hclge_dev *hdev = vport->back;
+
set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
vport->last_active_jiffies = jiffies;
+
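+ /* if this vport's tables were cleared (e.g. by a global/IMP reset
+ * or a VF FLR), restore them before the vport starts working again
+ */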
+ if (test_bit(vport->vport_id, hdev->vport_config_block)) {
+ if (vport->vport_id) {
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_vlan_table(vport);
+ } else {
+ hclge_restore_hw_table(hdev);
+ }
+ }
+
+ clear_bit(vport->vport_id, hdev->vport_config_block);
+
return 0;
}
@@ -6872,17 +6984,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
}
if (op == HCLGE_MAC_VLAN_ADD) {
- if ((!resp_code) || (resp_code == 1)) {
+ if (!resp_code || resp_code == 1)
return 0;
- } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for uc_overflow.\n");
- return -ENOSPC;
- } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for mc_overflow.\n");
+ else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
+ resp_code == HCLGE_ADD_MC_OVERFLOW)
return -ENOSPC;
- }
dev_err(&hdev->pdev->dev,
"add mac addr failed for undefined, code=%u.\n",
@@ -7106,52 +7212,8 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
return cfg_status;
}
-static int hclge_init_umv_space(struct hclge_dev *hdev)
-{
- u16 allocated_size = 0;
- int ret;
-
- ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
- true);
- if (ret)
- return ret;
-
- if (allocated_size < hdev->wanted_umv_size)
- dev_warn(&hdev->pdev->dev,
- "Alloc umv space failed, want %u, get %u\n",
- hdev->wanted_umv_size, allocated_size);
-
- mutex_init(&hdev->umv_mutex);
- hdev->max_umv_size = allocated_size;
- /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
- * preserve some unicast mac vlan table entries shared by pf
- * and its vfs.
- */
- hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
- hdev->share_umv_size = hdev->priv_umv_size +
- hdev->max_umv_size % (hdev->num_req_vfs + 2);
-
- return 0;
-}
-
-static int hclge_uninit_umv_space(struct hclge_dev *hdev)
-{
- int ret;
-
- if (hdev->max_umv_size > 0) {
- ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
- false);
- if (ret)
- return ret;
- hdev->max_umv_size = 0;
- }
- mutex_destroy(&hdev->umv_mutex);
-
- return 0;
-}
-
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
- u16 *allocated_size, bool is_alloc)
+ u16 *allocated_size)
{
struct hclge_umv_spc_alc_cmd *req;
struct hclge_desc desc;
@@ -7159,21 +7221,39 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
req = (struct hclge_umv_spc_alc_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
- if (!is_alloc)
- hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
req->space_size = cpu_to_le32(space_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "%s umv space failed for cmd_send, ret =%d\n",
- is_alloc ? "allocate" : "free", ret);
+ dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
+ ret);
return ret;
}
- if (is_alloc && allocated_size)
- *allocated_size = le32_to_cpu(desc.data[1]);
+ *allocated_size = le32_to_cpu(desc.data[1]);
+
+ return 0;
+}
+
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+ u16 allocated_size = 0;
+ int ret;
+
+ ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
+ if (ret)
+ return ret;
+
+ if (allocated_size < hdev->wanted_umv_size)
+ dev_warn(&hdev->pdev->dev,
+ "failed to alloc umv space, want %u, get %u\n",
+ hdev->wanted_umv_size, allocated_size);
+
+ hdev->max_umv_size = allocated_size;
+ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
return 0;
}
@@ -7188,21 +7268,25 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev)
vport->used_umv_num = 0;
}
- mutex_lock(&hdev->umv_mutex);
+ mutex_lock(&hdev->vport_lock);
hdev->share_umv_size = hdev->priv_umv_size +
- hdev->max_umv_size % (hdev->num_req_vfs + 2);
- mutex_unlock(&hdev->umv_mutex);
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ mutex_unlock(&hdev->vport_lock);
}
-static bool hclge_is_umv_space_full(struct hclge_vport *vport)
+static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
struct hclge_dev *hdev = vport->back;
bool is_full;
- mutex_lock(&hdev->umv_mutex);
+ if (need_lock)
+ mutex_lock(&hdev->vport_lock);
+
is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
hdev->share_umv_size == 0);
- mutex_unlock(&hdev->umv_mutex);
+
+ if (need_lock)
+ mutex_unlock(&hdev->vport_lock);
return is_full;
}
@@ -7211,7 +7295,6 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
struct hclge_dev *hdev = vport->back;
- mutex_lock(&hdev->umv_mutex);
if (is_free) {
if (vport->used_umv_num > hdev->priv_umv_size)
hdev->share_umv_size++;
@@ -7224,7 +7307,99 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
hdev->share_umv_size--;
vport->used_umv_num++;
}
- mutex_unlock(&hdev->umv_mutex);
+}
+
+static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
+ const u8 *mac_addr)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node)
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+ return mac_node;
+
+ return NULL;
+}
+
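+/* update the state of an existing mac node according to a new request:
+ * a TO_ADD request reactivates a node pending deletion (it is still in
+ * hardware), a TO_DEL request frees a node not yet written to hardware
+ * or marks an active one for deletion, and an ACTIVE update (only from
+ * tmp_add_list) marks a pending add as programmed.
+ */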
+static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
+ enum HCLGE_MAC_NODE_STATE state)
+{
+ switch (state) {
+ /* from set_rx_mode or tmp_add_list */
+ case HCLGE_MAC_TO_ADD:
+ if (mac_node->state == HCLGE_MAC_TO_DEL)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ break;
+ /* only from set_rx_mode */
+ case HCLGE_MAC_TO_DEL:
+ if (mac_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ }
+ break;
+ /* only from tmp_add_list, the mac_node->state won't be
+ * ACTIVE.
+ */
+ case HCLGE_MAC_ACTIVE:
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+
+ break;
+ }
+}
+
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_node *mac_node;
+ struct list_head *list;
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ /* if the mac addr is already in the mac list, no need to add a new
+ * one into it, just check the mac addr state, convert it to a new
+ * state, or just remove it, or do nothing.
+ */
+ mac_node = hclge_find_mac_node(list, addr);
+ if (mac_node) {
+ hclge_update_mac_node(mac_node, state);
+ spin_unlock_bh(&vport->mac_list_lock);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+ return 0;
+ }
+
+ /* if this address was never added, there is no need to delete it */
+ if (state == HCLGE_MAC_TO_DEL) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ dev_err(&hdev->pdev->dev,
+ "failed to delete address %pM from mac list\n",
+ addr);
+ return -ENOENT;
+ }
+
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+ if (!mac_node) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ return -ENOMEM;
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ mac_node->state = state;
+ ether_addr_copy(mac_node->mac_addr, addr);
+ list_add_tail(&mac_node->node, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ return 0;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
@@ -7232,7 +7407,8 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_add_uc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
+ addr);
}
int hclge_add_uc_addr_common(struct hclge_vport *vport,
@@ -7271,15 +7447,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
*/
ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
if (ret == -ENOENT) {
- if (!hclge_is_umv_space_full(vport)) {
+ mutex_lock(&hdev->vport_lock);
+ if (!hclge_is_umv_space_full(vport, false)) {
ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
if (!ret)
hclge_update_umv_space(vport, false);
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
+ mutex_unlock(&hdev->vport_lock);
- dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
- hdev->priv_umv_size);
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
+ dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
+ hdev->priv_umv_size);
return -ENOSPC;
}
@@ -7303,7 +7483,8 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_rm_uc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
+ addr);
}
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
@@ -7326,8 +7507,13 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, false);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
- if (!ret)
+ if (!ret) {
+ mutex_lock(&hdev->vport_lock);
hclge_update_umv_space(vport, true);
+ mutex_unlock(&hdev->vport_lock);
+ } else if (ret == -ENOENT) {
+ ret = 0;
+ }
return ret;
}
@@ -7337,7 +7523,8 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_add_mc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
+ addr);
}
int hclge_add_mc_addr_common(struct hclge_vport *vport,
@@ -7369,7 +7556,9 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- if (status == -ENOSPC)
+ /* if the table has already overflowed, do not print the error each time */
+ if (status == -ENOSPC &&
+ !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
return status;
@@ -7380,7 +7569,8 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_rm_mc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
+ addr);
}
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
@@ -7415,111 +7605,354 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
/* Not all the vfid is zero, update the vfid */
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- } else {
- /* Maybe this mac address is in mta table, but it cannot be
- * deleted here because an entry of mta represents an address
- * range rather than a specific address. the delete action to
- * all entries will take effect in update_mta_status called by
- * hns3_nic_set_rx_mode.
- */
+ } else if (status == -ENOENT) {
status = 0;
}
return status;
}
-void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ int (*sync)(struct hclge_vport *,
+ const unsigned char *))
{
- struct hclge_vport_mac_addr_cfg *mac_cfg;
- struct list_head *list;
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
- if (!vport->vport_id)
- return;
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = sync(vport, mac_node->mac_addr);
+ if (!ret) {
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+ break;
+ }
+ }
+}
- mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
- if (!mac_cfg)
- return;
+static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ int (*unsync)(struct hclge_vport *,
+ const unsigned char *))
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
- mac_cfg->hd_tbl_status = true;
- memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = unsync(vport, mac_node->mac_addr);
+ if (!ret || ret == -ENOENT) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+ break;
+ }
+ }
+}
- list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+static bool hclge_sync_from_add_list(struct list_head *add_list,
+ struct list_head *mac_list)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ bool all_added = true;
+
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ all_added = false;
+
+ /* if the mac address from tmp_add_list is not in the
+ * uc/mc_mac_list, it means a TO_DEL request was received
+ * during the time window of adding the mac address into the
+ * mac table. If the mac_node state is ACTIVE, change it to
+ * TO_DEL so it will be removed next time; otherwise it must
+ * be TO_ADD, meaning this address has not been added to the
+ * mac table yet, so just remove the mac node.
+ */
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclge_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
- list_add_tail(&mac_cfg->node, list);
+ return all_added;
}
-void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- bool is_write_tbl,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
{
- struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
- struct list_head *list;
- bool uc_flag, mc_flag;
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
- list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ /* If the mac addr exists in the mac list, it means
+ * a new TO_ADD request was received during the time window
+ * of configuring the mac address. Since the mac node
+ * state is TO_ADD and the address is still in the
+ * hardware (because the delete failed), we just need
+ * to change the mac node state to ACTIVE.
+ */
+ new_node->state = HCLGE_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ }
+ }
+}
+
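+/* record in vport->overflow_promisc_flags whether all uc/mc addresses
+ * were programmed into the hardware table; hclge_sync_promisc_mode()
+ * uses these flags to compensate when the table has overflowed.
+ */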
+static void hclge_update_overflow_flags(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ bool is_all_added)
+{
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+ } else {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
+ }
+}
- uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
- mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
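+/* sync one vport's uc or mc address list to the hardware mac table:
+ * nodes marked TO_DEL are removed first to free up table space, then
+ * nodes marked TO_ADD are programmed; on failure the remaining nodes
+ * stay in the list and are retried on the next sync.
+ */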
+static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct list_head *list;
+ bool all_added;
- list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
- if (uc_flag && mac_cfg->hd_tbl_status)
- hclge_rm_uc_addr_common(vport, mac_addr);
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ /* move the mac addrs to the tmp_add_list and tmp_del_list, so
+ * they can be added/deleted outside the spin lock
+ */
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
- if (mc_flag && mac_cfg->hd_tbl_status)
- hclge_rm_mc_addr_common(vport, mac_addr);
+ spin_lock_bh(&vport->mac_list_lock);
- list_del(&mac_cfg->node);
- kfree(mac_cfg);
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ goto stop_traverse;
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+ new_node->state = mac_node->state;
+ list_add_tail(&new_node->node, &tmp_add_list);
+ break;
+ default:
break;
}
}
+
+stop_traverse:
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ /* delete first, in order to get max mac table space for adding */
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_uc_addr_common);
+ hclge_sync_vport_mac_list(vport, &tmp_add_list,
+ hclge_add_uc_addr_common);
+ } else {
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_mc_addr_common);
+ hclge_sync_vport_mac_list(vport, &tmp_add_list,
+ hclge_add_mc_addr_common);
+ }
+
+ /* if adding or deleting some mac addresses failed, move them back
+ * to the mac_list, and retry on the next sync.
+ */
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+ all_added = hclge_sync_from_add_list(&tmp_add_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_update_overflow_flags(vport, mac_type, all_added);
+}
+
+static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (test_bit(vport->vport_id, hdev->vport_config_block))
+ return false;
+
+ if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
+ return true;
+
+ return false;
+}
+
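+/* walk all vports and sync the uc/mc mac list of every vport whose
+ * mac table has pending changes
+ */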
+static void hclge_sync_mac_table(struct hclge_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (!hclge_need_sync_mac_table(vport))
+ continue;
+
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
+ }
}
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type)
{
- struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
- struct list_head *list;
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_mac_node *mac_cfg, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+ int ret;
- list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ list = &vport->uc_mac_list;
+ unsync = hclge_rm_uc_addr_common;
+ } else {
+ list = &vport->mc_mac_list;
+ unsync = hclge_rm_mc_addr_common;
+ }
- list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
- hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
+ INIT_LIST_HEAD(&tmp_del_list);
- if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
- hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
+ if (!is_del_list)
+ set_bit(vport->vport_id, hdev->vport_config_block);
- mac_cfg->hd_tbl_status = false;
- if (is_del_list) {
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+ switch (mac_cfg->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
list_del(&mac_cfg->node);
- kfree(mac_cfg);
+ list_add_tail(&mac_cfg->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ if (is_del_list) {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+ ret = unsync(vport, mac_cfg->mac_addr);
+ if (!ret || ret == -ENOENT) {
+ /* clear all mac addrs from hardware, but keep these
+ * mac addrs in the mac list, and restore them after
+ * the vf reset finishes.
+ */
+ if (!is_del_list &&
+ mac_cfg->state == HCLGE_MAC_ACTIVE) {
+ mac_cfg->state = HCLGE_MAC_TO_ADD;
+ } else {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ } else if (is_del_list) {
+ mac_cfg->state = HCLGE_MAC_TO_DEL;
}
}
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+/* remove all mac addresses when uninitializing */
+static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ if (mac_type == HCLGE_MAC_ADDR_UC)
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_uc_addr_common);
+ else
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_mc_addr_common);
+
+ if (!list_empty(&tmp_del_list))
+ dev_warn(&hdev->pdev->dev,
+ "uninit %s mac list for vport %u not completely.\n",
+ mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
+ vport->vport_id);
+
+ list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
}
-void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
+static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
- struct hclge_vport_mac_addr_cfg *mac, *tmp;
struct hclge_vport *vport;
int i;
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
- list_del(&mac->node);
- kfree(mac);
- }
-
- list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
- list_del(&mac->node);
- kfree(mac);
- }
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
}
}
@@ -7683,12 +8116,57 @@ static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
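+/* update the unicast mac list when the device address changes: the new
+ * address is added (or reactivated) and kept at the list head, while the
+ * old address, if present and different, is dropped or marked for deletion.
+ */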
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr)
+{
+ struct list_head *list = &vport->uc_mac_list;
+ struct hclge_mac_node *old_node, *new_node;
+
+ new_node = hclge_find_mac_node(list, new_addr);
+ if (!new_node) {
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ return -ENOMEM;
+
+ new_node->state = HCLGE_MAC_TO_ADD;
+ ether_addr_copy(new_node->mac_addr, new_addr);
+ list_add(&new_node->node, list);
+ } else {
+ if (new_node->state == HCLGE_MAC_TO_DEL)
+ new_node->state = HCLGE_MAC_ACTIVE;
+
+ /* make sure the new addr is at the list head, to avoid the dev
+ * addr not being re-added into the mac table due to the umv space
+ * limitation after a global/imp reset, which clears the mac
+ * table in hardware.
+ */
+ list_move(&new_node->node, list);
+ }
+
+ if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
+ old_node = hclge_find_mac_node(list, old_addr);
+ if (old_node) {
+ if (old_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&old_node->node);
+ kfree(old_node);
+ } else {
+ old_node->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ return 0;
+}
+
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
bool is_first)
{
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ unsigned char *old_addr = NULL;
int ret;
/* mac addr check */
@@ -7696,39 +8174,42 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
dev_err(&hdev->pdev->dev,
- "Change uc mac err! invalid mac:%pM.\n",
+ "change uc mac err! invalid mac: %pM.\n",
new_addr);
return -EINVAL;
}
- if ((!is_first || is_kdump_kernel()) &&
- hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
- dev_warn(&hdev->pdev->dev,
- "remove old uc mac address fail.\n");
-
- ret = hclge_add_uc_addr(handle, new_addr);
+ ret = hclge_pause_addr_cfg(hdev, new_addr);
if (ret) {
dev_err(&hdev->pdev->dev,
- "add uc mac address fail, ret =%d.\n",
+ "failed to configure mac pause address, ret = %d\n",
ret);
-
- if (!is_first &&
- hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
- dev_err(&hdev->pdev->dev,
- "restore uc mac address fail.\n");
-
- return -EIO;
+ return ret;
}
- ret = hclge_pause_addr_cfg(hdev, new_addr);
+ if (!is_first)
+ old_addr = hdev->hw.mac.mac_addr;
+
+ spin_lock_bh(&vport->mac_list_lock);
+ ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
if (ret) {
dev_err(&hdev->pdev->dev,
- "configure mac pause address fail, ret =%d.\n",
- ret);
- return -EIO;
- }
+ "failed to change the mac addr:%pM, ret = %d\n",
+ new_addr, ret);
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ if (!is_first)
+ hclge_pause_addr_cfg(hdev, old_addr);
+ return ret;
+ }
+ /* we must update the dev addr under spin lock protection, to prevent
+ * the dev addr from being removed by the set_rx_mode path.
+ */
ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_task_schedule(hdev, 0);
return 0;
}
@@ -8308,42 +8789,80 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
}
}
-static void hclge_restore_vlan_table(struct hnae3_handle *handle)
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
u16 vlan_proto;
- u16 state, vlan_id;
- int i;
+ u16 vlan_id;
+ u16 state;
+ int ret;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
- vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- state = vport->port_base_vlan_cfg.state;
+ vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
+ vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ state = vport->port_base_vlan_cfg.state;
- if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
- hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
- vport->vport_id, vlan_id,
- false);
- continue;
- }
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
+ hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id, vlan_id,
+ false);
+ return;
+ }
- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- int ret;
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
+ vlan->hd_tbl_status = true;
+ }
+}
- if (!vlan->hd_tbl_status)
- continue;
- ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
- vport->vport_id,
- vlan->vlan_id, false);
- if (ret)
- break;
+/* For global reset and imp reset, the hardware will clear the mac table,
+ * so we change the mac address state from ACTIVE to TO_ADD, then they
+ * can be restored in the service task after the reset completes. Furthermore,
+ * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
+ * restored after reset, so just remove these mac nodes from mac_list.
+ */
+static void hclge_mac_node_convert_for_reset(struct list_head *list)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_ADD;
+ } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
}
}
}
+void hclge_restore_mac_table_common(struct hclge_vport *vport)
+{
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
+ hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+static void hclge_restore_hw_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_vlan_table(vport);
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+
+ hclge_restore_fd_entries(handle);
+}
+
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -9658,7 +10177,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
dev_warn(&hdev->pdev->dev,
"vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
vf);
- else if (enable && hclge_is_umv_space_full(vport))
+ else if (enable && hclge_is_umv_space_full(vport, true))
dev_warn(&hdev->pdev->dev,
"vf %d mac table is full, enable spoof check may cause its packet send fail\n",
vf);
@@ -9835,8 +10354,16 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
hclge_stats_clear(hdev);
- memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
- memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
+ /* NOTE: a pf reset does not need to clear or restore the pf and vf
+ * table entries, so the tables in memory should not be cleaned here.
+ */
+ if (hdev->reset_type == HNAE3_IMP_RESET ||
+ hdev->reset_type == HNAE3_GLOBAL_RESET) {
+ memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
+ memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
+ bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
+ hclge_reset_umv_space(hdev);
+ }
ret = hclge_cmd_init(hdev);
if (ret) {
@@ -9850,8 +10377,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- hclge_reset_umv_space(hdev);
-
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -9947,12 +10472,11 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_clear_vf_vlan(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
+ hclge_uninit_mac_table(hdev);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
- hclge_uninit_umv_space(hdev);
-
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
@@ -9966,7 +10490,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
mutex_destroy(&hdev->vport_lock);
- hclge_uninit_vport_mac_table(hdev);
hclge_uninit_vport_vlan_table(hdev);
ae_dev->priv = NULL;
}
@@ -10576,6 +11099,131 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
return hclge_config_gro(hdev, enable);
}
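+/* re-apply the promisc mode and vlan filter setting when a promisc
+ * update has been requested or the mac table overflow state has changed
+ */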
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+ u8 tmp_flags = 0;
+ int ret;
+
+ if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ vport->last_promisc_flags = vport->overflow_promisc_flags;
+ }
+
+ if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
+ tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
+ ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
+ tmp_flags & HNAE3_MPE);
+ if (!ret) {
+ clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ hclge_enable_vlan_filter(handle,
+ tmp_flags & HNAE3_VLAN_FLTR);
+ }
+ }
+}
+
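+/* check whether an SFP module is present via the HCLGE_OPC_GET_SFP_EXIST command */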
+static bool hclge_module_existed(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+ u32 existed;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP exist state, ret = %d\n", ret);
+ return false;
+ }
+
+ existed = le32_to_cpu(desc.data[0]);
+
+ return existed != 0;
+}
+
+/* need 6 bds (140 bytes in total) for one read
+ * return the number of bytes actually read, 0 means the read failed.
+ */
+static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
+ struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
+ u16 read_len;
+ u16 copy_len;
+ int ret;
+ int i;
+
+ /* setup all 6 bds to read module eeprom info. */
+ for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
+ true);
+
+ /* bd0~bd4 need next flag */
+ if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ }
+
+ /* setup bd0, this bd contains offset and read length. */
+ sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
+ sfp_info_bd0->offset = cpu_to_le16((u16)offset);
+ read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
+ sfp_info_bd0->read_len = cpu_to_le16(read_len);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, i);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP eeprom info, ret = %d\n", ret);
+ return 0;
+ }
+
+ /* copy sfp info from bd0 to out buffer. */
+ copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
+ memcpy(data, sfp_info_bd0->data, copy_len);
+ read_len = copy_len;
+
+ /* copy sfp info from bd1~bd5 to out buffer if needed. */
+ for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ if (read_len >= len)
+ return read_len;
+
+ copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
+ memcpy(data + read_len, desc[i].data, copy_len);
+ read_len += copy_len;
+ }
+
+ return read_len;
+}
+
+static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 read_len = 0;
+ u16 data_len;
+
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return -EOPNOTSUPP;
+
+ if (!hclge_module_existed(hdev))
+ return -ENXIO;
+
+ while (read_len < len) {
+ data_len = hclge_get_sfp_eeprom_info(hdev,
+ offset + read_len,
+ len - read_len,
+ data + read_len);
+ if (!data_len)
+ return -EIO;
+
+ read_len += data_len;
+ }
+
+ return 0;
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -10588,6 +11236,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_vector = hclge_get_vector,
.put_vector = hclge_put_vector,
.set_promisc_mode = hclge_set_promisc_mode,
+ .request_update_promisc_mode = hclge_request_update_promisc_mode,
.set_loopback = hclge_set_loopback,
.start = hclge_ae_start,
.stop = hclge_ae_stop,
@@ -10649,7 +11298,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
.get_fd_rule_info = hclge_get_fd_rule_info,
.get_fd_all_rules = hclge_get_all_rules,
- .restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
.dbg_run_cmd = hclge_dbg_run_cmd,
@@ -10662,13 +11310,14 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_timer_task = hclge_set_timer_task,
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
- .restore_vlan_table = hclge_restore_vlan_table,
.get_vf_config = hclge_get_vf_config,
.set_vf_link_state = hclge_set_vf_link_state,
.set_vf_spoofchk = hclge_set_vf_spoofchk,
.set_vf_trust = hclge_set_vf_trust,
.set_vf_rate = hclge_set_vf_rate,
.set_vf_mac = hclge_set_vf_mac,
+ .get_module_eeprom = hclge_get_module_eeprom,
+ .get_cmdq_stat = hclge_get_cmdq_stat,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 71df23d5f1b4..913c4f677404 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -217,6 +217,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_LINK_UPDATING,
+ HCLGE_STATE_PROMISC_CHANGED,
HCLGE_STATE_RST_FAIL,
HCLGE_STATE_MAX
};
@@ -580,7 +581,6 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg {
u8 fd_mode;
u16 max_key_length; /* use bit as unit */
- u32 proto_support;
u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
@@ -631,9 +631,15 @@ struct hclge_fd_ad_data {
u16 rule_id;
};
-struct hclge_vport_mac_addr_cfg {
+enum HCLGE_MAC_NODE_STATE {
+ HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ACTIVE
+};
+
+struct hclge_mac_node {
struct list_head node;
- int hd_tbl_status;
+ enum HCLGE_MAC_NODE_STATE state;
u8 mac_addr[ETH_ALEN];
};
@@ -806,6 +812,8 @@ struct hclge_dev {
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+ unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
@@ -823,7 +831,6 @@ struct hclge_dev {
u16 priv_umv_size;
/* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size;
- struct mutex umv_mutex; /* protect share_umv_size */
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
@@ -867,6 +874,7 @@ struct hclge_rss_tuple_cfg {
enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE,
+ HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
HCLGE_VPORT_STATE_MAX
};
@@ -923,6 +931,10 @@ struct hclge_vport {
u32 mps; /* Max packet size */
struct hclge_vf_info vf_info;
+ u8 overflow_promisc_flags;
+ u8 last_promisc_flags;
+
+ spinlock_t mac_list_lock; /* protect mac addresses waiting to be added/deleted */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
struct list_head vlan_list; /* Store VF vlan table */
@@ -978,16 +990,18 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
-void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- enum HCLGE_MAC_ADDR_TYPE mac_type);
-void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- bool is_write_tbl,
- enum HCLGE_MAC_ADDR_TYPE mac_type);
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr);
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type);
-void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
+void hclge_restore_mac_table_common(struct hclge_vport *vport);
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 7f24fcb4f96a..0874ae47cb03 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -5,6 +5,9 @@
#include "hclge_mbx.h"
#include "hnae3.h"
+#define CREATE_TRACE_POINTS
+#include "hclge_trace.h"
+
static u16 hclge_errno_to_resp(int errno)
{
return abs(errno);
@@ -90,6 +93,8 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
+ trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
+
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
@@ -270,26 +275,17 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
if (!is_valid_ether_addr(mac_addr))
return -EINVAL;
- hclge_rm_uc_addr_common(vport, old_addr);
- status = hclge_add_uc_addr_common(vport, mac_addr);
- if (status) {
- hclge_add_uc_addr_common(vport, old_addr);
- } else {
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_UC);
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_UC);
- }
+ spin_lock_bh(&vport->mac_list_lock);
+ status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
+ mac_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+ hclge_task_schedule(hdev, 0);
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
- status = hclge_add_uc_addr_common(vport, mac_addr);
- if (!status)
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_UC);
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_UC, mac_addr);
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
- status = hclge_rm_uc_addr_common(vport, mac_addr);
- if (!status)
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_UC);
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_UC, mac_addr);
} else {
dev_err(&hdev->pdev->dev,
"failed to set unicast mac addr, unknown subcode %u\n",
@@ -305,18 +301,13 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
{
const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
struct hclge_dev *hdev = vport->back;
- int status;
if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
- status = hclge_add_mc_addr_common(vport, mac_addr);
- if (!status)
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_MC);
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_MC, mac_addr);
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
- status = hclge_rm_mc_addr_common(vport, mac_addr);
- if (!status)
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_MC);
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_MC, mac_addr);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %u\n",
@@ -324,7 +315,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
return -EIO;
}
- return status;
+ return 0;
}
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
@@ -638,6 +629,23 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
ae_dev->ops->reset_event(hdev->pdev, NULL);
}
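+/* handle the HCLGE_MBX_HANDLE_VF_TBL request from a VF: currently only
+ * HCLGE_MBX_VPORT_LIST_CLEAR is supported, which drops all mac and vlan
+ * entries of the requesting vport
+ */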
+static void hclge_handle_vf_tbl(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vf_vlan_cfg *msg_cmd;
+
+ msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
+ if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(vport, true);
+ } else {
+ dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
+ msg_cmd->subcode);
+ }
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -645,6 +653,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport;
struct hclge_desc *desc;
+ bool is_del = false;
unsigned int flag;
int ret = 0;
@@ -674,6 +683,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
vport = &hdev->vport[req->mbx_src_vfid];
+ trace_hclge_pf_mbx_get(hdev, req);
+
switch (req->msg.code) {
case HCLGE_MBX_MAP_RING_TO_VECTOR:
ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
@@ -731,7 +742,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret = hclge_get_link_info(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get link stat for VF\n",
+ "failed to inform link stat to VF, ret = %d\n",
ret);
break;
case HCLGE_MBX_QUEUE_RESET:
@@ -760,11 +771,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
break;
case HCLGE_MBX_GET_VF_FLR_STATUS:
case HCLGE_MBX_VF_UNINIT:
- hclge_rm_vport_all_mac_table(vport, true,
+ is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
+ hclge_rm_vport_all_mac_table(vport, is_del,
HCLGE_MAC_ADDR_UC);
- hclge_rm_vport_all_mac_table(vport, true,
+ hclge_rm_vport_all_mac_table(vport, is_del,
HCLGE_MAC_ADDR_MC);
- hclge_rm_vport_all_vlan_table(vport, true);
+ hclge_rm_vport_all_vlan_table(vport, is_del);
break;
case HCLGE_MBX_GET_MEDIA_TYPE:
hclge_get_vf_media_type(vport, &resp_msg);
@@ -778,6 +790,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_NCSI_ERROR:
hclge_handle_ncsi_error(hdev);
break;
+ case HCLGE_MBX_HANDLE_VF_TBL:
+ hclge_handle_vf_tbl(vport, req);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %u\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
new file mode 100644
index 000000000000..5b0b71bd6120
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2020 Hisilicon Limited. */
+
+/* This must be outside ifdef _HCLGE_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HCLGE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HCLGE_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
+#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
+
+TRACE_EVENT(hclge_pf_mbx_get,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_GET_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->mbx_src_vfid;
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u subcode:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code, __entry->subcode,
+ __print_array(__entry->mbx_data, PF_GET_MBX_LEN, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hclge_pf_mbx_send,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_SEND_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->dest_vfid;
+ __entry->code = req->msg.code;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code,
+ __print_array(__entry->mbx_data, PF_SEND_MBX_LEN, sizeof(u32))
+ )
+);
+
+#endif /* _HCLGE_TRACE_H_ */
+
+/* This must be outside ifdef _HCLGE_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hclge_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index 53804d95ea90..2c26ea607a53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index e02d427131ee..32341dcaa6c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1164,6 +1164,27 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
en_bc_pmc);
}
+static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+}
+
+static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->nic;
+ bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
+ bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
+ int ret;
+
+ if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
+ ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
+ if (!ret)
+ clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+ }
+}
+
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
{
@@ -1245,10 +1266,12 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
int status;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
- send_msg.subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
- HCLGE_MBX_MAC_VLAN_UC_MODIFY;
+ send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
ether_addr_copy(send_msg.data, new_mac_addr);
- ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
+ if (is_first && !hdev->has_pf_mac)
+ eth_zero_addr(&send_msg.data[ETH_ALEN]);
+ else
+ ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (!status)
ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
@@ -1256,54 +1279,302 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
return status;
}
-static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
- const unsigned char *addr)
+static struct hclgevf_mac_addr_node *
+hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node)
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+ return mac_node;
+
+ return NULL;
+}
+
+static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
+ enum HCLGEVF_MAC_NODE_STATE state)
+{
+ switch (state) {
+ /* from set_rx_mode or tmp_add_list */
+ case HCLGEVF_MAC_TO_ADD:
+ if (mac_node->state == HCLGEVF_MAC_TO_DEL)
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ break;
+ /* only from set_rx_mode */
+ case HCLGEVF_MAC_TO_DEL:
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ mac_node->state = HCLGEVF_MAC_TO_DEL;
+ }
+ break;
+ /* only from tmp_add_list, the mac_node->state won't be
+ * HCLGEVF_MAC_ACTIVE
+ */
+ case HCLGEVF_MAC_ACTIVE:
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ break;
+ }
+}
+
+static int hclgevf_update_mac_list(struct hnae3_handle *handle,
+ enum HCLGEVF_MAC_NODE_STATE state,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclge_vf_to_pf_msg send_msg;
+ struct hclgevf_mac_addr_node *mac_node;
+ struct list_head *list;
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
- HCLGE_MBX_MAC_VLAN_UC_ADD);
- ether_addr_copy(send_msg.data, addr);
- return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+ &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ /* if the mac addr is already in the mac list, no need to add a new
+ * one into it, just check the mac addr state, convert it to a new
+ * state, or just remove it, or do nothing.
+ */
+ mac_node = hclgevf_find_mac_node(list, addr);
+ if (mac_node) {
+ hclgevf_update_mac_node(mac_node, state);
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return 0;
+ }
+ /* if this address was never added, there is no need to delete it */
+ if (state == HCLGEVF_MAC_TO_DEL) {
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return -ENOENT;
+ }
+
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+ if (!mac_node) {
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return -ENOMEM;
+ }
+
+ mac_node->state = state;
+ ether_addr_copy(mac_node->mac_addr, addr);
+ list_add_tail(&mac_node->node, list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return 0;
+}
+
+static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_ADDR_UC, addr);
}
static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclge_vf_to_pf_msg send_msg;
-
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
- HCLGE_MBX_MAC_VLAN_UC_REMOVE);
- ether_addr_copy(send_msg.data, addr);
- return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ADDR_UC, addr);
}
static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclge_vf_to_pf_msg send_msg;
-
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_ADD);
- ether_addr_copy(send_msg.data, addr);
- return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_ADDR_MC, addr);
}
static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ADDR_MC, addr);
+}
+
+static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
+ struct hclgevf_mac_addr_node *mac_node,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
struct hclge_vf_to_pf_msg send_msg;
+ u8 code, subcode;
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_REMOVE);
- ether_addr_copy(send_msg.data, addr);
+ if (mac_type == HCLGEVF_MAC_ADDR_UC) {
+ code = HCLGE_MBX_SET_UNICAST;
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
+ else
+ subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
+ } else {
+ code = HCLGE_MBX_SET_MULTICAST;
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
+ else
+ subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
+ }
+
+ hclgevf_build_send_msg(&send_msg, code, subcode);
+ ether_addr_copy(send_msg.data, mac_node->mac_addr);
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
+static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
+ struct list_head *list,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to configure mac %pM, state = %d, ret = %d\n",
+ mac_node->mac_addr, mac_node->state, ret);
+ return;
+ }
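+		/* on success, promote TO_ADD entries to ACTIVE and free the
+		 * entries that were deleted
+		 */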
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+static void hclgevf_sync_from_add_list(struct list_head *add_list,
+ struct list_head *mac_list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+		/* If the mac address from tmp_add_list is not in the
+		 * uc/mc_mac_list, a TO_DEL request was received while the mac
+		 * config request was being sent to the PF. If the mac_node
+		 * state is ACTIVE, change it to TO_DEL so it is removed next
+		 * time; if it is TO_ADD, the TO_ADD request failed, so just
+		 * free the mac node.
+		 */
+ new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclgevf_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
+ mac_node->state = HCLGEVF_MAC_TO_DEL;
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+static void hclgevf_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+			/* If the mac addr already exists in the mac list, a
+			 * new TO_ADD request was received while the mac addr
+			 * config request was being sent to the PF, so just
+			 * change the mac state to ACTIVE.
+			 */
+ new_node->state = HCLGEVF_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ }
+ }
+}
+
+static void hclgevf_clear_list(struct list_head *list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+}
+
+static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct list_head *list;
+
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+
+	/* move the mac addrs to tmp_add_list and tmp_del_list so they can
+	 * be added/deleted outside the spin lock
+	 */
+ list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+ &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGEVF_MAC_TO_DEL:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
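+		/* TO_ADD entries are copied rather than moved, so the node
+		 * stays visible in the main list while the request to the PF
+		 * is in flight
+		 */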
+ case HCLGEVF_MAC_TO_ADD:
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ goto stop_traverse;
+
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+ new_node->state = mac_node->state;
+ list_add_tail(&new_node->node, &tmp_add_list);
+ break;
+ default:
+ break;
+ }
+ }
+
+stop_traverse:
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+
+ /* delete first, in order to get max mac table space for adding */
+ hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
+ hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
+
+	/* if some mac addresses failed to be added/deleted, move them back
+	 * to the mac_list and retry next time.
+	 */
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ hclgevf_sync_from_del_list(&tmp_del_list, list);
+ hclgevf_sync_from_add_list(&tmp_add_list, list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+}
+
+static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
+{
+ hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
+ hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
+}
+
+static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
+{
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
+ hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+}
+
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
__be16 proto, u16 vlan_id,
bool is_kill)
@@ -1506,10 +1777,6 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
- if (ret)
- return ret;
-
/* clear handshake status with IMP */
hclgevf_reset_handshake(hdev, false);
@@ -1589,13 +1856,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
- /* Initialize ae_dev reset status as well, in case enet layer wants to
- * know if device is undergoing reset
- */
- ae_dev->reset_type = hdev->reset_type;
hdev->rst_stats.rst_cnt++;
rtnl_lock();
@@ -1610,7 +1872,6 @@ static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
hdev->rst_stats.hw_rst_done_cnt++;
@@ -1625,7 +1886,6 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
}
hdev->last_reset_time = jiffies;
- ae_dev->reset_type = HNAE3_NONE_RESET;
hdev->rst_stats.rst_done_cnt++;
hdev->rst_stats.rst_fail_cnt = 0;
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
@@ -1951,6 +2211,10 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
hclgevf_sync_vlan_filter(hdev);
+ hclgevf_sync_mac_table(hdev);
+
+ hclgevf_sync_promisc_mode(hdev);
+
hdev->last_serv_processed = jiffies;
out:
@@ -2313,6 +2577,10 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1);
+ spin_lock_init(&hdev->mac_table.mac_list_lock);
+ INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
+ INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
+
/* bring the device down */
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}
@@ -2695,6 +2963,15 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
return ret;
}
+static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
+ HCLGE_MBX_VPORT_LIST_CLEAR);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -2730,6 +3007,8 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+
dev_info(&hdev->pdev->dev, "Reset done\n");
return 0;
@@ -2802,6 +3081,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+	/* ensure the vf table list is empty before init */
+ ret = hclgevf_clear_vport_list(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to clear tbl list configuration, ret = %d.\n",
+ ret);
+ goto err_config;
+ }
+
ret = hclgevf_init_vlan_config(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -2846,6 +3134,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
hclgevf_pci_uninit(hdev);
hclgevf_cmd_uninit(hdev);
+ hclgevf_uninit_mac_list(hdev);
}
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
@@ -3213,6 +3502,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_timer_task = hclgevf_set_timer_task,
.get_link_mode = hclgevf_get_link_mode,
.set_promisc_mode = hclgevf_set_promisc_mode,
+ .request_update_promisc_mode = hclgevf_request_update_promisc_mode,
};
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 3b88d866facc..f19583c4bc9b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -148,6 +148,7 @@ enum hclgevf_states {
HCLGEVF_STATE_MBX_HANDLING,
HCLGEVF_STATE_CMD_DISABLE,
HCLGEVF_STATE_LINK_UPDATING,
+ HCLGEVF_STATE_PROMISC_CHANGED,
HCLGEVF_STATE_RST_FAIL,
};
@@ -234,6 +235,29 @@ struct hclgevf_rst_stats {
u32 rst_fail_cnt; /* the number of VF reset fail */
};
+enum HCLGEVF_MAC_ADDR_TYPE {
+ HCLGEVF_MAC_ADDR_UC,
+ HCLGEVF_MAC_ADDR_MC
+};
+
+enum HCLGEVF_MAC_NODE_STATE {
+ HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ACTIVE
+};
+
+struct hclgevf_mac_addr_node {
+ struct list_head node;
+ enum HCLGEVF_MAC_NODE_STATE state;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct hclgevf_mac_table_cfg {
+	spinlock_t mac_list_lock; /* protects the mac addresses pending add/delete */
+ struct list_head uc_mac_list;
+ struct list_head mc_mac_list;
+};
+
struct hclgevf_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
@@ -282,6 +306,8 @@ struct hclgevf_dev {
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+ struct hclgevf_mac_table_cfg mac_table;
+
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 9b8154955f91..5b2dcd97c107 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -5,6 +5,9 @@
#include "hclgevf_main.h"
#include "hnae3.h"
+#define CREATE_TRACE_POINTS
+#include "hclgevf_trace.h"
+
static int hclgevf_resp_to_errno(u16 resp_code)
{
return resp_code ? -resp_code : 0;
@@ -106,6 +109,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
+ trace_hclge_vf_mbx_send(hdev, req);
+
/* synchronous send */
if (need_resp) {
mutex_lock(&hdev->mbx_resp.mbx_mutex);
@@ -179,6 +184,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
continue;
}
+ trace_hclge_vf_mbx_get(hdev, req);
+
/* synchronous messages are time critical and need preferential
* treatment. Therefore, we need to acknowledge all the sync
* responses as quickly as possible so that waiting tasks do not
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
new file mode 100644
index 000000000000..e4bfb6191fef
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+/* This must be outside ifdef _HCLGEVF_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HCLGEVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HCLGEVF_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define VF_GET_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
+#define VF_SEND_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
+
+TRACE_EVENT(hclge_vf_mbx_get,
+ TP_PROTO(
+ struct hclgevf_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_GET_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->dest_vfid;
+ __entry->code = req->msg.code;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code,
+ __print_array(__entry->mbx_data, VF_GET_MBX_LEN, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hclge_vf_mbx_send,
+ TP_PROTO(
+ struct hclgevf_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_SEND_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->mbx_src_vfid;
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u subcode:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code, __entry->subcode,
+ __print_array(__entry->mbx_data, VF_SEND_MBX_LEN, sizeof(u32))
+ )
+);
+
+#endif /* _HCLGEVF_TRACE_H_ */
+
+/* This must be outside ifdef _HCLGEVF_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hclgevf_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile
index fe88ab88cacc..32a011ca44c3 100644
--- a/drivers/net/ethernet/huawei/hinic/Makefile
+++ b/drivers/net/ethernet/huawei/hinic/Makefile
@@ -4,4 +4,4 @@ obj-$(CONFIG_HINIC) += hinic.o
hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \
hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \
hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \
- hinic_common.o hinic_ethtool.o
+ hinic_common.o hinic_ethtool.o hinic_hw_mbox.o hinic_sriov.o
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index a209b14160cc..48b40be3e84d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -16,6 +16,7 @@
#include "hinic_hw_dev.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
+#include "hinic_sriov.h"
#define HINIC_DRV_NAME "hinic"
@@ -23,6 +24,7 @@ enum hinic_flags {
HINIC_LINK_UP = BIT(0),
HINIC_INTF_UP = BIT(1),
HINIC_RSS_ENABLE = BIT(2),
+ HINIC_LINK_DOWN = BIT(3),
};
struct hinic_rx_mode_work {
@@ -67,6 +69,8 @@ struct hinic_dev {
struct hinic_txq *txqs;
struct hinic_rxq *rxqs;
+ u16 sq_depth;
+ u16 rq_depth;
struct hinic_txq_stats tx_stats;
struct hinic_rxq_stats rx_stats;
@@ -78,6 +82,7 @@ struct hinic_dev {
struct hinic_rss_type rss_type;
u8 *rss_hkey_user;
s32 *rss_indir_user;
+ struct hinic_sriov_info sriov_info;
};
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 966aea949c0b..ace18d258049 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -33,6 +33,99 @@
#include "hinic_rx.h"
#include "hinic_dev.h"
+#define SET_LINK_STR_MAX_LEN 128
+
+#define GET_SUPPORTED_MODE 0
+#define GET_ADVERTISED_MODE 1
+
+#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= SUPPORTED_##mode)
+#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= ADVERTISED_##mode)
+
+struct hw2ethtool_link_mode {
+ enum ethtool_link_mode_bit_indices link_mode_bit;
+ u32 speed;
+ enum hinic_link_mode hw_link_mode;
+};
+
+struct cmd_link_settings {
+ u64 supported;
+ u64 advertising;
+
+ u32 speed;
+ u8 duplex;
+ u8 port;
+ u8 autoneg;
+};
+
+static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
+ SPEED_10, SPEED_100,
+ SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000,
+ SPEED_100000
+};
+
+static struct hw2ethtool_link_mode
+ hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = SPEED_10000,
+ .hw_link_mode = HINIC_10GE_BASE_KR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = SPEED_40000,
+ .hw_link_mode = HINIC_40GE_BASE_KR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = SPEED_40000,
+ .hw_link_mode = HINIC_40GE_BASE_CR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = SPEED_100000,
+ .hw_link_mode = HINIC_100GE_BASE_KR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = SPEED_100000,
+ .hw_link_mode = HINIC_100GE_BASE_CR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_KR_S,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_CR_S,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_KR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_CR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = SPEED_1000,
+ .hw_link_mode = HINIC_GE_BASE_KX,
+ },
+};
+
static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
enum hinic_speed speed)
{
@@ -71,18 +164,91 @@ static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
}
}
+static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
+{
+ int i = 0;
+
+ for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
+ if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
+ break;
+ }
+
+ return i;
+}
+
+static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
+ enum hinic_link_mode hw_link_mode,
+ u32 name)
+{
+ enum hinic_link_mode link_mode;
+ int idx = 0;
+
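+	/* hw_link_mode is a bitmask of hinic link modes; translate each set
+	 * bit into its ethtool link-mode bit
+	 */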
+ for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
+ if (hw_link_mode & ((u32)1 << link_mode)) {
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
+
+ if (name == GET_SUPPORTED_MODE)
+ ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
+ (link_settings, idx);
+ else
+ ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
+ (link_settings, idx);
+ }
+ }
+}
+
+static void hinic_link_port_type(struct cmd_link_settings *link_settings,
+ enum hinic_port_type port_type)
+{
+ switch (port_type) {
+ case HINIC_PORT_ELEC:
+ case HINIC_PORT_TP:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
+ link_settings->port = PORT_TP;
+ break;
+
+ case HINIC_PORT_AOC:
+ case HINIC_PORT_FIBRE:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
+ link_settings->port = PORT_FIBRE;
+ break;
+
+ case HINIC_PORT_COPPER:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
+ link_settings->port = PORT_DA;
+ break;
+
+ case HINIC_PORT_BACKPLANE:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
+ link_settings->port = PORT_NONE;
+ break;
+
+ default:
+ link_settings->port = PORT_OTHER;
+ break;
+ }
+}
+
static int hinic_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings
*link_ksettings)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_link_mode_cmd link_mode = { 0 };
+ struct hinic_pause_config pause_info = { 0 };
+ struct cmd_link_settings settings = { 0 };
enum hinic_port_link_state link_state;
struct hinic_port_cap port_cap;
int err;
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
- Autoneg);
link_ksettings->base.speed = SPEED_UNKNOWN;
link_ksettings->base.autoneg = AUTONEG_DISABLE;
@@ -92,14 +258,19 @@ static int hinic_get_link_ksettings(struct net_device *netdev,
if (err)
return err;
+ hinic_link_port_type(&settings, port_cap.port_type);
+ link_ksettings->base.port = settings.port;
+
err = hinic_port_link_state(nic_dev, &link_state);
if (err)
return err;
- if (link_state != HINIC_LINK_STATE_UP)
- return err;
-
- set_link_speed(link_ksettings, port_cap.speed);
+ if (link_state == HINIC_LINK_STATE_UP) {
+ set_link_speed(link_ksettings, port_cap.speed);
+ link_ksettings->base.duplex =
+ (port_cap.duplex == HINIC_DUPLEX_FULL) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
ethtool_link_ksettings_add_link_mode(link_ksettings,
@@ -108,11 +279,243 @@ static int hinic_get_link_ksettings(struct net_device *netdev,
if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
link_ksettings->base.autoneg = AUTONEG_ENABLE;
- link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ?
- DUPLEX_FULL : DUPLEX_HALF;
+ err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
+ if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
+ link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
+ return -EIO;
+
+ hinic_add_ethtool_link_mode(&settings, link_mode.supported,
+ GET_SUPPORTED_MODE);
+ hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
+ GET_ADVERTISED_MODE);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
+ if (err)
+ return err;
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
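+		/* translate the rx/tx pause configuration into the ethtool
+		 * Pause/Asym_Pause advertisement bits
+		 */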
+ if (pause_info.rx_pause && pause_info.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
+ } else if (pause_info.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
+ } else if (pause_info.rx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
+ }
+ }
+
+ bitmap_copy(link_ksettings->link_modes.supported,
+ (unsigned long *)&settings.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_copy(link_ksettings->link_modes.advertising,
+ (unsigned long *)&settings.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ return 0;
+}
+
+static int hinic_ethtool_to_hw_speed_level(u32 speed)
+{
+ int i;
+
+ for (i = 0; i < LINK_SPEED_LEVELS; i++) {
+ if (hw_to_ethtool_speed[i] == speed)
+ break;
+ }
+
+ return i;
+}
+
+static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
+ u32 speed)
+{
+ enum hinic_link_mode link_mode;
+ int idx;
+
+ for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
+ if (!(supported_link & ((u32)1 << link_mode)))
+ continue;
+
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
+
+ if (hw_to_ethtool_link_mode_table[idx].speed == speed)
+ return true;
+ }
+
+ return false;
+}
+
+static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
+{
+ struct hinic_link_mode_cmd link_mode = { 0 };
+ struct net_device *netdev = nic_dev->netdev;
+ enum nic_speed_level speed_level = 0;
+ int err;
+
+ err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
+ if (err)
+ return false;
+
+ if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
+ link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
+ return false;
+
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ if (speed_level >= LINK_SPEED_LEVELS ||
+ !hinic_is_support_speed(link_mode.supported, speed)) {
+ netif_err(nic_dev, drv, netdev,
+ "Unsupported speed: %d\n", speed);
+ return false;
+ }
+
+ return true;
+}
+
+static int get_link_settings_type(struct hinic_dev *nic_dev,
+ u8 autoneg, u32 speed, u32 *set_settings)
+{
+ struct hinic_port_cap port_cap = { 0 };
+ int err;
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err)
+ return err;
+
+	/* always request the autoneg setting when the port supports it */
+ if (port_cap.autoneg_cap)
+ *set_settings |= HILINK_LINK_SET_AUTONEG;
+
+ if (autoneg == AUTONEG_ENABLE) {
+ if (!port_cap.autoneg_cap) {
+ netif_err(nic_dev, drv, nic_dev->netdev, "Not support autoneg\n");
+ return -EOPNOTSUPP;
+ }
+ } else if (speed != (u32)SPEED_UNKNOWN) {
+ /* set speed only when autoneg is disabled */
+ if (!hinic_is_speed_legal(nic_dev, speed))
+ return -EINVAL;
+ *set_settings |= HILINK_LINK_SET_SPEED;
+ } else {
+ netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
+static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
+ u32 set_settings, u8 autoneg,
+ u32 speed)
+{
+ enum nic_speed_level speed_level = 0;
+ int err = 0;
+
+ if (set_settings & HILINK_LINK_SET_AUTONEG) {
+ err = hinic_set_autoneg(nic_dev->hwdev,
+ (autoneg == AUTONEG_ENABLE));
+ if (err)
+ netif_err(nic_dev, drv, nic_dev->netdev, "%s autoneg failed\n",
+ (autoneg == AUTONEG_ENABLE) ?
+ "Enable" : "Disable");
+ else
+ netif_info(nic_dev, drv, nic_dev->netdev, "%s autoneg successfully\n",
+ (autoneg == AUTONEG_ENABLE) ?
+ "Enable" : "Disable");
+ }
+
+ if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ err = hinic_set_speed(nic_dev->hwdev, speed_level);
+ if (err)
+ netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
+ speed);
+ else
+ netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
+ speed);
+ }
+
+ return err;
+}
+
+static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
+ u32 set_settings, u8 autoneg, u32 speed)
+{
+ struct hinic_link_ksettings_info settings = {0};
+ char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
+ struct net_device *netdev = nic_dev->netdev;
+ enum nic_speed_level speed_level = 0;
+ int err;
+
+ err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, "%s",
+ (set_settings & HILINK_LINK_SET_AUTONEG) ?
+ (autoneg ? "autong enable " : "autong disable ") : "");
+ if (err < 0 || err >= SET_LINK_STR_MAX_LEN) {
+ netif_err(nic_dev, drv, netdev, "Failed to snprintf link state, function return(%d) and dest_len(%d)\n",
+ err, SET_LINK_STR_MAX_LEN);
+ return -EFAULT;
+ }
+
+ if (set_settings & HILINK_LINK_SET_SPEED) {
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN,
+ "%sspeed %d ", set_link_str, speed);
+ if (err <= 0 || err >= SET_LINK_STR_MAX_LEN) {
+ netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n",
+ err, SET_LINK_STR_MAX_LEN);
+ return -EFAULT;
+ }
+ }
+
+ settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
+ settings.valid_bitmap = set_settings;
+ settings.autoneg = autoneg;
+ settings.speed = speed_level;
+
+ err = hinic_set_link_settings(nic_dev->hwdev, &settings);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
+ if (err)
+ netif_err(nic_dev, drv, netdev, "Set %s failed\n",
+ set_link_str);
+ else
+ netif_info(nic_dev, drv, netdev, "Set %s successfully\n",
+ set_link_str);
+
+ return err;
+ }
+
+ return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg,
+ speed);
+}
+
+static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u32 set_settings = 0;
+ int err;
+
+ err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings);
+ if (err)
+ return err;
+
+ if (set_settings)
+ err = hinic_set_settings_to_hw(nic_dev, set_settings,
+ autoneg, speed);
+ else
+ netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n");
+
+ return err;
+}
+
+static int hinic_set_link_ksettings(struct net_device *netdev, const struct
+ ethtool_link_ksettings *link_settings)
+{
+	/* only autoneg and speed can be set */
+ return set_link_settings(netdev, link_settings->base.autoneg,
+ link_settings->base.speed);
+}
+
static void hinic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -135,12 +538,81 @@ static void hinic_get_drvinfo(struct net_device *netdev,
static void hinic_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
- ring->rx_max_pending = HINIC_RQ_DEPTH;
- ring->tx_max_pending = HINIC_SQ_DEPTH;
- ring->rx_pending = HINIC_RQ_DEPTH;
- ring->tx_pending = HINIC_SQ_DEPTH;
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH;
+ ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH;
+ ring->rx_pending = nic_dev->rq_depth;
+ ring->tx_pending = nic_dev->sq_depth;
}
+static int check_ringparam_valid(struct hinic_dev *nic_dev,
+ struct ethtool_ringparam *ring)
+{
+ if (ring->rx_jumbo_pending || ring->rx_mini_pending) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Unsupported rx_jumbo_pending/rx_mini_pending\n");
+ return -EINVAL;
+ }
+
+ if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH ||
+ ring->tx_pending < HINIC_MIN_QUEUE_DEPTH ||
+ ring->rx_pending > HINIC_MAX_QUEUE_DEPTH ||
+ ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Queue depth out of range [%d-%d]\n",
+ HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 new_sq_depth, new_rq_depth;
+ int err;
+
+ err = check_ringparam_valid(nic_dev, ring);
+ if (err)
+ return err;
+
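+	/* round the requested depths down to the nearest power of two
+	 * (1U << ilog2())
+	 */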
+ new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
+ new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));
+
+ if (new_sq_depth == nic_dev->sq_depth &&
+ new_rq_depth == nic_dev->rq_depth)
+ return 0;
+
+ netif_info(nic_dev, drv, netdev,
+ "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
+ nic_dev->sq_depth, nic_dev->rq_depth,
+ new_sq_depth, new_rq_depth);
+
+ nic_dev->sq_depth = new_sq_depth;
+ nic_dev->rq_depth = new_rq_depth;
+
+ if (netif_running(netdev)) {
+ netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
+ err = hinic_close(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to close netdev\n");
+ return -EFAULT;
+ }
+
+ err = hinic_open(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
static void hinic_get_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
@@ -741,9 +1213,11 @@ static void hinic_get_strings(struct net_device *netdev,
static const struct ethtool_ops hinic_ethtool_ops = {
.get_link_ksettings = hinic_get_link_ksettings,
+ .set_link_ksettings = hinic_set_link_ksettings,
.get_drvinfo = hinic_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = hinic_get_ringparam,
+ .set_ringparam = hinic_set_ringparam,
.get_channels = hinic_get_channels,
.get_rxnfc = hinic_get_rxnfc,
.set_rxnfc = hinic_set_rxnfc,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 5f2d57d1b2d3..cb5b6e5f787f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -64,7 +64,7 @@
#define CMDQ_WQE_SIZE 64
#define CMDQ_DEPTH SZ_4K
-#define CMDQ_WQ_PAGE_SIZE SZ_4K
+#define CMDQ_WQ_PAGE_SIZE SZ_256K
#define WQE_LCMD_SIZE 64
#define WQE_SCMD_SIZE 64
@@ -705,7 +705,7 @@ static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
/* The data in the HW is in Big Endian Format */
wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
- pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size);
+ pfn = CMDQ_PFN(wq_first_page_paddr, SZ_4K);
ctxt_info->curr_wqe_page_pfn =
HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
@@ -714,16 +714,19 @@ static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
- /* block PFN - Read Modify Write */
- cmdq_first_block_paddr = cmdq_pages->page_paddr;
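+	/* with a single WQ page there is no separate block PFN to compute;
+	 * pfn keeps the first-page value set above
+	 */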
+ if (wq->num_q_pages != 1) {
+ /* block PFN - Read Modify Write */
+ cmdq_first_block_paddr = cmdq_pages->page_paddr;
- pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
+ pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
+ }
ctxt_info->wq_block_pfn =
HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);
cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
+ cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(cmdqs->hwif);
cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
}
@@ -795,11 +798,6 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
size_t cmdq_ctxts_size;
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI function type\n");
- return -EINVAL;
- }
-
cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
if (!cmdq_ctxts)
@@ -851,6 +849,25 @@ err_init_cmdq:
return err;
}
+static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct hinic_cmd_hw_ioctxt hw_ioctxt = { 0 };
+ struct hinic_pfhwdev *pfhwdev;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+
+ hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE;
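+	/* the cmdq_depth field carries log2(depth) rather than the raw value */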
+ hw_ioctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_HWCTXT_SET,
+ &hw_ioctxt, sizeof(hw_ioctxt), NULL,
+ NULL, HINIC_MGMT_MSG_SYNC);
+}
+
/**
* hinic_init_cmdqs - init all cmdqs
* @cmdqs: cmdqs to init
@@ -901,8 +918,18 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
cmdq_ceq_handler);
+
+ err = hinic_set_cmdq_depth(hwdev, CMDQ_DEPTH);
+ if (err) {
+ dev_err(&hwif->pdev->dev, "Failed to set cmdq depth\n");
+ goto err_set_cmdq_depth;
+ }
+
return 0;
+err_set_cmdq_depth:
+ hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
+
err_cmdq_ctxt:
hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
HINIC_MAX_CMDQ_TYPES);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
index 7a434b653faa..3e4b0aef9fe6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -122,7 +122,7 @@ struct hinic_cmdq_ctxt {
u16 func_idx;
u8 cmdq_type;
- u8 rsvd1[1];
+ u8 ppf_idx;
u8 rsvd2[4];
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
index cdec1d0a3962..7e84e4e33fff 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
@@ -10,7 +10,7 @@
/* HW interface registers */
#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0
#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4
-
+#define HINIC_CSR_FUNC_ATTR2_ADDR 0x8
#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10
#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index c7c75b772a86..0245da02efbb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -15,7 +15,9 @@
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/err.h>
+#include <linux/netdevice.h>
+#include "hinic_sriov.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_mgmt.h"
@@ -42,24 +44,6 @@ enum io_status {
IO_RUNNING = 1,
};
-enum hw_ioctxt_set_cmdq_depth {
- HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
-};
-
-/* HW struct */
-struct hinic_dev_cap {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 rsvd1[5];
- u8 intr_type;
- u8 rsvd2[66];
- u16 max_sqs;
- u16 max_rqs;
- u8 rsvd3[208];
-};
-
/**
* get_capability - convert device capabilities to NIC capabilities
* @hwdev: the HW device to set and convert device capabilities for
@@ -67,16 +51,13 @@ struct hinic_dev_cap {
*
* Return 0 - Success, negative - Failure
**/
-static int get_capability(struct hinic_hwdev *hwdev,
- struct hinic_dev_cap *dev_cap)
+static int parse_capability(struct hinic_hwdev *hwdev,
+ struct hinic_dev_cap *dev_cap)
{
struct hinic_cap *nic_cap = &hwdev->nic_cap;
int num_aeqs, num_ceqs, num_irqs;
- if (!HINIC_IS_PF(hwdev->hwif) && !HINIC_IS_PPF(hwdev->hwif))
- return -EINVAL;
-
- if (dev_cap->intr_type != INTR_MSIX_TYPE)
+ if (!HINIC_IS_VF(hwdev->hwif) && dev_cap->intr_type != INTR_MSIX_TYPE)
return -EFAULT;
num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
@@ -89,13 +70,19 @@ static int get_capability(struct hinic_hwdev *hwdev,
if (nic_cap->num_qps > HINIC_Q_CTXT_MAX)
nic_cap->num_qps = HINIC_Q_CTXT_MAX;
- nic_cap->max_qps = dev_cap->max_sqs + 1;
- if (nic_cap->max_qps != (dev_cap->max_rqs + 1))
- return -EFAULT;
+ if (!HINIC_IS_VF(hwdev->hwif))
+ nic_cap->max_qps = dev_cap->max_sqs + 1;
+ else
+ nic_cap->max_qps = dev_cap->max_sqs;
if (nic_cap->num_qps > nic_cap->max_qps)
nic_cap->num_qps = nic_cap->max_qps;
+ if (!HINIC_IS_VF(hwdev->hwif)) {
+ nic_cap->max_vf = dev_cap->max_vf;
+ nic_cap->max_vf_qps = dev_cap->max_vf_sqs + 1;
+ }
+
return 0;
}
@@ -105,27 +92,26 @@ static int get_capability(struct hinic_hwdev *hwdev,
*
* Return 0 - Success, negative - Failure
**/
-static int get_cap_from_fw(struct hinic_pfhwdev *pfhwdev)
+static int get_capability(struct hinic_pfhwdev *pfhwdev)
{
struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_dev_cap dev_cap;
- u16 in_len, out_len;
+ u16 out_len;
int err;
- in_len = 0;
out_len = sizeof(dev_cap);
err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM,
- HINIC_CFG_NIC_CAP, &dev_cap, in_len, &dev_cap,
- &out_len, HINIC_MGMT_MSG_SYNC);
+ HINIC_CFG_NIC_CAP, &dev_cap, sizeof(dev_cap),
+ &dev_cap, &out_len, HINIC_MGMT_MSG_SYNC);
if (err) {
dev_err(&pdev->dev, "Failed to get capability from FW\n");
return err;
}
- return get_capability(hwdev, &dev_cap);
+ return parse_capability(hwdev, &dev_cap);
}
/**
@@ -144,15 +130,14 @@ static int get_dev_cap(struct hinic_hwdev *hwdev)
switch (HINIC_FUNC_TYPE(hwif)) {
case HINIC_PPF:
case HINIC_PF:
+ case HINIC_VF:
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- err = get_cap_from_fw(pfhwdev);
+ err = get_capability(pfhwdev);
if (err) {
- dev_err(&pdev->dev, "Failed to get capability from FW\n");
+ dev_err(&pdev->dev, "Failed to get capability\n");
return err;
}
break;
-
default:
dev_err(&pdev->dev, "Unsupported PCI Function type\n");
return -EINVAL;
@@ -225,15 +210,8 @@ static void disable_msix(struct hinic_hwdev *hwdev)
int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd,
@@ -241,6 +219,19 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
HINIC_MGMT_MSG_SYNC);
}
+int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ struct hinic_pfhwdev *pfhwdev;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_HILINK, cmd,
+ buf_in, in_size, buf_out, out_size,
+ HINIC_MGMT_MSG_SYNC);
+}
+
/**
* init_fw_ctxt- Init Firmware tables before network mgmt and io operations
* @hwdev: the NIC HW device
@@ -252,14 +243,9 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_cmd_fw_ctxt fw_ctxt;
- u16 out_size;
+ u16 out_size = sizeof(fw_ctxt);
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
@@ -283,19 +269,13 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
*
* Return 0 - Success, negative - Failure
**/
-static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
- unsigned int sq_depth)
+static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int sq_depth,
+ unsigned int rq_depth)
{
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_cmd_hw_ioctxt hw_ioctxt;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
@@ -374,11 +354,6 @@ static int clear_io_resources(struct hinic_hwdev *hwdev)
struct hinic_pfhwdev *pfhwdev;
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
/* sleep 100ms to wait for firmware stopping I/O */
msleep(100);
@@ -410,14 +385,8 @@ static int set_resources_state(struct hinic_hwdev *hwdev,
{
struct hinic_cmd_set_res_state res_state;
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
res_state.state = state;
@@ -441,8 +410,8 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
{
struct hinic_cmd_base_qpn cmd_base_qpn;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(cmd_base_qpn);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -466,7 +435,7 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
*
* Return 0 - Success, negative - Failure
**/
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
+int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)
{
struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
struct hinic_cap *nic_cap = &hwdev->nic_cap;
@@ -488,6 +457,9 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
ceq_msix_entries = &hwdev->msix_entries[num_aeqs];
+ func_to_io->hwdev = hwdev;
+ func_to_io->sq_depth = sq_depth;
+ func_to_io->rq_depth = rq_depth;
err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
ceq_msix_entries);
@@ -513,7 +485,7 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
hinic_db_state_set(hwif, HINIC_DB_ENABLE);
}
- err = set_hw_ioctxt(hwdev, HINIC_SQ_DEPTH, HINIC_RQ_DEPTH);
+ err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);
if (err) {
dev_err(&pdev->dev, "Failed to set HW IO ctxt\n");
goto err_hw_ioctxt;
@@ -558,17 +530,10 @@ void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
u16 in_size, void *buf_out,
u16 *out_size))
{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
struct hinic_nic_cb *nic_cb;
u8 cmd_cb;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return;
- }
-
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
@@ -588,15 +553,12 @@ void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
enum hinic_mgmt_msg_cmd cmd)
{
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
struct hinic_nic_cb *nic_cb;
u8 cmd_cb;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
+ if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif))
return;
- }
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
@@ -676,10 +638,23 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
return err;
}
- hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC,
- pfhwdev, nic_mgmt_msg_handler);
+ err = hinic_func_to_func_init(hwdev);
+ if (err) {
+ dev_err(&hwif->pdev->dev, "Failed to init mailbox\n");
+ hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
+ return err;
+ }
+
+ if (!HINIC_IS_VF(hwif))
+ hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_L2NIC, pfhwdev,
+ nic_mgmt_msg_handler);
+ else
+ hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ nic_mgmt_msg_handler);
hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
+
return 0;
}
@@ -693,11 +668,43 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
- hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC);
+ if (!HINIC_IS_VF(hwdev->hwif))
+ hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_L2NIC);
+ else
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+
+ hinic_func_to_func_free(hwdev);
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
}
+static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmd_l2nic_reset l2nic_reset = {0};
+ u16 out_size = sizeof(l2nic_reset);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ l2nic_reset.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ /* 0 represents standard l2nic reset flow */
+ l2nic_reset.reset_flag = 0;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_L2NIC_RESET, &l2nic_reset,
+ sizeof(l2nic_reset), &l2nic_reset,
+ &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || l2nic_reset.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, l2nic_reset.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* hinic_init_hwdev - Initialize the NIC HW
* @pdev: the NIC pci device
@@ -723,12 +730,6 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
return ERR_PTR(err);
}
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- err = -EFAULT;
- goto err_func_type;
- }
-
pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL);
if (!pfhwdev) {
err = -ENOMEM;
@@ -766,12 +767,22 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
goto err_init_pfhwdev;
}
+ err = hinic_l2nic_reset(hwdev);
+ if (err)
+ goto err_l2nic_reset;
+
err = get_dev_cap(hwdev);
if (err) {
dev_err(&pdev->dev, "Failed to get device capabilities\n");
goto err_dev_cap;
}
+ err = hinic_vf_func_init(hwdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init nic mbox\n");
+ goto err_vf_func_init;
+ }
+
err = init_fw_ctxt(hwdev);
if (err) {
dev_err(&pdev->dev, "Failed to init function table\n");
@@ -788,6 +799,9 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
err_resources_state:
err_init_fw_ctxt:
+ hinic_vf_func_free(hwdev);
+err_vf_func_init:
+err_l2nic_reset:
err_dev_cap:
free_pfhwdev(pfhwdev);
@@ -799,7 +813,6 @@ err_aeqs_init:
err_init_msix:
err_pfhwdev_alloc:
-err_func_type:
hinic_free_hwif(hwif);
return ERR_PTR(err);
}
@@ -930,15 +943,9 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
{
struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
struct hinic_cmd_hw_ci hw_ci;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
hw_ci.dma_attr_off = 0;
hw_ci.pending_limit = pending_limit;
hw_ci.coalesc_timer = coalesc_timer;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 66fd2340d447..71ea7e46dbbc 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -16,18 +16,33 @@
#include "hinic_hw_mgmt.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_io.h"
+#include "hinic_hw_mbox.h"
#define HINIC_MAX_QPS 32
#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \
HINIC_MGMT_MSG_CMD_BASE)
+#define HINIC_PF_SET_VF_ALREADY 0x4
+#define HINIC_MGMT_STATUS_EXIST 0x6
+#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF
+
struct hinic_cap {
u16 max_qps;
u16 num_qps;
+ u8 max_vf;
+ u16 max_vf_qps;
+};
+
+enum hw_ioctxt_set_cmdq_depth {
+ HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
+ HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE,
};
enum hinic_port_cmd {
+ HINIC_PORT_CMD_VF_REGISTER = 0x0,
+ HINIC_PORT_CMD_VF_UNREGISTER = 0x1,
+
HINIC_PORT_CMD_CHANGE_MTU = 2,
HINIC_PORT_CMD_ADD_VLAN = 3,
@@ -39,6 +54,9 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_SET_RX_MODE = 12,
+ HINIC_PORT_CMD_GET_PAUSE_INFO = 20,
+ HINIC_PORT_CMD_SET_PAUSE_INFO = 21,
+
HINIC_PORT_CMD_GET_LINK_STATE = 24,
HINIC_PORT_CMD_SET_LRO = 25,
@@ -77,19 +95,45 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_FWCTXT_INIT = 69,
+ HINIC_PORT_CMD_ENABLE_SPOOFCHK = 78,
+
HINIC_PORT_CMD_GET_MGMT_VERSION = 88,
HINIC_PORT_CMD_SET_FUNC_STATE = 93,
HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
+ HINIC_PORT_CMD_SET_VF_RATE = 105,
+
+ HINIC_PORT_CMD_SET_VF_VLAN = 106,
+
+ HINIC_PORT_CMD_CLR_VF_VLAN,
+
HINIC_PORT_CMD_SET_TSO = 112,
HINIC_PORT_CMD_SET_RQ_IQ_MAP = 115,
+ HINIC_PORT_CMD_LINK_STATUS_REPORT = 160,
+
+ HINIC_PORT_CMD_UPDATE_MAC = 164,
+
HINIC_PORT_CMD_GET_CAP = 170,
+ HINIC_PORT_CMD_GET_LINK_MODE = 217,
+
+ HINIC_PORT_CMD_SET_SPEED = 218,
+
+ HINIC_PORT_CMD_SET_AUTONEG = 219,
+
HINIC_PORT_CMD_SET_LRO_TIMER = 244,
+
+ HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 249,
+};
+
+/* cmd of mgmt CPU message for HILINK module */
+enum hinic_hilink_cmd {
+ HINIC_HILINK_CMD_GET_LINK_INFO = 0x3,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
};
enum hinic_ucode_cmd {
@@ -191,6 +235,17 @@ struct hinic_cmd_set_res_state {
u32 rsvd2;
};
+struct hinic_ceq_ctrl_reg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 q_id;
+ u32 ctrl0;
+ u32 ctrl1;
+};
+
struct hinic_cmd_base_qpn {
u8 status;
u8 version;
@@ -219,12 +274,22 @@ struct hinic_cmd_hw_ci {
u64 ci_addr;
};
+struct hinic_cmd_l2nic_reset {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 reset_flag;
+};
+
struct hinic_hwdev {
struct hinic_hwif *hwif;
struct msix_entry *msix_entries;
struct hinic_aeqs aeqs;
struct hinic_func_to_io func_to_io;
+ struct hinic_mbox_func_to_func *func_to_func;
struct hinic_cap nic_cap;
};
@@ -246,6 +311,25 @@ struct hinic_pfhwdev {
struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD];
};
+struct hinic_dev_cap {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 rsvd1[5];
+ u8 intr_type;
+ u8 max_cos_id;
+ u8 er_id;
+ u8 port_id;
+ u8 max_vf;
+ u8 rsvd2[62];
+ u16 max_sqs;
+ u16 max_rqs;
+ u16 max_vf_sqs;
+ u16 max_vf_rqs;
+ u8 rsvd3[204];
+};
+
void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
enum hinic_mgmt_msg_cmd cmd, void *handle,
void (*handler)(void *handle, void *buf_in,
@@ -259,7 +343,11 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
void *buf_in, u16 in_size, void *buf_out,
u16 *out_size);
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev);
+int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+
+int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);
void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index c0b6bcb067cd..397936cac304 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -17,6 +17,7 @@
#include <asm/byteorder.h>
#include <asm/barrier.h>
+#include "hinic_hw_dev.h"
#include "hinic_hw_csr.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
@@ -416,11 +417,11 @@ static irqreturn_t ceq_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-static void set_ctrl0(struct hinic_eq *eq)
+static u32 get_ctrl0_val(struct hinic_eq *eq, u32 addr)
{
struct msix_entry *msix_entry = &eq->msix_entry;
enum hinic_eq_type type = eq->type;
- u32 addr, val, ctrl0;
+ u32 val, ctrl0;
if (type == HINIC_AEQ) {
/* RMW Ctrl0 */
@@ -440,9 +441,7 @@ static void set_ctrl0(struct hinic_eq *eq)
HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);
val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
+ } else {
/* RMW Ctrl0 */
addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
@@ -462,16 +461,28 @@ static void set_ctrl0(struct hinic_eq *eq)
HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);
val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
}
+ return val;
}
-static void set_ctrl1(struct hinic_eq *eq)
+static void set_ctrl0(struct hinic_eq *eq)
{
+ u32 val, addr;
+
+ if (eq->type == HINIC_AEQ)
+ addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
+ else
+ addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
+
+ val = get_ctrl0_val(eq, addr);
+
+ hinic_hwif_write_reg(eq->hwif, addr, val);
+}
+
+static u32 get_ctrl1_val(struct hinic_eq *eq, u32 addr)
+{
+ u32 page_size_val, elem_size, val, ctrl1;
enum hinic_eq_type type = eq->type;
- u32 page_size_val, elem_size;
- u32 addr, val, ctrl1;
if (type == HINIC_AEQ) {
/* RMW Ctrl1 */
@@ -491,9 +502,7 @@ static void set_ctrl1(struct hinic_eq *eq)
HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
val |= ctrl1;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
+ } else {
/* RMW Ctrl1 */
addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
@@ -508,19 +517,70 @@ static void set_ctrl1(struct hinic_eq *eq)
HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
val |= ctrl1;
+ }
+ return val;
+}
- hinic_hwif_write_reg(eq->hwif, addr, val);
+static void set_ctrl1(struct hinic_eq *eq)
+{
+ u32 addr, val;
+
+ if (eq->type == HINIC_AEQ)
+ addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
+ else
+ addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
+
+ val = get_ctrl1_val(eq, addr);
+
+ hinic_hwif_write_reg(eq->hwif, addr, val);
+}
+
+static int set_ceq_ctrl_reg(struct hinic_eq *eq)
+{
+ struct hinic_ceq_ctrl_reg ceq_ctrl = {0};
+ struct hinic_hwdev *hwdev = eq->hwdev;
+ u16 out_size = sizeof(ceq_ctrl);
+ u16 in_size = sizeof(ceq_ctrl);
+ struct hinic_pfhwdev *pfhwdev;
+ u32 addr;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
+ ceq_ctrl.ctrl0 = get_ctrl0_val(eq, addr);
+ addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
+ ceq_ctrl.ctrl1 = get_ctrl1_val(eq, addr);
+
+ ceq_ctrl.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ ceq_ctrl.q_id = eq->q_id;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP,
+ &ceq_ctrl, in_size,
+ &ceq_ctrl, &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || ceq_ctrl.status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n",
+ eq->q_id, err, ceq_ctrl.status, out_size);
+ return -EFAULT;
}
+
+ return 0;
}
/**
* set_eq_ctrls - setting eq's ctrl registers
* @eq: the Event Queue for setting
**/
-static void set_eq_ctrls(struct hinic_eq *eq)
+static int set_eq_ctrls(struct hinic_eq *eq)
{
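+	/* VFs set the CEQ ctrl registers through the management CPU rather
+	 * than writing them directly
+	 */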
+ if (HINIC_IS_VF(eq->hwif) && eq->type == HINIC_CEQ)
+ return set_ceq_ctrl_reg(eq);
+
set_ctrl0(eq);
set_ctrl1(eq);
+ return 0;
}
/**
@@ -703,7 +763,12 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
return -EINVAL;
}
- set_eq_ctrls(eq);
+ err = set_eq_ctrls(eq);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set eq ctrls\n");
+ return err;
+ }
+
eq_update_ci(eq, EQ_ARMED);
err = alloc_eq_pages(eq);
@@ -859,6 +924,7 @@ int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
ceqs->num_ceqs = num_ceqs;
for (q_id = 0; q_id < num_ceqs; q_id++) {
+ ceqs->ceq[q_id].hwdev = ceqs->hwdev;
err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
page_size, msix_entries[q_id]);
if (err) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
index d35f2068ee0c..74b9ff90640c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
@@ -143,8 +143,9 @@ enum hinic_eq_type {
};
enum hinic_aeq_type {
+ HINIC_MBX_FROM_FUNC = 1,
HINIC_MSG_FROM_MGMT_CPU = 2,
-
+ HINIC_MBX_SEND_RSLT = 5,
HINIC_MAX_AEQ_EVENTS,
};
@@ -171,7 +172,7 @@ struct hinic_eq_work {
struct hinic_eq {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
enum hinic_eq_type type;
int q_id;
u32 q_len;
@@ -219,7 +220,7 @@ struct hinic_ceq_cb {
struct hinic_ceqs {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
struct hinic_eq ceq[HINIC_MAX_CEQS];
int num_ceqs;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index 07bbfbf68577..cf127d896ba6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
#include "hinic_hw_csr.h"
#include "hinic_hw_if.h"
@@ -18,6 +19,8 @@
#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs)
+#define WAIT_HWIF_READY_TIMEOUT 10000
+
/**
* hinic_msix_attr_set - set message attribute for msix entry
* @hwif: the HW interface of a pci function device
@@ -115,8 +118,12 @@ int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
**/
void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
{
- u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
+ u32 attr5;
+
+ if (HINIC_IS_VF(hwif))
+ return;
+ attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION);
attr5 |= HINIC_FA5_SET(action, PF_ACTION);
@@ -183,27 +190,47 @@ void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
**/
static int hwif_ready(struct hinic_hwif *hwif)
{
- struct pci_dev *pdev = hwif->pdev;
u32 addr, attr1;
addr = HINIC_CSR_FUNC_ATTR1_ADDR;
attr1 = hinic_hwif_read_reg(hwif, addr);
- if (!HINIC_FA1_GET(attr1, INIT_STATUS)) {
- dev_err(&pdev->dev, "hwif status is not ready\n");
- return -EFAULT;
+ if (!HINIC_FA1_GET(attr1, MGMT_INIT_STATUS))
+ return -EBUSY;
+
+ if (HINIC_IS_VF(hwif)) {
+ if (!HINIC_FA1_GET(attr1, PF_INIT_STATUS))
+ return -EBUSY;
}
return 0;
}
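+/* Poll hwif_ready about once per millisecond, for up to
+ * WAIT_HWIF_READY_TIMEOUT iterations (roughly 10 seconds), before giving up.
+ */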
+static int wait_hwif_ready(struct hinic_hwif *hwif)
+{
+ unsigned long timeout = 0;
+
+ do {
+ if (!hwif_ready(hwif))
+ return 0;
+
+ usleep_range(999, 1000);
+ timeout++;
+ } while (timeout <= WAIT_HWIF_READY_TIMEOUT);
+
+ dev_err(&hwif->pdev->dev, "Wait for hwif timeout\n");
+
+ return -EBUSY;
+}
+
/**
* set_hwif_attr - set the attributes in the relevant members in hwif
* @hwif: the HW interface of a pci function device
* @attr0: the first attribute that was read from the hw
* @attr1: the second attribute that was read from the hw
**/
-static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
+static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1,
+ u32 attr2)
{
hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX);
hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX);
@@ -214,6 +241,8 @@ static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC));
hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC));
hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC));
+ hwif->attr.global_vf_id_of_pf = HINIC_FA2_GET(attr2,
+ GLOBAL_VF_ID_OF_PF);
}
/**
@@ -222,7 +251,7 @@ static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
**/
static void read_hwif_attr(struct hinic_hwif *hwif)
{
- u32 addr, attr0, attr1;
+ u32 addr, attr0, attr1, attr2;
addr = HINIC_CSR_FUNC_ATTR0_ADDR;
attr0 = hinic_hwif_read_reg(hwif, addr);
@@ -230,7 +259,10 @@ static void read_hwif_attr(struct hinic_hwif *hwif)
addr = HINIC_CSR_FUNC_ATTR1_ADDR;
attr1 = hinic_hwif_read_reg(hwif, addr);
- set_hwif_attr(hwif, attr0, attr1);
+ addr = HINIC_CSR_FUNC_ATTR2_ADDR;
+ attr2 = hinic_hwif_read_reg(hwif, addr);
+
+ set_hwif_attr(hwif, attr0, attr1, attr2);
}
/**
@@ -309,6 +341,34 @@ static void dma_attr_init(struct hinic_hwif *hwif)
HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE);
}
+u16 hinic_glb_pf_vf_offset(struct hinic_hwif *hwif)
+{
+ if (!hwif)
+ return 0;
+
+ return hwif->attr.global_vf_id_of_pf;
+}
+
+u16 hinic_global_func_id_hw(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+
+ return HINIC_FA0_GET(attr0, FUNC_IDX);
+}
+
+u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+
+ return HINIC_FA0_GET(attr0, PF_IDX);
+}
+
/**
* hinic_init_hwif - initialize the hw interface
* @hwif: the HW interface of a pci function device
@@ -335,7 +395,7 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
goto err_map_intr_bar;
}
- err = hwif_ready(hwif);
+ err = wait_hwif_ready(hwif);
if (err) {
dev_err(&pdev->dev, "HW interface is not ready\n");
goto err_hwif_ready;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index c7bb9ceca72c..0872e035faa1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -35,6 +35,7 @@
#define HINIC_FA0_FUNC_IDX_SHIFT 0
#define HINIC_FA0_PF_IDX_SHIFT 10
#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14
+#define HINIC_FA0_VF_IN_PF_SHIFT 16
/* reserved members - off 16 */
#define HINIC_FA0_FUNC_TYPE_SHIFT 24
@@ -42,6 +43,7 @@
#define HINIC_FA0_PF_IDX_MASK 0xF
#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3
#define HINIC_FA0_FUNC_TYPE_MASK 0x1
+#define HINIC_FA0_VF_IN_PF_MASK 0xFF
#define HINIC_FA0_GET(val, member) \
(((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK)
@@ -53,17 +55,25 @@
#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20
#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24
/* reserved members - off 27 */
-#define HINIC_FA1_INIT_STATUS_SHIFT 30
+#define HINIC_FA1_MGMT_INIT_STATUS_SHIFT 30
+#define HINIC_FA1_PF_INIT_STATUS_SHIFT 31
#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3
#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7
#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF
#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7
-#define HINIC_FA1_INIT_STATUS_MASK 0x1
+#define HINIC_FA1_MGMT_INIT_STATUS_MASK 0x1
+#define HINIC_FA1_PF_INIT_STATUS_MASK 0x1
#define HINIC_FA1_GET(val, member) \
(((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK)
+#define HINIC_FA2_GLOBAL_VF_ID_OF_PF_SHIFT 16
+#define HINIC_FA2_GLOBAL_VF_ID_OF_PF_MASK 0x3FF
+
+#define HINIC_FA2_GET(val, member) \
+ (((val) >> HINIC_FA2_##member##_SHIFT) & HINIC_FA2_##member##_MASK)
+
#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0
#define HINIC_FA4_DB_STATE_SHIFT 1
@@ -140,6 +150,7 @@
#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
+#define HINIC_IS_VF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_VF)
#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF)
@@ -173,6 +184,7 @@ enum hinic_pcie_tph {
enum hinic_func_type {
HINIC_PF = 0,
+ HINIC_VF = 1,
HINIC_PPF = 2,
};
@@ -180,7 +192,7 @@ enum hinic_mod_type {
HINIC_MOD_COMM = 0, /* HW communication module */
HINIC_MOD_L2NIC = 1, /* L2NIC module */
HINIC_MOD_CFGM = 7, /* Configuration module */
-
+ HINIC_MOD_HILINK = 14, /* Hilink module */
HINIC_MOD_MAX = 15
};
@@ -223,6 +235,8 @@ struct hinic_func_attr {
u8 num_ceqs;
u8 num_dma_attr;
+
+ u16 global_vf_id_of_pf;
};
struct hinic_hwif {
@@ -271,6 +285,12 @@ enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif);
void hinic_db_state_set(struct hinic_hwif *hwif,
enum hinic_db_state db_state);
+u16 hinic_glb_pf_vf_offset(struct hinic_hwif *hwif);
+
+u16 hinic_global_func_id_hw(struct hinic_hwif *hwif);
+
+u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif);
+
int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev);
void hinic_free_hwif(struct hinic_hwif *hwif);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index d66f86fa3f46..3e3fa742e476 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/err.h>
+#include "hinic_hw_dev.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_wqe.h"
@@ -34,6 +35,8 @@
#define DB_IDX(db, db_base) \
(((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)
+#define HINIC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12)))
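+/* Encodes a WQ page size as log2 of its number of 4K pages, e.g. for a
+ * 256K page: (SZ_256K >> 12) = 64 and ilog2(64) = 6.
+ */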
+
enum io_cmd {
IO_CMD_MODIFY_QUEUE_CTXT = 0,
IO_CMD_CLEAN_QUEUE_CTXT,
@@ -279,7 +282,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
- HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
+ func_to_io->sq_depth, HINIC_SQ_WQE_MAX_SIZE);
if (err) {
dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
return err;
@@ -287,7 +290,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
- HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
+ func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);
if (err) {
dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
goto err_rq_alloc;
@@ -484,6 +487,33 @@ void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
devm_kfree(&pdev->dev, func_to_io->qps);
}
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size)
+{
+ struct hinic_wq_page_size page_size_info = {0};
+ u16 out_size = sizeof(page_size_info);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ page_size_info.func_idx = func_idx;
+ page_size_info.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+ page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size);
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_PAGESIZE_SET, &page_size_info,
+ sizeof(page_size_info), &page_size_info,
+ &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || page_size_info.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x\n",
+ err, page_size_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
/**
* hinic_io_init - Initialize the IO components
* @func_to_io: func to io channel that holds the IO components
@@ -506,6 +536,7 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io,
func_to_io->hwif = hwif;
func_to_io->qps = NULL;
func_to_io->max_qps = max_qps;
+ func_to_io->ceqs.hwdev = func_to_io->hwdev;
err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
@@ -541,6 +572,14 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io,
func_to_io->cmdq_db_area[cmdq] = db_area;
}
+ err = hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(hwif),
+ HINIC_DEFAULT_WQ_PAGE_SIZE);
+ if (err) {
+ dev_err(&func_to_io->hwif->pdev->dev, "Failed to set wq page size\n");
+ goto init_wq_pg_size_err;
+ }
+
err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
func_to_io->cmdq_db_area);
if (err) {
@@ -551,6 +590,11 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io,
return 0;
err_init_cmdqs:
+ if (!HINIC_IS_VF(func_to_io->hwif))
+ hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(hwif),
+ HINIC_HW_WQ_PAGE_SIZE);
+init_wq_pg_size_err:
err_db_area:
for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);
@@ -575,6 +619,11 @@ void hinic_io_free(struct hinic_func_to_io *func_to_io)
hinic_free_cmdqs(&func_to_io->cmdqs);
+ if (!HINIC_IS_VF(func_to_io->hwif))
+ hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(func_to_io->hwif),
+ HINIC_HW_WQ_PAGE_SIZE);
+
for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
index cac2b722e7dc..214f162f7579 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
@@ -20,6 +20,8 @@
#define HINIC_DB_PAGE_SIZE SZ_4K
#define HINIC_DB_SIZE SZ_4M
+#define HINIC_HW_WQ_PAGE_SIZE SZ_4K
+#define HINIC_DEFAULT_WQ_PAGE_SIZE SZ_256K
#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE)
@@ -47,7 +49,7 @@ struct hinic_free_db_area {
struct hinic_func_to_io {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
struct hinic_ceqs ceqs;
struct hinic_wqs wqs;
@@ -58,6 +60,9 @@ struct hinic_func_to_io {
struct hinic_qp *qps;
u16 max_qps;
+ u16 sq_depth;
+ u16 rq_depth;
+
void __iomem **sq_db;
void __iomem *db_base;
@@ -69,8 +74,27 @@ struct hinic_func_to_io {
void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES];
struct hinic_cmdqs cmdqs;
+
+ u16 max_vfs;
+ struct vf_data_storage *vf_infos;
+ u8 link_status;
+};
+
+struct hinic_wq_page_size {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 page_size;
+
+ u32 rsvd1;
};
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size);
+
int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
u16 base_qpn, int num_qps,
struct msix_entry *sq_msix_entries,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
new file mode 100644
index 000000000000..bc2f87e6cb5d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "hinic_hw_if.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw_csr.h"
+#include "hinic_hw_dev.h"
+#include "hinic_hw_mbox.h"
+
+#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0
+#define HINIC_MBOX_INT_DST_AEQN_SHIFT 10
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12
+#define HINIC_MBOX_INT_STAT_DMA_SHIFT 14
+/* The size of the data to be sent (in units of 4 bytes) */
+#define HINIC_MBOX_INT_TX_SIZE_SHIFT 20
+/* SO_RO(strong order, relax order) */
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25
+#define HINIC_MBOX_INT_WB_EN_SHIFT 28
+
+#define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF
+#define HINIC_MBOX_INT_DST_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F
+#define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3
+#define HINIC_MBOX_INT_WB_EN_MASK 0x1
+
+#define HINIC_MBOX_INT_SET(val, field) \
+ (((val) & HINIC_MBOX_INT_##field##_MASK) << \
+ HINIC_MBOX_INT_##field##_SHIFT)
+
+enum hinic_mbox_tx_status {
+ TX_NOT_DONE = 1,
+};
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0
+
+/* specifies the state of the Tx request for the message data.
+ * 0 - Tx request is done;
+ * 1 - Tx request is in progress.
+ */
+#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1
+#define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1
+
+#define HINIC_MBOX_CTRL_SET(val, field) \
+ (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
+ HINIC_MBOX_CTRL_##field##_SHIFT)
+
+#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MBOX_HEADER_MODULE_SHIFT 11
+#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MBOX_HEADER_SEQID_SHIFT 24
+#define HINIC_MBOX_HEADER_LAST_SHIFT 30
+
+/* specifies the mailbox message direction
+ * 0 - send
+ * 1 - receive
+ */
+#define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MBOX_HEADER_CMD_SHIFT 32
+#define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40
+#define HINIC_MBOX_HEADER_STATUS_SHIFT 48
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54
+
+#define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MBOX_HEADER_MODULE_MASK 0x1F
+#define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MBOX_HEADER_SEQID_MASK 0x3F
+#define HINIC_MBOX_HEADER_LAST_MASK 0x1
+#define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MBOX_HEADER_CMD_MASK 0xFF
+#define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF
+#define HINIC_MBOX_HEADER_STATUS_MASK 0x3F
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF
+
+#define HINIC_MBOX_HEADER_GET(val, field) \
+ (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
+ HINIC_MBOX_HEADER_##field##_MASK)
+#define HINIC_MBOX_HEADER_SET(val, field) \
+ ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
+ HINIC_MBOX_HEADER_##field##_SHIFT)
+
+#define MBOX_SEGLEN_MASK \
+ HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN)
+
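+/* Every mailbox segment carries this 64-bit header (built with
+ * HINIC_MBOX_HEADER_SET) followed by up to HINIC_MBOX_SEG_LEN bytes of
+ * payload; the SEQID and LAST fields let the receiver reassemble
+ * multi-segment messages.
+ */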
+#define HINIC_MBOX_SEG_LEN 48
+#define HINIC_MBOX_COMP_TIME 8000U
+#define MBOX_MSG_POLLING_TIMEOUT 8000
+
+#define HINIC_MBOX_DATA_SIZE 2040
+
+#define MBOX_MAX_BUF_SZ 2048UL
+#define MBOX_HEADER_SZ 8
+
+#define MBOX_INFO_SZ 4
+
+/* MBOX size is 64B, 8B for mbox_header, 4B reserved */
+#define MBOX_SEG_LEN 48
+#define MBOX_SEG_LEN_ALIGN 4
+#define MBOX_WB_STATUS_LEN 16UL
+
+/* mbox write back status is 16B, only first 4B is used */
+#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF
+#define MBOX_WB_STATUS_MASK 0xFF
+#define MBOX_WB_ERROR_CODE_MASK 0xFF00
+#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
+#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
+#define MBOX_WB_STATUS_NOT_FINISHED 0x00
+
+#define MBOX_STATUS_FINISHED(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
+#define MBOX_STATUS_SUCCESS(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS)
+#define MBOX_STATUS_ERRCODE(wb) \
+ ((wb) & MBOX_WB_ERROR_CODE_MASK)
+
+#define SEQ_ID_START_VAL 0
+#define SEQ_ID_MAX_VAL 42
+
+#define DST_AEQ_IDX_DEFAULT_VAL 0
+#define SRC_AEQ_IDX_DEFAULT_VAL 0
+#define NO_DMA_ATTRIBUTE_VAL 0
+
+#define HINIC_MGMT_RSP_AEQN 0
+#define HINIC_MBOX_RSP_AEQN 2
+#define HINIC_MBOX_RECV_AEQN 0
+
+#define MBOX_MSG_NO_DATA_LEN 1
+
+#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ)
+#define MBOX_AREA(hwif) \
+ ((hwif)->cfg_regs_bar + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)
+
+#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
+
+#define MBOX_RESPONSE_ERROR 0x1
+#define MBOX_MSG_ID_MASK 0xFF
+#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
+#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
+ (MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK)
+
+#define FUNC_ID_OFF_SET_8B 8
+#define FUNC_ID_OFF_SET_10B 10
+
+/* max number of messages waiting to be processed for one function */
+#define HINIC_MAX_MSG_CNT_TO_PROCESS 10
+
+#define HINIC_QUEUE_MIN_DEPTH 6
+#define HINIC_QUEUE_MAX_DEPTH 12
+#define HINIC_MAX_RX_BUFFER_SIZE 15
+
+enum hinic_hwif_direction_type {
+ HINIC_HWIF_DIRECT_SEND = 0,
+ HINIC_HWIF_RESPONSE = 1,
+};
+
+enum mbox_send_mod {
+ MBOX_SEND_MSG_INT,
+};
+
+enum mbox_seg_type {
+ NOT_LAST_SEG,
+ LAST_SEG,
+};
+
+enum mbox_ordering_type {
+ STRONG_ORDER,
+};
+
+enum mbox_write_back_type {
+ WRITE_BACK = 1,
+};
+
+enum mbox_aeq_trig_type {
+ NOT_TRIGGER,
+ TRIGGER,
+};
+
+/**
+ * hinic_register_pf_mbox_cb - register mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->pf_mbox_cb[mod] = callback;
+
+ set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+ return 0;
+}
+
+/**
+ * hinic_register_vf_mbox_cb - register mbox callback for vf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_vf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->vf_mbox_cb[mod] = callback;
+
+ set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+ return 0;
+}
+
+/**
+ * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ func_to_func->pf_mbox_cb[mod] = NULL;
+}
+
+/**
+ * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ func_to_func->vf_mbox_cb[mod] = NULL;
+}
+
+static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ void *buf_out, u16 *out_size)
+{
+ hinic_vf_mbox_cb cb;
+ int ret = 0;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
+ recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ set_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
+ cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
+ recv_mbox->mbox_len, buf_out, out_size);
+ } else {
+ dev_err(&func_to_func->hwif->pdev->dev, "VF mbox cb is not registered\n");
+ ret = -EINVAL;
+ }
+
+ clear_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
+}
+
+static int
+recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx, void *buf_out,
+ u16 *out_size)
+{
+ hinic_pf_mbox_cb cb;
+ u16 vf_id = 0;
+ int ret;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
+ recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ set_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
+ vf_id = src_func_idx -
+ hinic_glb_pf_vf_offset(func_to_func->hwif);
+ ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
+ recv_mbox->mbox, recv_mbox->mbox_len,
+ buf_out, out_size);
+ } else {
+ dev_err(&func_to_func->hwif->pdev->dev, "PF mbox mod(0x%x) cb is not registered\n",
+ recv_mbox->mod);
+ ret = -EINVAL;
+ }
+
+ clear_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
+}
+
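+/* Segments of a message must arrive with consecutive sequence ids starting
+ * at 0 and with segment lengths no larger than MBOX_SEG_LEN; anything else
+ * is rejected.
+ */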
+static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
+ u8 seq_id, u8 seg_len)
+{
+ if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN)
+ return false;
+
+ if (seq_id == 0) {
+ recv_mbox->seq_id = seq_id;
+ } else {
+ if (seq_id != recv_mbox->seq_id + 1)
+ return false;
+
+ recv_mbox->seq_id = seq_id;
+ }
+
+ return true;
+}
+
+static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox)
+{
+ spin_lock(&func_to_func->mbox_lock);
+ if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id &&
+ func_to_func->event_flag == EVENT_START)
+ complete(&recv_mbox->recv_done);
+ else
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n",
+ func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
+ recv_mbox->msg_info.status);
+ spin_unlock(&func_to_func->mbox_lock);
+}
+
+static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx);
+
+static void recv_func_mbox_work_handler(struct work_struct *work)
+{
+ struct hinic_mbox_work *mbox_work =
+ container_of(work, struct hinic_mbox_work, work);
+ struct hinic_recv_mbox *recv_mbox;
+
+ recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
+ mbox_work->src_func_idx);
+
+ recv_mbox =
+ &mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx];
+
+ atomic_dec(&recv_mbox->msg_cnt);
+
+ kfree(mbox_work);
+}
+
+static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ void *header, struct hinic_recv_mbox *recv_mbox)
+{
+ void *mbox_body = MBOX_BODY_FROM_HDR(header);
+ struct hinic_recv_mbox *rcv_mbox_temp = NULL;
+ u64 mbox_header = *((u64 *)header);
+ struct hinic_mbox_work *mbox_work;
+ u8 seq_id, seg_len;
+ u16 src_func_idx;
+ int pos;
+
+ seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
+ seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
+ src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
+ src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
+ recv_mbox->seq_id = SEQ_ID_MAX_VAL;
+ return;
+ }
+
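+ /* reassemble: each segment lands at seq_id * MBOX_SEG_LEN in the
+ * receive buffer; the message is handled once the LAST segment arrives
+ */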
+ pos = seq_id * MBOX_SEG_LEN;
+ memcpy((u8 *)recv_mbox->mbox + pos, mbox_body,
+ HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN));
+
+ if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
+ return;
+
+ recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
+ recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
+ recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
+ recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
+ recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
+ recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
+ recv_mbox->seq_id = SEQ_ID_MAX_VAL;
+
+ if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
+ HINIC_HWIF_RESPONSE) {
+ resp_mbox_handler(func_to_func, recv_mbox);
+ return;
+ }
+
+ if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) {
+ dev_warn(&func_to_func->hwif->pdev->dev,
+ "This function(%u) have %d message wait to process,can't add to work queue\n",
+ src_func_idx, atomic_read(&recv_mbox->msg_cnt));
+ return;
+ }
+
+ rcv_mbox_temp = kmemdup(recv_mbox, sizeof(*rcv_mbox_temp), GFP_KERNEL);
+ if (!rcv_mbox_temp)
+ return;
+
+ rcv_mbox_temp->mbox = kmemdup(recv_mbox->mbox, MBOX_MAX_BUF_SZ,
+ GFP_KERNEL);
+ if (!rcv_mbox_temp->mbox)
+ goto err_alloc_rcv_mbox_msg;
+
+ rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!rcv_mbox_temp->buf_out)
+ goto err_alloc_rcv_mbox_buf;
+
+ mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
+ if (!mbox_work)
+ goto err_alloc_mbox_work;
+
+ mbox_work->func_to_func = func_to_func;
+ mbox_work->recv_mbox = rcv_mbox_temp;
+ mbox_work->src_func_idx = src_func_idx;
+
+ atomic_inc(&recv_mbox->msg_cnt);
+ INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
+ queue_work(func_to_func->workq, &mbox_work->work);
+
+ return;
+
+err_alloc_mbox_work:
+ kfree(rcv_mbox_temp->buf_out);
+
+err_alloc_rcv_mbox_buf:
+ kfree(rcv_mbox_temp->mbox);
+
+err_alloc_rcv_mbox_msg:
+ kfree(rcv_mbox_temp);
+}
+
+void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ u64 mbox_header = *((u64 *)header);
+ struct hinic_recv_mbox *recv_mbox;
+ u64 src, dir;
+
+ func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
+
+ dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION);
+ src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (src >= HINIC_MAX_FUNCTIONS) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mailbox source function id:%u is invalid\n", (u32)src);
+ return;
+ }
+
+ recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ?
+ &func_to_func->mbox_send[src] :
+ &func_to_func->mbox_resp[src];
+
+ recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
+}
+
+void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_send_mbox *send_mbox;
+
+ func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
+ send_mbox = &func_to_func->send_mbox;
+
+ complete(&send_mbox->send_done);
+}
+
+static void clear_mbox_status(struct hinic_send_mbox *mbox)
+{
+ *mbox->wb_status = 0;
+
+ /* clear mailbox write back status */
+ wmb();
+}
+
+static void mbox_copy_header(struct hinic_hwdev *hwdev,
+ struct hinic_send_mbox *mbox, u64 *header)
+{
+ u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
+ u32 *data = (u32 *)header;
+
+ for (i = 0; i < idx_max; i++)
+ __raw_writel(*(data + i), mbox->data + i * sizeof(u32));
+}
+
+static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
+ struct hinic_send_mbox *mbox, void *seg,
+ u16 seg_len)
+{
+ u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
+ u32 data_len, chk_sz = sizeof(u32);
+ u32 *data = seg;
+ u32 i, idx_max;
+
+ /* The mbox message should be aligned to 4 bytes. */
+ if (seg_len % chk_sz) {
+ memcpy(mbox_max_buf, seg, seg_len);
+ data = (u32 *)mbox_max_buf;
+ }
+
+ data_len = seg_len;
+ idx_max = ALIGN(data_len, chk_sz) / chk_sz;
+
+ for (i = 0; i < idx_max; i++)
+ __raw_writel(*(data + i),
+ mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
+}
+
+static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
+ u16 dst_func, u16 dst_aeqn, u16 seg_len,
+ int poll)
+{
+ u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
+ u32 mbox_int, mbox_ctrl;
+
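+ /* TX_SIZE is in 4-byte units: (48B segment + 8B header + 4B info) / 4 */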
+ mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
+ HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) |
+ HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) |
+ HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) |
+ HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ +
+ MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2,
+ TX_SIZE) |
+ HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
+ HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);
+
+ hinic_hwif_write_reg(func_to_func->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);
+
+ wmb(); /* writing the mbox int attributes */
+ mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS);
+
+ if (poll)
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE);
+ else
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);
+
+ hinic_hwif_write_reg(func_to_func->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
+}
+
+static void dump_mox_reg(struct hinic_hwdev *hwdev)
+{
+ u32 val;
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
+ dev_err(&hwdev->hwif->pdev->dev, "Mailbox control reg: 0x%x\n", val);
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
+ dev_err(&hwdev->hwif->pdev->dev, "Mailbox interrupt offset: 0x%x\n",
+ val);
+}
+
+static u16 get_mbox_status(struct hinic_send_mbox *mbox)
+{
+ /* the write back status is 16B, but only the first 4B are used */
+ u64 wb_val = be64_to_cpu(*mbox->wb_status);
+
+ rmb(); /* verify reading before check */
+
+ return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
+}
+
+static int
+wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
+ int poll, u16 *wb_status)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct completion *done = &send_mbox->send_done;
+ u32 cnt = 0;
+ unsigned long jif;
+
+ if (poll) {
+ while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
+ *wb_status = get_mbox_status(send_mbox);
+ if (MBOX_STATUS_FINISHED(*wb_status))
+ break;
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
+ *wb_status);
+ dump_mox_reg(hwdev);
+ return -ETIMEDOUT;
+ }
+ } else {
+ jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
+ if (!wait_for_completion_timeout(done, jif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
+ dump_mox_reg(hwdev);
+ return -ETIMEDOUT;
+ }
+
+ *wb_status = get_mbox_status(send_mbox);
+ }
+
+ return 0;
+}
+
+static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
+ u64 header, u16 dst_func, void *seg, u16 seg_len,
+ int poll, void *msg_info)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct completion *done = &send_mbox->send_done;
+ u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
+ u16 dst_aeqn, wb_status = 0, errcode;
+
+ if (num_aeqs >= 4)
+ dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
+ HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
+ else
+ dst_aeqn = 0;
+
+ if (!poll)
+ init_completion(done);
+
+ clear_mbox_status(send_mbox);
+
+ mbox_copy_header(hwdev, send_mbox, &header);
+
+ mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
+
+ write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll);
+
+ wmb(); /* writing the mbox msg attributes */
+
+ if (wait_for_mbox_seg_completion(func_to_func, poll, &wb_status))
+ return -ETIMEDOUT;
+
+ if (!MBOX_STATUS_SUCCESS(wb_status)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment to function %d error, wb status: 0x%x\n",
+ dst_func, wb_status);
+ errcode = MBOX_STATUS_ERRCODE(wb_status);
+ return errcode ? errcode : -EFAULT;
+ }
+
+ return 0;
+}
+
+static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, void *msg,
+ u16 msg_len, u16 dst_func,
+ enum hinic_hwif_direction_type direction,
+ enum hinic_mbox_ack_type ack_type,
+ struct mbox_msg_info *msg_info)
+{
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u16 seg_len = MBOX_SEG_LEN;
+ u8 *msg_seg = (u8 *)msg;
+ u16 left = msg_len;
+ u32 seq_id = 0;
+ u64 header = 0;
+ int err = 0;
+
+ down(&func_to_func->msg_send_sem);
+
+ header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MBOX_HEADER_SET(mod, MODULE) |
+ HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
+ HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
+ HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
+ HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
+ HINIC_MBOX_HEADER_SET(cmd, CMD) |
+ /* The VF's offset to its associated PF */
+ HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
+ HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
+ HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
+ SRC_GLB_FUNC_IDX);
+
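+ /* send the message in segments of at most HINIC_MBOX_SEG_LEN bytes;
+ * the final segment carries the LAST flag and the remaining length
+ */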
+ while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
+ if (left <= HINIC_MBOX_SEG_LEN) {
+ header &= ~MBOX_SEGLEN_MASK;
+ header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
+ header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);
+
+ seg_len = left;
+ }
+
+ err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
+ seg_len, MBOX_SEND_MSG_INT, msg_info);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
+ HINIC_MBOX_HEADER_GET(header, SEQID));
+ goto err_send_mbox_seg;
+ }
+
+ left -= HINIC_MBOX_SEG_LEN;
+ msg_seg += HINIC_MBOX_SEG_LEN;
+
+ seq_id++;
+ header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
+ SEQID));
+ header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
+ }
+
+err_send_mbox_seg:
+ up(&func_to_func->msg_send_sem);
+
+ return err;
+}
+
+static void
+response_for_recv_func_mbox(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox, int err,
+ u16 out_size, u16 src_func_idx)
+{
+ struct mbox_msg_info msg_info = {0};
+
+ if (recv_mbox->ack_type == MBOX_ACK) {
+ msg_info.msg_id = recv_mbox->msg_info.msg_id;
+ if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
+ else if (err == HINIC_MBOX_VF_CMD_ERROR)
+ msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
+ else if (err)
+ msg_info.status = HINIC_MBOX_PF_SEND_ERR;
+
+ /* if no data needs to be sent in the response, set out_size to 1 */
+ if (!out_size || err)
+ out_size = MBOX_MSG_NO_DATA_LEN;
+
+ send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
+ recv_mbox->buf_out, out_size, src_func_idx,
+ HINIC_HWIF_RESPONSE, MBOX_ACK,
+ &msg_info);
+ }
+}
+
+static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx)
+{
+ void *buf_out = recv_mbox->buf_out;
+ u16 out_size = MBOX_MAX_BUF_SZ;
+ int err = 0;
+
+ if (HINIC_IS_VF(func_to_func->hwif)) {
+ err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
+ &out_size);
+ } else {
+ if (IS_PF_OR_PPF_SRC(src_func_idx))
+ dev_warn(&func_to_func->hwif->pdev->dev,
+ "Unsupported pf2pf mbox msg\n");
+ else
+ err = recv_pf_from_vf_mbox_handler(func_to_func,
+ recv_mbox,
+ src_func_idx,
+ buf_out, &out_size);
+ }
+
+ response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size,
+ src_func_idx);
+ kfree(recv_mbox->buf_out);
+ kfree(recv_mbox->mbox);
+ kfree(recv_mbox);
+}
+
+static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
+ enum mbox_event_state event_flag)
+{
+ spin_lock(&func_to_func->mbox_lock);
+ func_to_func->event_flag = event_flag;
+ spin_unlock(&func_to_func->mbox_lock);
+}
+
+static int mbox_resp_info_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *mbox_for_resp,
+ enum hinic_mod_type mod, u16 cmd,
+ void *buf_out, u16 *out_size)
+{
+ int err;
+
+ if (mbox_for_resp->msg_info.status) {
+ err = mbox_for_resp->msg_info.status;
+ if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&func_to_func->hwif->pdev->dev, "Mbox response error(0x%x)\n",
+ mbox_for_resp->msg_info.status);
+ return err;
+ }
+
+ if (buf_out && out_size) {
+ if (*out_size < mbox_for_resp->mbox_len) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Invalid response mbox message length: %d for mod %d cmd %d, should less than: %d\n",
+ mbox_for_resp->mbox_len, mod, cmd, *out_size);
+ return -EFAULT;
+ }
+
+ if (mbox_for_resp->mbox_len)
+ memcpy(buf_out, mbox_for_resp->mbox,
+ mbox_for_resp->mbox_len);
+
+ *out_size = mbox_for_resp->mbox_len;
+ }
+
+ return 0;
+}
+
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_recv_mbox *mbox_for_resp;
+ struct mbox_msg_info msg_info = {0};
+ unsigned long timeo;
+ int err;
+
+ mbox_for_resp = &func_to_func->mbox_resp[dst_func];
+
+ down(&func_to_func->mbox_send_sem);
+
+ init_completion(&mbox_for_resp->recv_done);
+
+ msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func);
+
+ set_mbox_to_func_event(func_to_func, EVENT_START);
+
+ err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
+ dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
+ &msg_info);
+ if (err) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Send mailbox failed, msg_id: %d\n",
+ msg_info.msg_id);
+ set_mbox_to_func_event(func_to_func, EVENT_FAIL);
+ goto err_send_mbox;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
+ if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
+ set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
+ err = -ETIMEDOUT;
+ goto err_send_mbox;
+ }
+
+ set_mbox_to_func_event(func_to_func, EVENT_END);
+
+ err = mbox_resp_info_handler(func_to_func, mbox_for_resp, mod, cmd,
+ buf_out, out_size);
+
+err_send_mbox:
+ up(&func_to_func->mbox_send_sem);
+
+ return err;
+}
+
+static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
+ void *buf_in, u16 in_size)
+{
+ if (in_size > HINIC_MBOX_DATA_SIZE) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mbox msg len(%d) exceed limit(%d)\n",
+ in_size, HINIC_MBOX_DATA_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+
+ if (err)
+ return err;
+
+ if (!HINIC_IS_VF(hwdev->hwif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
+ HINIC_FUNC_TYPE(hwdev->hwif));
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd,
+ hinic_pf_id_of_vf_hw(hwdev->hwif), buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+
+int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ u16 dst_func_idx;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ func_to_func = hwdev->func_to_func;
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err)
+ return err;
+
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
+ HINIC_FUNC_TYPE(hwdev->hwif));
+ return -EINVAL;
+ }
+
+ if (!vf_id) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "VF id(%d) error!\n", vf_id);
+ return -EINVAL;
+ }
+
+ /* vf_offset_to_pf + vf_id is the VF's global function id within
+ * this PF
+ */
+ dst_func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+
+static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ int err;
+
+ mbox_info->seq_id = SEQ_ID_MAX_VAL;
+
+ mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->mbox)
+ return -ENOMEM;
+
+ mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->buf_out) {
+ err = -ENOMEM;
+ goto err_alloc_buf_out;
+ }
+
+ atomic_set(&mbox_info->msg_cnt, 0);
+
+ return 0;
+
+err_alloc_buf_out:
+ kfree(mbox_info->mbox);
+
+ return err;
+}
+
+static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ kfree(mbox_info->buf_out);
+ kfree(mbox_info->mbox);
+}
+
+static int alloc_mbox_info(struct hinic_hwdev *hwdev,
+ struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx, i;
+ int err;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
+ err = init_mbox_info(&mbox_info[func_idx]);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to init function %d mbox info\n",
+ func_idx);
+ goto err_init_mbox_info;
+ }
+ }
+
+ return 0;
+
+err_init_mbox_info:
+ for (i = 0; i < func_idx; i++)
+ clean_mbox_info(&mbox_info[i]);
+
+ return err;
+}
+
+static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
+ clean_mbox_info(&mbox_info[func_idx]);
+}
+
+static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+
+ send_mbox->data = MBOX_AREA(func_to_func->hwif);
+}
+
+static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u32 addr_h, addr_l;
+
+ send_mbox->wb_vaddr = dma_alloc_coherent(&hwdev->hwif->pdev->dev,
+ MBOX_WB_STATUS_LEN,
+ &send_mbox->wb_paddr,
+ GFP_KERNEL);
+ if (!send_mbox->wb_vaddr)
+ return -ENOMEM;
+
+ send_mbox->wb_status = send_mbox->wb_vaddr;
+
+ addr_h = upper_32_bits(send_mbox->wb_paddr);
+ addr_l = lower_32_bits(send_mbox->wb_paddr);
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ addr_h);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ addr_l);
+
+ return 0;
+}
+
+static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ 0);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ 0);
+
+ dma_free_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN,
+ send_mbox->wb_vaddr,
+ send_mbox->wb_paddr);
+}
+
+static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hwdev = handle;
+ struct hinic_pfhwdev *pfhwdev;
+ int err = 0;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ if (cmd == HINIC_COMM_CMD_START_FLR) {
+ *out_size = 0;
+ } else {
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ cmd, buf_in, in_size, buf_out, out_size,
+ HINIC_MGMT_MSG_SYNC);
+ if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&hwdev->hwif->pdev->dev,
+ "PF mbox common callback handler err: %d\n",
+ err);
+ }
+
+ return err;
+}
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+ func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
+ if (!func_to_func)
+ return -ENOMEM;
+
+ hwdev->func_to_func = func_to_func;
+ func_to_func->hwdev = hwdev;
+ func_to_func->hwif = hwdev->hwif;
+ sema_init(&func_to_func->mbox_send_sem, 1);
+ sema_init(&func_to_func->msg_send_sem, 1);
+ spin_lock_init(&func_to_func->mbox_lock);
+ func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME);
+ if (!func_to_func->workq) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to initialize MBOX workqueue\n");
+ err = -ENOMEM;
+ goto err_create_mbox_workq;
+ }
+
+ err = alloc_mbox_info(hwdev, func_to_func->mbox_send);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_active\n");
+ goto err_alloc_mbox_for_send;
+ }
+
+ err = alloc_mbox_info(hwdev, func_to_func->mbox_resp);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_passive\n");
+ goto err_alloc_mbox_for_resp;
+ }
+
+ err = alloc_mbox_wb_status(func_to_func);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mbox write back status\n");
+ goto err_alloc_wb_status;
+ }
+
+ prepare_send_mbox(func_to_func);
+
+ hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC,
+ &pfhwdev->hwdev, hinic_mbox_func_aeqe_handler);
+ hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT,
+ &pfhwdev->hwdev, hinic_mbox_self_aeqe_handler);
+
+ if (!HINIC_IS_VF(hwdev->hwif))
+ hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
+ comm_pf_mbox_handler);
+
+ return 0;
+
+err_alloc_wb_status:
+ free_mbox_info(func_to_func->mbox_resp);
+
+err_alloc_mbox_for_resp:
+ free_mbox_info(func_to_func->mbox_send);
+
+err_alloc_mbox_for_send:
+ destroy_workqueue(func_to_func->workq);
+
+err_create_mbox_workq:
+ kfree(func_to_func);
+
+ return err;
+}
+
+void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC);
+ hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT);
+
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
+ /* destroy the workqueue before freeing the related mbox resources to
+ * avoid illegal resource access
+ */
+ destroy_workqueue(func_to_func->workq);
+
+ free_mbox_wb_status(func_to_func);
+ free_mbox_info(func_to_func->mbox_resp);
+ free_mbox_info(func_to_func->mbox_send);
+
+ kfree(func_to_func);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
new file mode 100644
index 000000000000..7b18559bfe80
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_MBOX_H_
+#define HINIC_MBOX_H_
+
+#define HINIC_MBOX_PF_SEND_ERR 0x1
+#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2
+#define HINIC_MBOX_VF_CMD_ERROR 0x3
+
+#define HINIC_MAX_FUNCTIONS 512
+
+#define HINIC_MAX_PF_FUNCS 16
+
+#define HINIC_MBOX_WQ_NAME "hinic_mbox"
+
+#define HINIC_FUNC_CSR_MAILBOX_DATA_OFF 0x80
+#define HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF 0x0100
+#define HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF 0x0104
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C
+
+enum hinic_mbox_ack_type {
+ MBOX_ACK,
+ MBOX_NO_ACK,
+};
+
+struct mbox_msg_info {
+ u8 msg_id;
+ u8 status;
+};
+
+struct hinic_recv_mbox {
+ struct completion recv_done;
+ void *mbox;
+ u8 cmd;
+ enum hinic_mod_type mod;
+ u16 mbox_len;
+ void *buf_out;
+ enum hinic_mbox_ack_type ack_type;
+ struct mbox_msg_info msg_info;
+ u8 seq_id;
+ atomic_t msg_cnt;
+};
+
+struct hinic_send_mbox {
+ struct completion send_done;
+ u8 *data;
+
+ u64 *wb_status;
+ void *wb_vaddr;
+ dma_addr_t wb_paddr;
+};
+
+typedef void (*hinic_vf_mbox_cb)(void *handle, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+typedef int (*hinic_pf_mbox_cb)(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
+enum mbox_event_state {
+ EVENT_START = 0,
+ EVENT_FAIL,
+ EVENT_TIMEOUT,
+ EVENT_END,
+};
+
+enum hinic_mbox_cb_state {
+ HINIC_VF_MBOX_CB_REG = 0,
+ HINIC_VF_MBOX_CB_RUNNING,
+ HINIC_PF_MBOX_CB_REG,
+ HINIC_PF_MBOX_CB_RUNNING,
+ HINIC_PPF_MBOX_CB_REG,
+ HINIC_PPF_MBOX_CB_RUNNING,
+ HINIC_PPF_TO_PF_MBOX_CB_REG,
+ HINIC_PPF_TO_PF_MBOX_CB_RUNNIG,
+};
+
+struct hinic_mbox_func_to_func {
+ struct hinic_hwdev *hwdev;
+ struct hinic_hwif *hwif;
+
+ struct semaphore mbox_send_sem;
+ struct semaphore msg_send_sem;
+ struct hinic_send_mbox send_mbox;
+
+ struct workqueue_struct *workq;
+
+ struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS];
+ struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS];
+
+ hinic_vf_mbox_cb vf_mbox_cb[HINIC_MOD_MAX];
+ hinic_pf_mbox_cb pf_mbox_cb[HINIC_MOD_MAX];
+ unsigned long pf_mbox_cb_state[HINIC_MOD_MAX];
+ unsigned long vf_mbox_cb_state[HINIC_MOD_MAX];
+
+ u8 send_msg_id;
+ enum mbox_event_state event_flag;
+
+ /* lock for mbox event flag */
+ spinlock_t mbox_lock;
+};
+
+struct hinic_mbox_work {
+ struct work_struct work;
+ u16 src_func_idx;
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_recv_mbox *recv_mbox;
+};
+
+struct vf_cmd_msg_handle {
+ u8 cmd;
+ int (*cmd_msg_handler)(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+};
+
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_mbox_cb callback);
+
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_vf_mbox_cb callback);
+
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size);
+
+void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size);
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index 992908e6eebf..c33eb1147055 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -361,7 +361,11 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
- return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ if (HINIC_IS_VF(hwif))
+ return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in,
+ in_size, buf_out, out_size, 0);
+ else
+ return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
buf_out, out_size, MGMT_DIRECT_SEND,
MSG_NOT_RESP, timeout);
}
@@ -398,8 +402,8 @@ static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
recv_msg->msg, recv_msg->msg_len,
buf_out, &out_size);
else
- dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n",
- recv_msg->mod);
+ dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
+ recv_msg->mod, recv_msg->cmd);
mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
@@ -561,6 +565,10 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
int err;
pf_to_mgmt->hwif = hwif;
+ pf_to_mgmt->hwdev = hwdev;
+
+ if (HINIC_IS_VF(hwif))
+ return 0;
sema_init(&pf_to_mgmt->sync_msg_lock, 1);
pf_to_mgmt->sync_msg_id = 0;
@@ -592,6 +600,9 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+ if (HINIC_IS_VF(hwdev->hwif))
+ return;
+
hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
index 182fba17b643..c2b142c08b0e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
@@ -60,7 +60,9 @@ enum hinic_cfg_cmd {
};
enum hinic_comm_cmd {
+ HINIC_COMM_CMD_START_FLR = 0x1,
HINIC_COMM_CMD_IO_STATUS_GET = 0x3,
+ HINIC_COMM_CMD_DMA_ATTR_SET = 0x4,
HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10,
HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11,
@@ -74,7 +76,13 @@ enum hinic_comm_cmd {
HINIC_COMM_CMD_IO_RES_CLEAR = 0x29,
- HINIC_COMM_CMD_MAX = 0x32,
+ HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33,
+
+ HINIC_COMM_CMD_L2NIC_RESET = 0x4b,
+
+ HINIC_COMM_CMD_PAGESIZE_SET = 0x50,
+
+ HINIC_COMM_CMD_MAX = 0x51,
};
enum hinic_mgmt_cb_state {
@@ -107,7 +115,7 @@ struct hinic_mgmt_cb {
struct hinic_pf_to_mgmt {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
struct semaphore sync_msg_lock;
u16 sync_msg_id;
u8 *sync_msg_buf;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index be364b7a7019..fcf7bfe4aa47 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -108,7 +108,12 @@ void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
+ /* If only one page, use 0-level CLA */
+ if (wq->num_q_pages == 1)
+ wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq_page_addr);
+ else
+ wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
+
wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
@@ -638,6 +643,7 @@ void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
/* increment prod_idx to the next */
prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ prod_idx = SQ_MASKED_IDX(sq, prod_idx);
wmb(); /* Write all before the doorbell */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index 79091e131418..ca3e2d060284 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -38,12 +38,15 @@
#define HINIC_SQ_WQEBB_SIZE 64
#define HINIC_RQ_WQEBB_SIZE 32
-#define HINIC_SQ_PAGE_SIZE SZ_4K
-#define HINIC_RQ_PAGE_SIZE SZ_4K
+#define HINIC_SQ_PAGE_SIZE SZ_256K
+#define HINIC_RQ_PAGE_SIZE SZ_256K
#define HINIC_SQ_DEPTH SZ_4K
#define HINIC_RQ_DEPTH SZ_4K
+#define HINIC_MAX_QUEUE_DEPTH SZ_4K
+#define HINIC_MIN_QUEUE_DEPTH 128
+
/* In any change to HINIC_RX_BUF_SZ, HINIC_RX_BUF_SZ_IDX must be changed */
#define HINIC_RX_BUF_SZ 2048
#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 03363216ff59..5dc3743f8091 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -503,7 +503,7 @@ err_alloc_wq_pages:
* Return 0 - Success, negative - Failure
**/
int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
+ u16 wqebb_size, u32 wq_page_size, u16 q_depth,
u16 max_wqe_size)
{
struct hinic_hwif *hwif = wqs->hwif;
@@ -600,7 +600,7 @@ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
**/
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
+ int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
u16 q_depth, u16 max_wqe_size)
{
struct pci_dev *pdev = hwif->pdev;
@@ -768,7 +768,10 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
*prod_idx = curr_prod_idx;
- if (curr_pg != end_pg) {
+	/* If we only have one page, we still need to get a shadow wqe when
+	 * the wqe rolls over the page boundary
+	 */
+ if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 811eef744140..b06f8c0255de 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -26,7 +26,7 @@ struct hinic_wq {
int block_idx;
u16 wqebb_size;
- u16 wq_page_size;
+ u32 wq_page_size;
u16 q_depth;
u16 max_wqe_size;
u16 num_wqebbs_per_page;
@@ -76,7 +76,7 @@ struct hinic_cmdq_pages {
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
+ int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
u16 q_depth, u16 max_wqe_size);
void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
@@ -88,7 +88,7 @@ int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs,
void hinic_wqs_free(struct hinic_wqs *wqs);
int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
+ u16 wqebb_size, u32 wq_page_size, u16 q_depth,
u16 max_wqe_size);
void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 63b92f6cc856..c8ab129a7ae8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -29,6 +29,7 @@
#include "hinic_tx.h"
#include "hinic_rx.h"
#include "hinic_dev.h"
+#include "hinic_sriov.h"
MODULE_AUTHOR("Huawei Technologies CO., Ltd");
MODULE_DESCRIPTION("Huawei Intelligent NIC driver");
@@ -46,6 +47,7 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0200
#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ 0x0205
#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ 0x0210
+#define HINIC_DEV_ID_VF 0x375e
#define HINIC_WQ_NAME "hinic_dev"
@@ -65,6 +67,8 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
#define rx_mode_work_to_nic_dev(rx_mode_work) \
container_of(rx_mode_work, struct hinic_dev, rx_mode_work)
+#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
+
static int change_mac_addr(struct net_device *netdev, const u8 *addr);
static int set_features(struct hinic_dev *nic_dev,
@@ -368,14 +372,15 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev)
netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
}
-static int hinic_open(struct net_device *netdev)
+int hinic_open(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
enum hinic_port_link_state link_state;
int err, ret;
if (!(nic_dev->flags & HINIC_INTF_UP)) {
- err = hinic_hwdev_ifup(nic_dev->hwdev);
+ err = hinic_hwdev_ifup(nic_dev->hwdev, nic_dev->sq_depth,
+ nic_dev->rq_depth);
if (err) {
netif_err(nic_dev, drv, netdev,
"Failed - HW interface up\n");
@@ -423,9 +428,6 @@ static int hinic_open(struct net_device *netdev)
goto err_func_port_state;
}
- /* Wait up to 3 sec between port enable to link state */
- msleep(3000);
-
down(&nic_dev->mgmt_lock);
err = hinic_port_link_state(nic_dev, &link_state);
@@ -434,6 +436,9 @@ static int hinic_open(struct net_device *netdev)
goto err_port_link;
}
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state);
+
if (link_state == HINIC_LINK_STATE_UP)
nic_dev->flags |= HINIC_LINK_UP;
@@ -479,7 +484,7 @@ err_create_txqs:
return err;
}
-static int hinic_close(struct net_device *netdev)
+int hinic_close(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
unsigned int flags;
@@ -496,6 +501,9 @@ static int hinic_close(struct net_device *netdev)
up(&nic_dev->mgmt_lock);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
+
hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
@@ -673,7 +681,7 @@ static int hinic_vlan_rx_add_vid(struct net_device *netdev,
}
err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
- if (err) {
+ if (err && err != HINIC_PF_SET_VF_ALREADY) {
netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
goto err_add_mac;
}
@@ -725,8 +733,6 @@ static void set_rx_mode(struct work_struct *work)
struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
- netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");
-
hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);
__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
@@ -745,10 +751,12 @@ static void hinic_set_rx_mode(struct net_device *netdev)
HINIC_RX_MODE_MC |
HINIC_RX_MODE_BC;
- if (netdev->flags & IFF_PROMISC)
- rx_mode |= HINIC_RX_MODE_PROMISC;
- else if (netdev->flags & IFF_ALLMULTI)
+ if (netdev->flags & IFF_PROMISC) {
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ rx_mode |= HINIC_RX_MODE_PROMISC;
+ } else if (netdev->flags & IFF_ALLMULTI) {
rx_mode |= HINIC_RX_MODE_MC_ALL;
+ }
rx_mode_work->rx_mode = rx_mode;
@@ -758,8 +766,26 @@ static void hinic_set_rx_mode(struct net_device *netdev)
static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 sw_pi, hw_ci, sw_ci;
+ struct hinic_sq *sq;
+ u16 num_sqs, q_id;
+
+ num_sqs = hinic_hwdev_num_qps(nic_dev->hwdev);
netif_err(nic_dev, drv, netdev, "Tx timeout\n");
+
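+	/* dump the SW/HW producer and consumer indexes of every stopped tx queue to help debugging */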
+ for (q_id = 0; q_id < num_sqs; q_id++) {
+ if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
+ continue;
+
+ sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);
+ sw_pi = atomic_read(&sq->wq->prod_idx) & sq->wq->mask;
+ hw_ci = be16_to_cpu(*(u16 *)(sq->hw_ci_addr)) & sq->wq->mask;
+ sw_ci = atomic_read(&sq->wq->cons_idx) & sq->wq->mask;
+ netif_err(nic_dev, drv, netdev, "Txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx\n",
+ q_id, sw_pi, hw_ci, sw_ci,
+ nic_dev->txqs[q_id].napi.state);
+ }
}
static void hinic_get_stats64(struct net_device *netdev,
@@ -825,6 +851,29 @@ static const struct net_device_ops hinic_netdev_ops = {
.ndo_get_stats64 = hinic_get_stats64,
.ndo_fix_features = hinic_fix_features,
.ndo_set_features = hinic_set_features,
+ .ndo_set_vf_mac = hinic_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
+ .ndo_get_vf_config = hinic_ndo_get_vf_config,
+ .ndo_set_vf_trust = hinic_ndo_set_vf_trust,
+ .ndo_set_vf_rate = hinic_ndo_set_vf_bw,
+ .ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
+ .ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
+};
+
+static const struct net_device_ops hinicvf_netdev_ops = {
+ .ndo_open = hinic_open,
+ .ndo_stop = hinic_close,
+ .ndo_change_mtu = hinic_change_mtu,
+ .ndo_set_mac_address = hinic_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
+ .ndo_set_rx_mode = hinic_set_rx_mode,
+ .ndo_start_xmit = hinic_xmit_frame,
+ .ndo_tx_timeout = hinic_tx_timeout,
+ .ndo_get_stats64 = hinic_get_stats64,
+ .ndo_fix_features = hinic_fix_features,
+ .ndo_set_features = hinic_set_features,
};
static void netdev_features_init(struct net_device *netdev)
@@ -884,6 +933,10 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
}
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev,
+ link_status->link);
+
ret_link_status = buf_out;
ret_link_status->status = 0;
@@ -957,7 +1010,12 @@ static int nic_dev_init(struct pci_dev *pdev)
}
hinic_set_ethtool_ops(netdev);
- netdev->netdev_ops = &hinic_netdev_ops;
+
+ if (!HINIC_IS_VF(hwdev->hwif))
+ netdev->netdev_ops = &hinic_netdev_ops;
+ else
+ netdev->netdev_ops = &hinicvf_netdev_ops;
+
netdev->max_mtu = ETH_MAX_MTU;
nic_dev = netdev_priv(netdev);
@@ -969,6 +1027,10 @@ static int nic_dev_init(struct pci_dev *pdev)
nic_dev->rxqs = NULL;
nic_dev->tx_weight = tx_weight;
nic_dev->rx_weight = rx_weight;
+ nic_dev->sq_depth = HINIC_SQ_DEPTH;
+ nic_dev->rq_depth = HINIC_RQ_DEPTH;
+ nic_dev->sriov_info.hwdev = hwdev;
+ nic_dev->sriov_info.pdev = pdev;
sema_init(&nic_dev->mgmt_lock, 1);
@@ -995,11 +1057,25 @@ static int nic_dev_init(struct pci_dev *pdev)
pci_set_drvdata(pdev, netdev);
err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
- if (err)
- dev_warn(&pdev->dev, "Failed to get mac address\n");
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get mac address\n");
+ goto err_get_mac;
+ }
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ dev_err(&pdev->dev, "Invalid MAC address\n");
+ err = -EIO;
+ goto err_add_mac;
+ }
+
+ dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
+ netdev->dev_addr);
+ eth_hw_addr_random(netdev);
+ }
err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
- if (err) {
+ if (err && err != HINIC_PF_SET_VF_ALREADY) {
dev_err(&pdev->dev, "Failed to add mac\n");
goto err_add_mac;
}
@@ -1041,6 +1117,7 @@ err_set_features:
cancel_work_sync(&rx_mode_work->work);
err_set_mtu:
+err_get_mac:
err_add_mac:
pci_set_drvdata(pdev, NULL);
destroy_workqueue(nic_dev->workq);
@@ -1114,14 +1191,41 @@ err_pci_regions:
return err;
}
+#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
+
+static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
+{
+ struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
+ u32 loop_cnt = 0;
+
+ set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);
+ usleep_range(9900, 10000);
+
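+	/* poll roughly every 10 ms until any in-flight SR-IOV enable/disable completes or the wait limit is hit */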
+ while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) {
+ if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&
+ !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))
+ return;
+
+ usleep_range(9900, 10000);
+ loop_cnt++;
+ }
+}
+
static void hinic_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_rx_mode_work *rx_mode_work;
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ wait_sriov_cfg_complete(nic_dev);
+ hinic_pci_sriov_disable(pdev);
+ }
+
unregister_netdev(netdev);
+ hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
+
hinic_hwdev_cb_unregister(nic_dev->hwdev,
HINIC_MGMT_MSG_CMD_LINK_STATUS);
@@ -1132,6 +1236,8 @@ static void hinic_remove(struct pci_dev *pdev)
destroy_workqueue(nic_dev->workq);
+ hinic_vf_func_free(nic_dev->hwdev);
+
hinic_free_hwdev(nic_dev->hwdev);
free_netdev(netdev);
@@ -1152,6 +1258,7 @@ static const struct pci_device_id hinic_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_VF), 0},
{ 0, 0}
};
MODULE_DEVICE_TABLE(pci, hinic_pci_table);
@@ -1162,6 +1269,7 @@ static struct pci_driver hinic_driver = {
.probe = hinic_probe,
.remove = hinic_remove,
.shutdown = hinic_shutdown,
+ .sriov_configure = hinic_pci_sriov_configure,
};
module_pci_driver(hinic_driver);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 1e389a004e50..175c0ee00038 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -37,20 +37,14 @@ enum mac_op {
static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id, enum mac_op op)
{
- struct net_device *netdev = nic_dev->netdev;
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_port_mac_cmd port_mac_cmd;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mac_cmd);
struct pci_dev *pdev = hwif->pdev;
enum hinic_port_cmd cmd;
- u16 out_size;
int err;
- if (vlan_id >= VLAN_N_VID) {
- netif_err(nic_dev, drv, netdev, "Invalid VLAN number\n");
- return -EINVAL;
- }
-
if (op == MAC_SET)
cmd = HINIC_PORT_CMD_SET_MAC;
else
@@ -63,12 +57,25 @@ static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd,
sizeof(port_mac_cmd),
&port_mac_cmd, &out_size);
- if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) {
+ if (err || out_size != sizeof(port_mac_cmd) ||
+ (port_mac_cmd.status &&
+ port_mac_cmd.status != HINIC_PF_SET_VF_ALREADY &&
+ port_mac_cmd.status != HINIC_MGMT_STATUS_EXIST)) {
dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n",
port_mac_cmd.status);
return -EFAULT;
}
+ if (port_mac_cmd.status == HINIC_PF_SET_VF_ALREADY) {
+ dev_warn(&pdev->dev, "PF has already set VF mac, ignore %s operation\n",
+ (op == MAC_SET) ? "set" : "del");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ if (cmd == HINIC_PORT_CMD_SET_MAC && port_mac_cmd.status ==
+ HINIC_MGMT_STATUS_EXIST)
+ dev_warn(&pdev->dev, "MAC is repeated, ignore set operation\n");
+
return 0;
}
@@ -112,8 +119,8 @@ int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_port_mac_cmd port_mac_cmd;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mac_cmd);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -144,9 +151,9 @@ int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu)
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_port_mtu_cmd port_mtu_cmd;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mtu_cmd);
struct pci_dev *pdev = hwif->pdev;
int err, max_frame;
- u16 out_size;
if (new_mtu < HINIC_MIN_MTU_SIZE) {
netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size");
@@ -248,14 +255,9 @@ int hinic_port_link_state(struct hinic_dev *nic_dev,
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_port_link_cmd link_cmd;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(link_cmd);
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
@@ -284,13 +286,11 @@ int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state)
struct hinic_port_state_cmd port_state;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(port_state);
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
+ if (HINIC_IS_VF(hwdev->hwif))
+ return 0;
port_state.state = state;
@@ -320,7 +320,7 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(func_state);
int err;
func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -351,7 +351,7 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(*port_cap);
int err;
port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -382,7 +382,7 @@ int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_tso_config tso_cfg = {0};
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(tso_cfg);
int err;
tso_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -405,9 +405,9 @@ int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
{
struct hinic_checksum_offload rx_csum_cfg = {0};
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(rx_csum_cfg);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size;
int err;
if (!hwdev)
@@ -443,6 +443,7 @@ int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en)
if (!hwdev)
return -EINVAL;
+ out_size = sizeof(vlan_cfg);
hwif = hwdev->hwif;
pdev = hwif->pdev;
vlan_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -465,14 +466,14 @@ int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs)
{
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_rq_num rq_num = { 0 };
+ struct pci_dev *pdev = hwif->pdev;
u16 out_size = sizeof(rq_num);
int err;
rq_num.func_id = HINIC_HWIF_FUNC_IDX(hwif);
rq_num.num_rqs = num_rqs;
- rq_num.rq_depth = ilog2(HINIC_SQ_DEPTH);
+ rq_num.rq_depth = ilog2(nic_dev->rq_depth);
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP,
&rq_num, sizeof(rq_num),
@@ -491,8 +492,8 @@ static int hinic_set_rx_lro(struct hinic_dev *nic_dev, u8 ipv4_en, u8 ipv6_en,
u8 max_wqe_num)
{
struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_lro_config lro_cfg = { 0 };
+ struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
u16 out_size = sizeof(lro_cfg);
int err;
@@ -568,6 +569,9 @@ int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en,
if (err)
return err;
+ if (HINIC_IS_VF(nic_dev->hwdev->hwif))
+ return 0;
+
err = hinic_set_rx_lro_timer(nic_dev, lro_timer);
if (err)
return err;
@@ -741,9 +745,9 @@ int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
{
struct hinic_rss_context_table ctx_tbl = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(ctx_tbl);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size = sizeof(ctx_tbl);
int err;
if (!hwdev || !rss_type)
@@ -784,7 +788,7 @@ int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id,
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_rss_key rss_key = { 0 };
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(rss_key);
int err;
rss_key.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -809,9 +813,9 @@ int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
{
struct hinic_rss_template_key temp_key = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(temp_key);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size = sizeof(temp_key);
int err;
if (!hwdev || !temp)
@@ -844,7 +848,7 @@ int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(rss_engine);
int err;
rss_engine.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -868,9 +872,9 @@ int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx, u8 *type)
{
struct hinic_rss_engine_type hash_type = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(hash_type);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size = sizeof(hash_type);
int err;
if (!hwdev || !type)
@@ -901,7 +905,7 @@ int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id)
struct hinic_rss_config rss_cfg = { 0 };
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(rss_cfg);
int err;
rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -927,8 +931,8 @@ int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx)
struct hinic_rss_template_mgmt template_mgmt = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(template_mgmt);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -953,8 +957,8 @@ int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx)
struct hinic_rss_template_mgmt template_mgmt = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(template_mgmt);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -1043,9 +1047,9 @@ int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver)
{
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_version_info up_ver = {0};
+ u16 out_size = sizeof(up_ver);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size;
int err;
if (!hwdev)
@@ -1068,3 +1072,132 @@ int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver)
return 0;
}
+
+int hinic_get_link_mode(struct hinic_hwdev *hwdev,
+ struct hinic_link_mode_cmd *link_mode)
+{
+ u16 out_size;
+ int err;
+
+ if (!hwdev || !link_mode)
+ return -EINVAL;
+
+ out_size = sizeof(*link_mode);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,
+ link_mode, sizeof(*link_mode),
+ link_mode, &out_size);
+ if (err || !out_size || link_mode->status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, link_mode->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable)
+{
+ struct hinic_set_autoneg_cmd autoneg = {0};
+ u16 out_size = sizeof(autoneg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ autoneg.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ autoneg.enable = enable;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_AUTONEG,
+ &autoneg, sizeof(autoneg),
+ &autoneg, &out_size);
+ if (err || !out_size || autoneg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to %s autoneg, err: %d, status: 0x%x, out size: 0x%x\n",
+ enable ? "enable" : "disable", err, autoneg.status,
+ out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed)
+{
+ struct hinic_speed_cmd speed_info = {0};
+ u16 out_size = sizeof(speed_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ speed_info.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ speed_info.speed = speed;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_SPEED,
+ &speed_info, sizeof(speed_info),
+ &speed_info, &out_size);
+ if (err || !out_size || speed_info.status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set speed, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, speed_info.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_link_settings(struct hinic_hwdev *hwdev,
+ struct hinic_link_ksettings_info *info)
+{
+ u16 out_size = sizeof(*info);
+ int err;
+
+ err = hinic_hilink_msg_cmd(hwdev, HINIC_HILINK_CMD_SET_LINK_SETTINGS,
+ info, sizeof(*info), info, &out_size);
+ if ((info->status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ info->status) || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, info->status, out_size);
+ return -EFAULT;
+ }
+
+ return info->status;
+}
+
+int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info)
+{
+ u16 out_size = sizeof(*pause_info);
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO,
+ pause_info, sizeof(*pause_info),
+ pause_info, &out_size);
+ if (err || !out_size || pause_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info)
+{
+ u16 out_size = sizeof(*pause_info);
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,
+ pause_info, sizeof(*pause_info),
+ pause_info, &out_size);
+ if (err || !out_size || pause_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index 44772fd47fc1..661c6322dc15 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -79,6 +79,42 @@ enum hinic_speed {
HINIC_SPEED_UNKNOWN = 0xFF,
};
+enum hinic_link_mode {
+ HINIC_10GE_BASE_KR = 0,
+ HINIC_40GE_BASE_KR4 = 1,
+ HINIC_40GE_BASE_CR4 = 2,
+ HINIC_100GE_BASE_KR4 = 3,
+ HINIC_100GE_BASE_CR4 = 4,
+ HINIC_25GE_BASE_KR_S = 5,
+ HINIC_25GE_BASE_CR_S = 6,
+ HINIC_25GE_BASE_KR = 7,
+ HINIC_25GE_BASE_CR = 8,
+ HINIC_GE_BASE_KX = 9,
+ HINIC_LINK_MODE_NUMBERS,
+
+ HINIC_SUPPORTED_UNKNOWN = 0xFFFF,
+};
+
+enum hinic_port_type {
+ HINIC_PORT_TP, /* BASET */
+ HINIC_PORT_AUI,
+ HINIC_PORT_MII,
+ HINIC_PORT_FIBRE, /* OPTICAL */
+ HINIC_PORT_BNC,
+ HINIC_PORT_ELEC,
+ HINIC_PORT_COPPER, /* PORT_DA */
+ HINIC_PORT_AOC,
+ HINIC_PORT_BACKPLANE,
+ HINIC_PORT_NONE = 0xEF,
+ HINIC_PORT_OTHER = 0xFF,
+};
+
+enum hinic_valid_link_settings {
+ HILINK_LINK_SET_SPEED = 0x1,
+ HILINK_LINK_SET_AUTONEG = 0x2,
+ HILINK_LINK_SET_FEC = 0x4,
+};
+
enum hinic_tso_state {
HINIC_TSO_DISABLE = 0,
HINIC_TSO_ENABLE = 1,
@@ -148,9 +184,9 @@ struct hinic_port_link_status {
u8 version;
u8 rsvd0[6];
- u16 rsvd1;
+ u16 func_id;
u8 link;
- u8 rsvd2;
+ u8 port_id;
};
struct hinic_port_func_state_cmd {
@@ -179,6 +215,50 @@ struct hinic_port_cap {
u8 rsvd2[3];
};
+struct hinic_link_mode_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u16 supported; /* 0xFFFF represents invalid value */
+ u16 advertised;
+};
+
+struct hinic_speed_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 speed;
+};
+
+struct hinic_set_autoneg_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+	u16	enable;	/* 1: enable, 0: disable */
+};
+
+struct hinic_link_ksettings_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+
+ u32 valid_bitmap;
+ u32 speed; /* enum nic_speed_level */
+ u8 autoneg; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+ u8 rsvd2[18]; /* reserved for duplex, port, etc. */
+};
+
struct hinic_tso_config {
u8 status;
u8 version;
@@ -506,6 +586,61 @@ struct hinic_cmd_vport_stats {
struct hinic_vport_stats stats;
};
+struct hinic_tx_rate_cfg_max_min {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 min_rate;
+ u32 max_rate;
+ u8 rsvd2[8];
+};
+
+struct hinic_tx_rate_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 tx_rate;
+};
+
+enum nic_speed_level {
+ LINK_SPEED_10MB = 0,
+ LINK_SPEED_100MB,
+ LINK_SPEED_1GB,
+ LINK_SPEED_10GB,
+ LINK_SPEED_25GB,
+ LINK_SPEED_40GB,
+ LINK_SPEED_100GB,
+ LINK_SPEED_LEVELS,
+};
+
+struct hinic_spoofchk_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 state;
+ u8 rsvd1;
+ u16 func_id;
+};
+
+struct hinic_pause_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id);
@@ -585,4 +720,24 @@ int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en);
int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver);
+int hinic_set_link_settings(struct hinic_hwdev *hwdev,
+ struct hinic_link_ksettings_info *info);
+
+int hinic_get_link_mode(struct hinic_hwdev *hwdev,
+ struct hinic_link_mode_cmd *link_mode);
+
+int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable);
+
+int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed);
+
+int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info);
+
+int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info);
+
+int hinic_open(struct net_device *netdev);
+
+int hinic_close(struct net_device *netdev);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 815649e37cb1..af20d0dd6de7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -432,9 +432,11 @@ static int rx_poll(struct napi_struct *napi, int budget)
return budget;
napi_complete(napi);
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- rq->msix_entry,
- HINIC_MSIX_ENABLE);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_ENABLE);
return pkts;
}
@@ -461,9 +463,10 @@ static irqreturn_t rx_irq(int irq, void *data)
/* Disable the interrupt until napi will be completed */
nic_dev = netdev_priv(rxq->netdev);
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- rq->msix_entry,
- HINIC_MSIX_DISABLE);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_DISABLE);
nic_dev = netdev_priv(rxq->netdev);
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
new file mode 100644
index 000000000000..efab2dd2c889
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -0,0 +1,1294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic_hw_dev.h"
+#include "hinic_dev.h"
+#include "hinic_hw_mbox.h"
+#include "hinic_hw_cmdq.h"
+#include "hinic_port.h"
+#include "hinic_sriov.h"
+
+static unsigned char set_vf_link_state;
+module_param(set_vf_link_state, byte, 0444);
+MODULE_PARM_DESC(set_vf_link_state, "Set VF link state: 0 - link auto (default), 1 - link always up, 2 - link always down.");
+
+#define HINIC_VLAN_PRIORITY_SHIFT 13
+#define HINIC_ADD_VLAN_IN_MAC 0x8000
+#define HINIC_TX_RATE_TABLE_FULL 12
+
+static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
+ u16 vlan_id, u16 func_id)
+{
+ struct hinic_port_mac_cmd mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ mac_info.func_idx = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.mac, mac_addr, ETH_ALEN);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
+ if (err || out_size != sizeof(mac_info) ||
+ (mac_info.status && mac_info.status != HINIC_PF_SET_VF_ALREADY &&
+ mac_info.status != HINIC_MGMT_STATUS_EXIST)) {
+ dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to change MAC, ret = %d\n",
+ mac_info.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void hinic_notify_vf_link_status(struct hinic_hwdev *hwdev, u16 vf_id,
+ u8 link_status)
+{
+ struct vf_data_storage *vf_infos = hwdev->func_to_io.vf_infos;
+ struct hinic_port_link_status link = {0};
+ u16 out_size = sizeof(link);
+ int err;
+
+ if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) {
+ link.link = link_status;
+ link.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC,
+ vf_id, HINIC_PORT_CMD_LINK_STATUS_REPORT,
+ &link, sizeof(link),
+ &link, &out_size, 0);
+ if (err || !out_size || link.status)
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err,
+ link.status, out_size);
+ }
+}
+
+/* send link change event mbox msg to active vfs under the pf */
+void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
+ u8 link_status)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ u16 i;
+
+ nic_io->link_status = link_status;
+ for (i = 1; i <= nic_io->max_vfs; i++) {
+ if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced)
+ hinic_notify_vf_link_status(hwdev, i, link_status);
+ }
+}
+
+static u16 hinic_vf_info_vlanprio(struct hinic_hwdev *hwdev, int vf_id)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ u16 pf_vlan, vlanprio;
+ u8 pf_qos;
+
+ pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan;
+ pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos;
+ vlanprio = pf_vlan | pf_qos << HINIC_VLAN_PRIORITY_SHIFT;
+
+ return vlanprio;
+}
+
+static int hinic_set_vf_vlan(struct hinic_hwdev *hwdev, bool add, u16 vid,
+ u8 qos, int vf_id)
+{
+ struct hinic_vf_vlan_config vf_vlan = {0};
+ u16 out_size = sizeof(vf_vlan);
+ int err;
+ u8 cmd;
+
+ /* VLAN 0 is a special case, don't allow it to be removed */
+ if (!vid && !add)
+ return 0;
+
+ vf_vlan.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ vf_vlan.vlan_id = vid;
+ vf_vlan.qos = qos;
+
+ if (add)
+ cmd = HINIC_PORT_CMD_SET_VF_VLAN;
+ else
+ cmd = HINIC_PORT_CMD_CLR_VF_VLAN;
+
+ err = hinic_port_msg_cmd(hwdev, cmd, &vf_vlan,
+ sizeof(vf_vlan), &vf_vlan, &out_size);
+ if (err || !out_size || vf_vlan.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, vf_vlan.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic_set_vf_tx_rate_max_min(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct hinic_tx_rate_cfg_max_min rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ rate_cfg.max_rate = max_rate;
+ rate_cfg.min_rate = min_rate;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size);
+ if ((rate_cfg.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ rate_cfg.status) || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) max rate(%d), min rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err,
+ rate_cfg.status, out_size);
+ return -EIO;
+ }
+
+ if (!rate_cfg.status) {
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate;
+ }
+
+ return rate_cfg.status;
+}
+
+static int hinic_set_vf_rate_limit(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 tx_rate)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct hinic_tx_rate_cfg rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ rate_cfg.tx_rate = tx_rate;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size);
+ if (err || !out_size || rate_cfg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), tx_rate, err, rate_cfg.status,
+ out_size);
+ if (rate_cfg.status)
+ return rate_cfg.status;
+
+ return -EIO;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = tx_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = 0;
+
+ return 0;
+}
+
+static int hinic_set_vf_tx_rate(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ int err;
+
+ err = hinic_set_vf_tx_rate_max_min(hwdev, vf_id, max_rate, min_rate);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED)
+ return err;
+
+ if (min_rate) {
+ dev_err(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate\n");
+ return -EOPNOTSUPP;
+ }
+
+ dev_info(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate, force min_tx_rate = max_tx_rate\n");
+
+ return hinic_set_vf_rate_limit(hwdev, vf_id, max_rate);
+}
+
+static int hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id)
+{
+ struct vf_data_storage *vf_info;
+ u16 func_id, vlan_id;
+ int err = 0;
+
+ vf_info = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac) {
+ func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+
+ vlan_id = 0;
+
+ err = hinic_set_mac(hwdev, vf_info->vf_mac_addr, vlan_id,
+ func_id);
+ if (err) {
+ dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set VF %d MAC\n",
+ HW_VF_ID_TO_OS(vf_id));
+ return err;
+ }
+ }
+
+ if (hinic_vf_info_vlanprio(hwdev, vf_id)) {
+ err = hinic_set_vf_vlan(hwdev, true, vf_info->pf_vlan,
+ vf_info->pf_qos, vf_id);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to add VF %d VLAN_QOS\n",
+ HW_VF_ID_TO_OS(vf_id));
+ return err;
+ }
+ }
+
+ if (vf_info->max_rate) {
+ err = hinic_set_vf_tx_rate(hwdev, vf_id, vf_info->max_rate,
+ vf_info->min_rate);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d max rate: %d, min rate: %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->max_rate,
+ vf_info->min_rate);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int hinic_register_vf_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_register_vf *register_info = buf_out;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ if (vf_id > nic_io->max_vfs) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Register VF id %d exceed limit[0-%d]\n",
+ HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_io->max_vfs));
+ register_info->status = EFAULT;
+ return -EFAULT;
+ }
+
+ *out_size = sizeof(*register_info);
+ err = hinic_init_vf_config(hw_dev, vf_id);
+ if (err) {
+ register_info->status = EFAULT;
+ return err;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true;
+
+ return 0;
+}
+
+static int hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+
+ nic_io = &hw_dev->func_to_io;
+ *out_size = 0;
+ if (vf_id > nic_io->max_vfs)
+ return 0;
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = false;
+
+ return 0;
+}
+
+static int hinic_change_vf_mtu_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, buf_in,
+ in_size, buf_out, out_size);
+ if (err) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Failed to set VF %u mtu\n",
+ vf_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic_get_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_info = buf_out;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+
+ nic_io = &dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+
+ memcpy(mac_info->mac, vf_info->vf_mac_addr, ETH_ALEN);
+ mac_info->status = 0;
+ *out_size = sizeof(*mac_info);
+
+ return 0;
+}
+
+static int hinic_set_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_out = buf_out;
+ struct hinic_port_mac_cmd *mac_in = buf_in;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac && !(vf_info->trust) &&
+ is_valid_ether_addr(mac_in->mac)) {
+ dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF %d MAC address\n",
+ HW_VF_ID_TO_OS(vf_id));
+ mac_out->status = HINIC_PF_SET_VF_ALREADY;
+ *out_size = sizeof(*mac_out);
+ return 0;
+ }
+
+ err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_SET_MAC, buf_in,
+ in_size, buf_out, out_size);
+ if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+ dev_err(&hw_dev->hwif->pdev->dev,
+ "Failed to set VF %d MAC address, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size);
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static int hinic_del_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_out = buf_out;
+ struct hinic_port_mac_cmd *mac_in = buf_in;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->mac) &&
+ !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) {
+ dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF mac.\n");
+ mac_out->status = HINIC_PF_SET_VF_ALREADY;
+ *out_size = sizeof(*mac_out);
+ return 0;
+ }
+
+ err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_DEL_MAC, buf_in,
+ in_size, buf_out, out_size);
+ if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size);
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static int hinic_get_vf_link_status_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_link_cmd *get_link = buf_out;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct vf_data_storage *vf_infos;
+ struct hinic_func_to_io *nic_io;
+ bool link_forced, link_up;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_infos = nic_io->vf_infos;
+ link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced;
+ link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up;
+
+ if (link_forced)
+ get_link->state = link_up ?
+ HINIC_LINK_STATE_UP : HINIC_LINK_STATE_DOWN;
+ else
+ get_link->state = nic_io->link_status;
+
+ get_link->status = 0;
+ *out_size = sizeof(*get_link);
+
+ return 0;
+}
+
+static struct vf_cmd_msg_handle nic_vf_cmd_msg_handler[] = {
+ {HINIC_PORT_CMD_VF_REGISTER, hinic_register_vf_msg_handler},
+ {HINIC_PORT_CMD_VF_UNREGISTER, hinic_unregister_vf_msg_handler},
+ {HINIC_PORT_CMD_CHANGE_MTU, hinic_change_vf_mtu_msg_handler},
+ {HINIC_PORT_CMD_GET_MAC, hinic_get_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_SET_MAC, hinic_set_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_DEL_MAC, hinic_del_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_GET_LINK_STATE, hinic_get_vf_link_status_msg_handler},
+};
+
+#define CHECK_IPSU_15BIT 0X8000
+
+static
+struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ return &nic_dev->sriov_info;
+}
+
+static int hinic_check_mac_info(u8 status, u16 vlan_id)
+{
+ if ((status && status != HINIC_MGMT_STATUS_EXIST &&
+ status != HINIC_PF_SET_VF_ALREADY) ||
+ (vlan_id & CHECK_IPSU_15BIT &&
+ status == HINIC_MGMT_STATUS_EXIST))
+ return -EINVAL;
+
+ return 0;
+}
+
+#define HINIC_VLAN_ID_MASK 0x7FFF
+
+static int hinic_update_mac(struct hinic_hwdev *hwdev, u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id)
+{
+ struct hinic_port_mac_update mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !old_mac || !new_mac)
+ return -EINVAL;
+
+ if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(&hwdev->hwif->pdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.old_mac, old_mac, ETH_ALEN);
+ memcpy(mac_info.new_mac, new_mac, ETH_ALEN);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_UPDATE_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
+
+ if (err || !out_size ||
+ hinic_check_mac_info(mac_info.status, mac_info.vlan_id)) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+
+ if (mac_info.status == HINIC_PF_SET_VF_ALREADY) {
+ dev_warn(&hwdev->hwif->pdev->dev,
+ "PF has already set VF MAC. Ignore update operation\n");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ if (mac_info.status == HINIC_MGMT_STATUS_EXIST)
+ dev_warn(&hwdev->hwif->pdev->dev, "MAC is repeated. Ignore update operation\n");
+
+ return 0;
+}
+
+static void hinic_get_vf_config(struct hinic_hwdev *hwdev, u16 vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct vf_data_storage *vfinfo;
+
+ vfinfo = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+
+ ivi->vf = HW_VF_ID_TO_OS(vf_id);
+ memcpy(ivi->mac, vfinfo->vf_mac_addr, ETH_ALEN);
+ ivi->vlan = vfinfo->pf_vlan;
+ ivi->qos = vfinfo->pf_qos;
+ ivi->spoofchk = vfinfo->spoofchk;
+ ivi->trusted = vfinfo->trust;
+ ivi->max_tx_rate = vfinfo->max_rate;
+ ivi->min_tx_rate = vfinfo->min_rate;
+
+ if (!vfinfo->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vfinfo->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+}
+
+int hinic_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi);
+
+ return 0;
+}
+
+static int hinic_set_vf_mac(struct hinic_hwdev *hwdev, int vf,
+ unsigned char *mac_addr)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct vf_data_storage *vf_info;
+ u16 func_id;
+ int err;
+
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+
+ /* duplicate request, so just return success */
+ if (vf_info->pf_set_mac &&
+ !memcmp(vf_info->vf_mac_addr, mac_addr, ETH_ALEN))
+ return 0;
+
+ vf_info->pf_set_mac = true;
+
+ func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf;
+ err = hinic_update_mac(hwdev, vf_info->vf_mac_addr,
+ mac_addr, 0, func_id);
+ if (err) {
+ vf_info->pf_set_mac = false;
+ return err;
+ }
+
+ memcpy(vf_info->vf_mac_addr, mac_addr, ETH_ALEN);
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ int err;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (!is_valid_ether_addr(mac) || vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac);
+ if (err)
+ return err;
+
+ netif_info(nic_dev, drv, netdev, "Setting MAC %pM on VF %d\n", mac, vf);
+ netif_info(nic_dev, drv, netdev, "Reload the VF driver to make this change effective.");
+
+ return 0;
+}
+
+static int hinic_add_vf_vlan(struct hinic_hwdev *hwdev, int vf_id,
+ u16 vlan, u8 qos)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hwdev, true, vlan, qos, vf_id);
+ if (err)
+ return err;
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos;
+
+ dev_info(&hwdev->hwif->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan, qos, HW_VF_ID_TO_OS(vf_id));
+ return 0;
+}
+
+static int hinic_kill_vf_vlan(struct hinic_hwdev *hwdev, int vf_id)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hwdev, false,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos,
+ vf_id);
+ if (err)
+ return err;
+
+ dev_info(&hwdev->hwif->pdev->dev, "Remove VLAN %d on VF %d\n",
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ HW_VF_ID_TO_OS(vf_id));
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0;
+
+ return 0;
+}
+
+static int hinic_update_mac_vlan(struct hinic_dev *nic_dev, u16 old_vlan,
+ u16 new_vlan, int vf_id)
+{
+ struct vf_data_storage *vf_info;
+ u16 vlan_id;
+ int err;
+
+ if (!nic_dev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID)
+ return -EINVAL;
+
+ vf_info = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (!vf_info->pf_set_mac)
+ return 0;
+
+ vlan_id = old_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+
+ err = hinic_port_del_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+ if (err) {
+ dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to delete VF %d MAC %pM vlan %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, old_vlan);
+ return err;
+ }
+
+ vlan_id = new_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+
+ err = hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+ if (err) {
+ dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to add VF %d MAC %pM vlan %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, new_vlan);
+ goto out;
+ }
+
+ return 0;
+
+out:
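+	/* rollback: re-add the MAC entry under the old vlan since adding it under the new vlan failed */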
+ vlan_id = old_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+ hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+
+ return err;
+}
+
+static int set_hw_vf_vlan(struct hinic_dev *nic_dev,
+ u16 cur_vlanprio, int vf, u16 vlan, u8 qos)
+{
+ u16 old_vlan = cur_vlanprio & VLAN_VID_MASK;
+ int err = 0;
+
+ if (vlan || qos) {
+ if (cur_vlanprio) {
+ err = hinic_kill_vf_vlan(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d old vlan %d\n",
+ vf, old_vlan);
+ goto out;
+ }
+ }
+ err = hinic_add_vf_vlan(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf), vlan, qos);
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to add vf %d new vlan %d\n",
+ vf, vlan);
+ goto out;
+ }
+ } else {
+ err = hinic_kill_vf_vlan(nic_dev->hwdev, OS_VF_ID_TO_HW(vf));
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d vlan %d\n",
+ vf, old_vlan);
+ goto out;
+ }
+ }
+
+ err = hinic_update_mac_vlan(nic_dev, old_vlan, vlan,
+ OS_VF_ID_TO_HW(vf));
+
+out:
+ return err;
+}
+
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ u16 vlanprio, cur_vlanprio;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+ vlanprio = vlan | qos << HINIC_VLAN_PRIORITY_SHIFT;
+ cur_vlanprio = hinic_vf_info_vlanprio(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ /* duplicate request, so just return success */
+ if (vlanprio == cur_vlanprio)
+ return 0;
+
+ return set_hw_vf_vlan(nic_dev, cur_vlanprio, vf, vlan, qos);
+}
+
+static int hinic_set_vf_trust(struct hinic_hwdev *hwdev, u16 vf_id,
+ bool trust)
+{
+ struct vf_data_storage *vf_infos;
+ struct hinic_func_to_io *nic_io;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_io = &hwdev->func_to_io;
+ vf_infos = nic_io->vf_infos;
+ vf_infos[vf_id].trust = trust;
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+ struct hinic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ struct hinic_func_to_io *nic_io;
+ bool cur_trust;
+ int err;
+
+ sriov_info = &adapter->sriov_info;
+ nic_io = &adapter->hwdev->func_to_io;
+
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ cur_trust = nic_io->vf_infos[vf].trust;
+ /* same request, so just return success */
+ if ((setting && cur_trust) || (!setting && !cur_trust))
+ return 0;
+
+ err = hinic_set_vf_trust(adapter->hwdev, vf, setting);
+ if (!err)
+ dev_info(&sriov_info->pdev->dev, "Set VF %d trusted %s succeed\n",
+ vf, setting ? "on" : "off");
+ else
+ dev_err(&sriov_info->pdev->dev, "Failed set VF %d trusted %s\n",
+ vf, setting ? "on" : "off");
+
+ return err;
+}
+
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate)
+{
+ u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000, SPEED_100000};
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_port_cap port_cap = { 0 };
+ enum hinic_port_link_state link_state;
+ int err;
+
+ if (vf >= nic_dev->sriov_info.num_vfs) {
+ netif_err(nic_dev, drv, netdev, "VF number must be less than %d\n",
+ nic_dev->sriov_info.num_vfs);
+ return -EINVAL;
+ }
+
+ if (max_tx_rate < min_tx_rate) {
+ netif_err(nic_dev, drv, netdev, "Max rate %d must be greater than or equal to min rate %d\n",
+ max_tx_rate, min_tx_rate);
+ return -EINVAL;
+ }
+
+ err = hinic_port_link_state(nic_dev, &link_state);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Get link status failed when setting vf tx rate\n");
+ return -EIO;
+ }
+
+ if (link_state == HINIC_LINK_STATE_DOWN) {
+ netif_err(nic_dev, drv, netdev,
+ "Link status must be up when setting vf tx rate\n");
+ return -EPERM;
+ }
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err || port_cap.speed > LINK_SPEED_100GB)
+ return -EIO;
+
+	/* rate limit cannot be less than 0 or greater than link speed */
+ if (max_tx_rate < 0 || max_tx_rate > speeds[port_cap.speed]) {
+ netif_err(nic_dev, drv, netdev, "Max tx rate must be in [0 - %d]\n",
+ speeds[port_cap.speed]);
+ return -EINVAL;
+ }
+
+ err = hinic_set_vf_tx_rate(nic_dev->hwdev, OS_VF_ID_TO_HW(vf),
+ max_tx_rate, min_tx_rate);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Unable to set VF %d max rate %d min rate %d%s\n",
+ vf, max_tx_rate, min_tx_rate,
+ err == HINIC_TX_RATE_TABLE_FULL ?
+ ", tx rate profile is full" : "");
+ return -EIO;
+ }
+
+ netif_info(nic_dev, drv, netdev,
+ "Set VF %d max tx rate %d min tx rate %d successfully\n",
+ vf, max_tx_rate, min_tx_rate);
+
+ return 0;
+}
+
+static int hinic_set_vf_spoofchk(struct hinic_hwdev *hwdev, u16 vf_id,
+ bool spoofchk)
+{
+ struct hinic_spoofchk_set spoofchk_cfg = {0};
+ struct vf_data_storage *vf_infos = NULL;
+ u16 out_size = sizeof(spoofchk_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vf_infos = hwdev->func_to_io.vf_infos;
+
+ spoofchk_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ spoofchk_cfg.state = spoofchk ? 1 : 0;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ENABLE_SPOOFCHK,
+ &spoofchk_cfg, sizeof(spoofchk_cfg),
+ &spoofchk_cfg, &out_size);
+ if (spoofchk_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || spoofchk_cfg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.status,
+ out_size);
+ err = -EIO;
+ }
+
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk;
+
+ return err;
+}
+
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ bool cur_spoofchk;
+ int err;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk;
+
+ /* same request, so just return success */
+ if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
+ return 0;
+
+ err = hinic_set_vf_spoofchk(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf), setting);
+
+ if (!err) {
+ netif_info(nic_dev, drv, netdev, "Set VF %d spoofchk %s successfully\n",
+ vf, setting ? "on" : "off");
+ } else if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+ netif_err(nic_dev, drv, netdev,
+ "Current firmware doesn't support to set vf spoofchk, need to upgrade latest firmware version\n");
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int hinic_set_vf_link_state(struct hinic_hwdev *hwdev, u16 vf_id,
+ int link)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+ u8 link_status = 0;
+
+ switch (link) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_io->link_status ?
+ true : false;
+ link_status = nic_io->link_status;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true;
+ link_status = HINIC_LINK_UP;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false;
+ link_status = HINIC_LINK_DOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Notify the VF of its new link state */
+ hinic_notify_vf_link_status(hwdev, vf_id, link_status);
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = &nic_dev->sriov_info;
+
+ if (vf_id >= sriov_info->num_vfs) {
+ netif_err(nic_dev, drv, netdev,
+ "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ return hinic_set_vf_link_state(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf_id), link);
+}
+
+/* PF receives a message from the VF */
+static int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct vf_cmd_msg_handle *vf_msg_handle;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct hinic_pfhwdev *pfhwdev;
+ int err = 0;
+ u32 i;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ pfhwdev = container_of(dev, struct hinic_pfhwdev, hwdev);
+ nic_io = &dev->func_to_io;
+ for (i = 0; i < ARRAY_SIZE(nic_vf_cmd_msg_handler); i++) {
+ vf_msg_handle = &nic_vf_cmd_msg_handler[i];
+ if (cmd == vf_msg_handle->cmd &&
+ vf_msg_handle->cmd_msg_handler) {
+ err = vf_msg_handle->cmd_msg_handler(hwdev, vf_id,
+ buf_in, in_size,
+ buf_out,
+ out_size);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(nic_vf_cmd_msg_handler))
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC,
+ cmd, buf_in, in_size, buf_out,
+ out_size, HINIC_MGMT_MSG_SYNC);
+
+ if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&nic_io->hwif->pdev->dev, "PF failed to process VF L2NIC cmd: %d, err: %d\n",
+ cmd, err);
+ return err;
+}
+
+static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_dev_cap *dev_cap = buf_out;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_cap *cap;
+
+ cap = &dev->nic_cap;
+ memset(dev_cap, 0, sizeof(*dev_cap));
+
+ dev_cap->max_vf = cap->max_vf;
+ dev_cap->max_sqs = cap->max_vf_qps;
+ dev_cap->max_rqs = cap->max_vf_qps;
+
+ *out_size = sizeof(*dev_cap);
+
+ return 0;
+}
+
+static int hinic_init_vf_infos(struct hinic_func_to_io *nic_io, u16 vf_id)
+{
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+
+ if (set_vf_link_state > HINIC_IFLA_VF_LINK_STATE_DISABLE) {
+ dev_warn(&nic_io->hwif->pdev->dev, "Module Parameter set_vf_link_state value %d is out of range, resetting to %d\n",
+ set_vf_link_state, HINIC_IFLA_VF_LINK_STATE_AUTO);
+ set_vf_link_state = HINIC_IFLA_VF_LINK_STATE_AUTO;
+ }
+
+ switch (set_vf_link_state) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[vf_id].link_forced = false;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[vf_id].link_forced = true;
+ vf_infos[vf_id].link_up = true;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[vf_id].link_forced = true;
+ vf_infos[vf_id].link_up = false;
+ break;
+ default:
+ dev_err(&nic_io->hwif->pdev->dev, "Invalid input parameter set_vf_link_state: %d\n",
+ set_vf_link_state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
+{
+ struct vf_data_storage *vf_infos;
+ u16 func_id;
+
+ func_id = hinic_glb_pf_vf_offset(nic_dev->hwdev->hwif) + vf_id;
+ vf_infos = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_infos->pf_set_mac)
+ hinic_port_del_mac(nic_dev, vf_infos->vf_mac_addr, 0);
+
+ if (hinic_vf_info_vlanprio(nic_dev->hwdev, vf_id))
+ hinic_kill_vf_vlan(nic_dev->hwdev, vf_id);
+
+ if (vf_infos->max_rate)
+ hinic_set_vf_tx_rate(nic_dev->hwdev, vf_id, 0, 0);
+
+ if (vf_infos->spoofchk)
+ hinic_set_vf_spoofchk(nic_dev->hwdev, vf_id, false);
+
+ if (vf_infos->trust)
+ hinic_set_vf_trust(nic_dev->hwdev, vf_id, false);
+
+ memset(vf_infos, 0, sizeof(*vf_infos));
+ /* set vf_infos to default */
+ hinic_init_vf_infos(&nic_dev->hwdev->func_to_io, HW_VF_ID_TO_OS(vf_id));
+}
+
+static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
+ u16 start_vf_id, u16 end_vf_id)
+{
+ struct hinic_dev *nic_dev;
+ u16 func_idx, idx;
+
+ nic_dev = container_of(sriov_info, struct hinic_dev, sriov_info);
+
+ for (idx = start_vf_id; idx <= end_vf_id; idx++) {
+ func_idx = hinic_glb_pf_vf_offset(nic_dev->hwdev->hwif) + idx;
+ hinic_set_wq_page_size(nic_dev->hwdev, func_idx,
+ HINIC_HW_WQ_PAGE_SIZE);
+ hinic_clear_vf_infos(nic_dev, idx);
+ }
+
+ return 0;
+}
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_register_vf register_info = {0};
+ u16 out_size = sizeof(register_info);
+ struct hinic_func_to_io *nic_io;
+ int err = 0;
+ u32 size, i;
+
+ nic_io = &hwdev->func_to_io;
+
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_REGISTER,
+ &register_info, sizeof(register_info),
+ &register_info, &out_size, 0);
+ if (err || register_info.status || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, register_info.status, out_size);
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ return -EIO;
+ }
+ } else {
+ err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_CFGM,
+ cfg_mbx_pf_proc_vf_msg);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Register PF mailbox callback failed\n");
+ return err;
+ }
+ nic_io->max_vfs = hwdev->nic_cap.max_vf;
+ size = sizeof(*nic_io->vf_infos) * nic_io->max_vfs;
+ if (size != 0) {
+ nic_io->vf_infos = kzalloc(size, GFP_KERNEL);
+ if (!nic_io->vf_infos) {
+ err = -ENOMEM;
+ goto out_free_nic_io;
+ }
+
+ for (i = 0; i < nic_io->max_vfs; i++) {
+ err = hinic_init_vf_infos(nic_io, i);
+ if (err)
+ goto err_init_vf_infos;
+ }
+
+ err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ nic_pf_mbox_handler);
+ if (err)
+ goto err_register_pf_mbox_cb;
+ }
+ }
+
+ return 0;
+
+err_register_pf_mbox_cb:
+err_init_vf_infos:
+ kfree(nic_io->vf_infos);
+out_free_nic_io:
+ return err;
+}
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_register_vf unregister = {0};
+ u16 out_size = sizeof(unregister);
+ int err;
+
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_UNREGISTER,
+ &unregister, sizeof(unregister),
+ &unregister, &out_size, 0);
+ if (err || !out_size || unregister.status)
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, unregister.status, out_size);
+ } else {
+ if (hwdev->func_to_io.vf_infos) {
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ kfree(hwdev->func_to_io.vf_infos);
+ }
+ }
+}
+
+static int hinic_init_vf_hw(struct hinic_hwdev *hwdev, u16 start_vf_id,
+ u16 end_vf_id)
+{
+ u16 i, func_idx;
+ int err;
+
+ /* VFs use 256K as the default WQ page size and cannot change it */
+ for (i = start_vf_id; i <= end_vf_id; i++) {
+ func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + i;
+ err = hinic_set_wq_page_size(hwdev, func_idx,
+ HINIC_DEFAULT_WQ_PAGE_SIZE);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int hinic_pci_sriov_disable(struct pci_dev *pdev)
+{
+ struct hinic_sriov_info *sriov_info;
+ u16 tmp_vfs;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+ /* if SR-IOV is already disabled then nothing will be done */
+ if (!sriov_info->sriov_enabled)
+ return 0;
+
+ set_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(sriov_info->pdev)) {
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+ dev_warn(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return -EPERM;
+ }
+ sriov_info->sriov_enabled = false;
+
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(sriov_info->pdev);
+
+ tmp_vfs = (u16)sriov_info->num_vfs;
+ sriov_info->num_vfs = 0;
+ hinic_deinit_vf_hw(sriov_info, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW(tmp_vfs - 1));
+
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+
+ return 0;
+}
+
+int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct hinic_sriov_info *sriov_info;
+ int err;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+
+ if (test_and_set_bit(HINIC_SRIOV_ENABLE, &sriov_info->state)) {
+ dev_err(&pdev->dev,
+ "SR-IOV enable in process, please wait, num_vfs %d\n",
+ num_vfs);
+ return -EPERM;
+ }
+
+ err = hinic_init_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW((u16)num_vfs - 1));
+ if (err) {
+ dev_err(&sriov_info->pdev->dev,
+ "Failed to init vf in hardware before enable sriov, error %d\n",
+ err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+
+ err = pci_enable_sriov(sriov_info->pdev, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to enable SR-IOV, error %d\n", err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+
+ sriov_info->sriov_enabled = true;
+ sriov_info->num_vfs = num_vfs;
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+
+ return num_vfs;
+}
+
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(dev);
+
+ if (test_bit(HINIC_FUNC_REMOVE, &sriov_info->state))
+ return -EBUSY;
+
+ if (!num_vfs)
+ return hinic_pci_sriov_disable(dev);
+ else
+ return hinic_pci_sriov_enable(dev, num_vfs);
+}
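+
+/* Usage sketch (illustrative, not part of the original patch): the PCI core
+ * typically invokes this .sriov_configure callback when sriov_numvfs is
+ * written from userspace, e.g.
+ *
+ *   echo 2 > /sys/bus/pci/devices/<BDF>/sriov_numvfs   (enables 2 VFs)
+ *   echo 0 > /sys/bus/pci/devices/<BDF>/sriov_numvfs   (disables SR-IOV)
+ */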
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
new file mode 100644
index 000000000000..ba627a362f9a
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_SRIOV_H
+#define HINIC_SRIOV_H
+
+#include "hinic_hw_dev.h"
+
+#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1)
+#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1)
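+/* Illustrative note (not part of the original patch): the hardware numbers
+ * VFs from 1 while the OS uses a 0-based index, so for example
+ * OS_VF_ID_TO_HW(0) == 1 and HW_VF_ID_TO_OS(1) == 0.
+ */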
+
+enum hinic_sriov_state {
+ HINIC_SRIOV_DISABLE,
+ HINIC_SRIOV_ENABLE,
+ HINIC_FUNC_REMOVE,
+};
+
+enum {
+ HINIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
+ HINIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */
+ HINIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */
+};
+
+struct hinic_sriov_info {
+ struct pci_dev *pdev;
+ struct hinic_hwdev *hwdev;
+ bool sriov_enabled;
+ unsigned int num_vfs;
+ unsigned long state;
+};
+
+struct vf_data_storage {
+ u8 vf_mac_addr[ETH_ALEN];
+ bool registered;
+ bool pf_set_mac;
+ u16 pf_vlan;
+ u8 pf_qos;
+ u32 max_rate;
+ u32 min_rate;
+
+ bool link_forced;
+ bool link_up; /* only valid if VF link is forced */
+ bool spoofchk;
+ bool trust;
+};
+
+struct hinic_register_vf {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+};
+
+struct hinic_port_mac_update {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct hinic_vf_vlan_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u8 qos;
+ u8 rsvd1[7];
+};
+
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto);
+
+int hinic_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
+
+int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
+
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate);
+
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+
+void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
+ u8 link_status);
+
+int hinic_pci_sriov_disable(struct pci_dev *dev);
+
+int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs);
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev);
+
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 365016450bdb..4c66a0bc1b28 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -673,9 +673,11 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
if (pkts < budget) {
napi_complete(napi);
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- sq->msix_entry,
- HINIC_MSIX_ENABLE);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ sq->msix_entry,
+ HINIC_MSIX_ENABLE);
+
return pkts;
}
@@ -701,10 +703,11 @@ static irqreturn_t tx_irq(int irq, void *data)
nic_dev = netdev_priv(txq->netdev);
- /* Disable the interrupt until napi will be completed */
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- txq->sq->msix_entry,
- HINIC_MSIX_DISABLE);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ /* Disable the interrupt until NAPI is completed */
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ txq->sq->msix_entry,
+ HINIC_MSIX_DISABLE);
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 0d51cbc88028..05bc6e216bca 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3136,8 +3136,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
+ case e1000_82544: {
unsigned int pull_size;
- case e1000_82544:
+
/* Make sure we have room to chop off 4 bytes,
* and that the end alignment will work out to
* this hardware's requirements
@@ -3158,6 +3159,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
}
len = skb_headlen(skb);
break;
+ }
default:
/* do nothing */
break;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 177c6da80c57..e0b074820b47 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6404,6 +6404,31 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
mac_data |= BIT(3);
ew32(CTRL_EXT, mac_data);
+ /* Disable disconnected cable conditioning for Power Gating */
+ mac_data = er32(DPGFR);
+ mac_data |= BIT(2);
+ ew32(DPGFR, mac_data);
+
+ /* Don't wake from dynamic Power Gating with clock request */
+ mac_data = er32(FEXTNVM12);
+ mac_data |= BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data |= BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Enable K1 off to enable mPHY Power Gating */
+ mac_data = er32(FEXTNVM6);
+ mac_data |= BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Enable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data |= BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
/* Enable the Dynamic Clock Gating in the DMA and MAC */
mac_data = er32(CTRL_EXT);
mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
@@ -6433,6 +6458,35 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data |= BIT(0);
ew32(FEXTNVM7, mac_data);
+ /* Disable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data &= ~BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* Disable K1 off */
+ mac_data = er32(FEXTNVM6);
+ mac_data &= ~BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Re-gate the PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data &= ~BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Re-enable waking from dynamic Power Gating with clock request */
+ mac_data = er32(FEXTNVM12);
+ mac_data &= ~BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Re-enable disconnected cable conditioning for Power Gating */
+ mac_data = er32(DPGFR);
+ mac_data &= ~BIT(2);
+ ew32(DPGFR, mac_data);
+
/* Disable Dynamic Power Gating */
mac_data = er32(CTRL_EXT);
mac_data &= 0xFFFFFFF7;
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index df59fd1d660c..8165ba2619a4 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -21,9 +21,12 @@
#define E1000_FEXTNVM5 0x00014 /* Future Extended NVM 5 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM8 0x5BB0 /* Future Extended NVM 8 - RW */
#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
+#define E1000_FEXTNVM12 0x5BC0 /* Future Extended NVM 12 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
+#define E1000_DPGFR 0x00FAC /* Dynamic Power Gate Force Control Register */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b8496037ef7f..a3772beffe02 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1507,6 +1507,22 @@ static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}
+static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = i40e_rx_offset(rx_ring) ?
+ SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
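+
+/* Illustrative note (assumption, not from the original patch): on 4K-page
+ * systems every rx buffer spans half an rx page, so truesize is a fixed
+ * power of two regardless of frame length; on larger-page systems truesize
+ * tracks the aligned frame size (plus skb_shared_info overhead when a
+ * headroom offset is in use). This value is what seeds xdp.frame_sz in
+ * i40e_clean_rx_irq() below.
+ */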
+
/**
* i40e_alloc_mapped_page - recycle or make a new page
* @rx_ring: ring to use
@@ -2246,13 +2262,11 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);
+#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
- unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
-
rx_buffer->page_offset += truesize;
#endif
}
@@ -2335,6 +2349,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
bool failure = false;
struct xdp_buff xdp;
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
+#endif
xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < (unsigned int)budget)) {
@@ -2389,7 +2406,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
xdp.data_hard_start = xdp.data -
i40e_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
-
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
+#endif
skb = i40e_run_xdp(rx_ring, &xdp);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 0b7d29192b2c..2b9184aead5f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -531,12 +531,14 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ struct xdp_umem *umem = rx_ring->xsk_umem;
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
struct sk_buff *skb;
struct xdp_buff xdp;
xdp.rxq = &rx_ring->xdp_rxq;
+ xdp.frame_sz = xsk_umem_xdp_frame_sz(umem);
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *bi;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 42bac3ec5526..e7a2671222d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -2962,8 +2962,10 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* add profile info */
prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
- if (!prof)
+ if (!prof) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_add_prof;
+ }
prof->profile_cookie = id;
prof->prof_id = prof_id;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index f67e8362958c..69b21b436f9a 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -423,6 +423,22 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
return 0;
}
+static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ice_rx_offset(rx_ring) ?
+ SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
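+
+/* Illustrative note (assumption, not from the original patch): mirroring the
+ * i40e change, this helper feeds xdp.frame_sz in ice_clean_rx_irq(), which
+ * then replaces the per-packet truesize computation previously done when
+ * recycling the page after XDP_TX/XDP_REDIRECT (see the later hunk that now
+ * passes xdp.frame_sz to ice_rx_buf_adjust_pg_offset()).
+ */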
+
/**
* ice_run_xdp - Executes an XDP program on initialized xdp_buff
* @rx_ring: Rx ring
@@ -991,6 +1007,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
bool failure;
xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+#endif
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
@@ -1038,6 +1058,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
xdp.data_meta = xdp.data;
xdp.data_end = xdp.data + size;
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
+#endif
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
@@ -1051,16 +1075,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
if (!xdp_res)
goto construct_skb;
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- unsigned int truesize;
-
-#if (PAGE_SIZE < 8192)
- truesize = ice_rx_pg_size(rx_ring) / 2;
-#else
- truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
- size);
-#endif
xdp_xmit |= xdp_res;
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
} else {
rx_buf->pagecnt_bias++;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 8279db15e870..23e5515d4527 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -840,11 +840,13 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ struct xdp_umem *umem = rx_ring->xsk_umem;
unsigned int xdp_xmit = 0;
bool failure = false;
struct xdp_buff xdp;
xdp.rxq = &rx_ring->xdp_rxq;
+ xdp.frame_sz = xsk_umem_xdp_frame_sz(umem);
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index e3c164c12e10..3652f211f351 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_ethtool.o igc_ptp.o igc_dump.o
+igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index a1f845a2aa80..8ddc39482a8e 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -19,8 +19,199 @@
/* forward declaration */
void igc_set_ethtool_ops(struct net_device *);
-struct igc_adapter;
-struct igc_ring;
+/* Transmit and receive queues */
+#define IGC_MAX_RX_QUEUES 4
+#define IGC_MAX_TX_QUEUES 4
+
+#define MAX_Q_VECTORS 8
+#define MAX_STD_JUMBO_FRAME_SIZE 9216
+
+#define MAX_ETYPE_FILTER (4 - 1)
+#define IGC_RETA_SIZE 128
+
+struct igc_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+ u64 restart_queue2;
+};
+
+struct igc_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 drops;
+ u64 csum_err;
+ u64 alloc_failed;
+};
+
+struct igc_rx_packet_stats {
+ u64 ipv4_packets; /* IPv4 headers processed */
+ u64 ipv4e_packets; /* IPv4E headers with extensions processed */
+ u64 ipv6_packets; /* IPv6 headers processed */
+ u64 ipv6e_packets; /* IPv6E headers with extensions processed */
+ u64 tcp_packets; /* TCP headers processed */
+ u64 udp_packets; /* UDP headers processed */
+ u64 sctp_packets; /* SCTP headers processed */
+ u64 nfs_packets; /* NFS headers processed */
+ u64 other_packets;
+};
+
+struct igc_ring_container {
+ struct igc_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 work_limit; /* total work allowed per interrupt */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+struct igc_ring {
+ struct igc_q_vector *q_vector; /* backlink to q_vector */
+ struct net_device *netdev; /* back pointer to net_device */
+ struct device *dev; /* device for dma mapping */
+ union { /* array of buffer info structs */
+ struct igc_tx_buffer *tx_buffer_info;
+ struct igc_rx_buffer *rx_buffer_info;
+ };
+ void *desc; /* descriptor ring memory */
+ unsigned long flags; /* ring specific flags */
+ void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+ unsigned int size; /* length of desc. ring in bytes */
+
+ u16 count; /* number of desc. in the ring */
+ u8 queue_index; /* logical index of the ring */
+ u8 reg_idx; /* physical index of the ring */
+ bool launchtime_enable; /* true if LaunchTime is enabled */
+
+ u32 start_time;
+ u32 end_time;
+
+ /* everything past this point is written often */
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+
+ union {
+ /* TX */
+ struct {
+ struct igc_tx_queue_stats tx_stats;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync tx_syncp2;
+ };
+ /* RX */
+ struct {
+ struct igc_rx_queue_stats rx_stats;
+ struct igc_rx_packet_stats pkt_stats;
+ struct u64_stats_sync rx_syncp;
+ struct sk_buff *skb;
+ };
+ };
+} ____cacheline_internodealigned_in_smp;
+
+/* Board specific private data structure */
+struct igc_adapter {
+ struct net_device *netdev;
+
+ unsigned long state;
+ unsigned int flags;
+ unsigned int num_q_vectors;
+
+ struct msix_entry *msix_entries;
+
+ /* TX */
+ u16 tx_work_limit;
+ u32 tx_timeout_count;
+ int num_tx_queues;
+ struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
+
+ /* RX */
+ int num_rx_queues;
+ struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
+
+ struct timer_list watchdog_timer;
+ struct timer_list dma_err_timer;
+ struct timer_list phy_info_timer;
+
+ u32 wol;
+ u32 en_mng_pt;
+ u16 link_speed;
+ u16 link_duplex;
+
+ u8 port_num;
+
+ u8 __iomem *io_addr;
+ /* Interrupt Throttle Rate */
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+ struct work_struct dma_err_task;
+ bool fc_autoneg;
+
+ u8 tx_timeout_factor;
+
+ int msg_enable;
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ ktime_t base_time;
+ ktime_t cycle_time;
+
+ /* OS defined structs */
+ struct pci_dev *pdev;
+ /* lock for statistics */
+ spinlock_t stats64_lock;
+ struct rtnl_link_stats64 stats64;
+
+ /* structs defined in igc_hw.h */
+ struct igc_hw hw;
+ struct igc_hw_stats stats;
+
+ struct igc_q_vector *q_vector[MAX_Q_VECTORS];
+ u32 eims_enable_mask;
+ u32 eims_other;
+
+ u16 tx_ring_count;
+ u16 rx_ring_count;
+
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 rx_hwtstamp_cleared;
+
+ u32 rss_queues;
+ u32 rss_indir_tbl_init;
+
+ /* RX network flow classification support */
+ struct hlist_head nfc_filter_list;
+ unsigned int nfc_filter_count;
+
+ /* lock for RX network flow classification filter */
+ spinlock_t nfc_lock;
+ bool etype_bitmap[MAX_ETYPE_FILTER];
+
+ struct igc_mac_addr *mac_table;
+
+ u8 rss_indir_tbl[IGC_RETA_SIZE];
+
+ unsigned long link_check_timeout;
+ struct igc_info ei;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
+ unsigned long last_rx_timestamp;
+ unsigned int ptp_flags;
+ /* System time value lock */
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+};
void igc_up(struct igc_adapter *adapter);
void igc_down(struct igc_adapter *adapter);
@@ -36,10 +227,10 @@ void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
-int igc_add_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags);
-int igc_del_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags);
+int igc_add_mac_filter(struct igc_adapter *adapter, const u8 *addr,
+ const s8 queue, const u8 flags);
+int igc_del_mac_filter(struct igc_adapter *adapter, const u8 *addr,
+ const u8 flags);
void igc_update_stats(struct igc_adapter *adapter);
/* igc_dump declarations */
@@ -50,14 +241,10 @@ extern char igc_driver_name[];
extern char igc_driver_version[];
#define IGC_REGS_LEN 740
-#define IGC_RETA_SIZE 128
/* flags controlling PTP/1588 function */
#define IGC_PTP_ENABLED BIT(0)
-/* Interrupt defines */
-#define IGC_START_ITR 648 /* ~6000 ints/sec */
-
/* Flags definitions */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
@@ -70,6 +257,7 @@ extern char igc_driver_version[];
#define IGC_FLAG_HAS_MSIX BIT(13)
#define IGC_FLAG_VLAN_PROMISC BIT(15)
#define IGC_FLAG_RX_LEGACY BIT(16)
+#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
@@ -78,6 +266,7 @@ extern char igc_driver_version[];
#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_4K_ITR 980
#define IGC_20K_ITR 196
@@ -99,13 +288,6 @@ extern char igc_driver_version[];
#define IGC_MIN_RXD 80
#define IGC_MAX_RXD 4096
-/* Transmit and receive queues */
-#define IGC_MAX_RX_QUEUES 4
-#define IGC_MAX_TX_QUEUES 4
-
-#define MAX_Q_VECTORS 8
-#define MAX_STD_JUMBO_FRAME_SIZE 9216
-
/* Supported Rx Buffer Sizes */
#define IGC_RXBUFFER_256 256
#define IGC_RXBUFFER_2048 2048
@@ -232,83 +414,6 @@ struct igc_rx_buffer {
__u16 pagecnt_bias;
};
-struct igc_tx_queue_stats {
- u64 packets;
- u64 bytes;
- u64 restart_queue;
- u64 restart_queue2;
-};
-
-struct igc_rx_queue_stats {
- u64 packets;
- u64 bytes;
- u64 drops;
- u64 csum_err;
- u64 alloc_failed;
-};
-
-struct igc_rx_packet_stats {
- u64 ipv4_packets; /* IPv4 headers processed */
- u64 ipv4e_packets; /* IPv4E headers with extensions processed */
- u64 ipv6_packets; /* IPv6 headers processed */
- u64 ipv6e_packets; /* IPv6E headers with extensions processed */
- u64 tcp_packets; /* TCP headers processed */
- u64 udp_packets; /* UDP headers processed */
- u64 sctp_packets; /* SCTP headers processed */
- u64 nfs_packets; /* NFS headers processe */
- u64 other_packets;
-};
-
-struct igc_ring_container {
- struct igc_ring *ring; /* pointer to linked list of rings */
- unsigned int total_bytes; /* total bytes processed this int */
- unsigned int total_packets; /* total packets processed this int */
- u16 work_limit; /* total work allowed per interrupt */
- u8 count; /* total number of rings in vector */
- u8 itr; /* current ITR setting for ring */
-};
-
-struct igc_ring {
- struct igc_q_vector *q_vector; /* backlink to q_vector */
- struct net_device *netdev; /* back pointer to net_device */
- struct device *dev; /* device for dma mapping */
- union { /* array of buffer info structs */
- struct igc_tx_buffer *tx_buffer_info;
- struct igc_rx_buffer *rx_buffer_info;
- };
- void *desc; /* descriptor ring memory */
- unsigned long flags; /* ring specific flags */
- void __iomem *tail; /* pointer to ring tail register */
- dma_addr_t dma; /* phys address of the ring */
- unsigned int size; /* length of desc. ring in bytes */
-
- u16 count; /* number of desc. in the ring */
- u8 queue_index; /* logical index of the ring*/
- u8 reg_idx; /* physical index of the ring */
- bool launchtime_enable; /* true if LaunchTime is enabled */
-
- /* everything past this point are written often */
- u16 next_to_clean;
- u16 next_to_use;
- u16 next_to_alloc;
-
- union {
- /* TX */
- struct {
- struct igc_tx_queue_stats tx_stats;
- struct u64_stats_sync tx_syncp;
- struct u64_stats_sync tx_syncp2;
- };
- /* RX */
- struct {
- struct igc_rx_queue_stats rx_stats;
- struct igc_rx_packet_stats pkt_stats;
- struct u64_stats_sync rx_syncp;
- struct sk_buff *skb;
- };
- };
-} ____cacheline_internodealigned_in_smp;
-
struct igc_q_vector {
struct igc_adapter *adapter; /* backlink */
void __iomem *itr_register;
@@ -329,8 +434,6 @@ struct igc_q_vector {
struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
};
-#define MAX_ETYPE_FILTER (4 - 1)
-
enum igc_filter_match_flags {
IGC_FILTER_FLAG_ETHER_TYPE = 0x1,
IGC_FILTER_FLAG_VLAN_TCI = 0x2,
@@ -363,119 +466,16 @@ struct igc_nfc_filter {
struct igc_mac_addr {
u8 addr[ETH_ALEN];
- u8 queue;
+ s8 queue;
u8 state; /* bitmask */
};
#define IGC_MAC_STATE_DEFAULT 0x1
#define IGC_MAC_STATE_IN_USE 0x2
#define IGC_MAC_STATE_SRC_ADDR 0x4
-#define IGC_MAC_STATE_QUEUE_STEERING 0x8
#define IGC_MAX_RXNFC_FILTERS 16
-/* Board specific private data structure */
-struct igc_adapter {
- struct net_device *netdev;
-
- unsigned long state;
- unsigned int flags;
- unsigned int num_q_vectors;
-
- struct msix_entry *msix_entries;
-
- /* TX */
- u16 tx_work_limit;
- u32 tx_timeout_count;
- int num_tx_queues;
- struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
-
- /* RX */
- int num_rx_queues;
- struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
-
- struct timer_list watchdog_timer;
- struct timer_list dma_err_timer;
- struct timer_list phy_info_timer;
-
- u32 wol;
- u32 en_mng_pt;
- u16 link_speed;
- u16 link_duplex;
-
- u8 port_num;
-
- u8 __iomem *io_addr;
- /* Interrupt Throttle Rate */
- u32 rx_itr_setting;
- u32 tx_itr_setting;
-
- struct work_struct reset_task;
- struct work_struct watchdog_task;
- struct work_struct dma_err_task;
- bool fc_autoneg;
-
- u8 tx_timeout_factor;
-
- int msg_enable;
- u32 max_frame_size;
- u32 min_frame_size;
-
- /* OS defined structs */
- struct pci_dev *pdev;
- /* lock for statistics */
- spinlock_t stats64_lock;
- struct rtnl_link_stats64 stats64;
-
- /* structs defined in igc_hw.h */
- struct igc_hw hw;
- struct igc_hw_stats stats;
-
- struct igc_q_vector *q_vector[MAX_Q_VECTORS];
- u32 eims_enable_mask;
- u32 eims_other;
-
- u16 tx_ring_count;
- u16 rx_ring_count;
-
- u32 tx_hwtstamp_timeouts;
- u32 tx_hwtstamp_skipped;
- u32 rx_hwtstamp_cleared;
-
- u32 rss_queues;
- u32 rss_indir_tbl_init;
-
- /* RX network flow classification support */
- struct hlist_head nfc_filter_list;
- struct hlist_head cls_flower_list;
- unsigned int nfc_filter_count;
-
- /* lock for RX network flow classification filter */
- spinlock_t nfc_lock;
- bool etype_bitmap[MAX_ETYPE_FILTER];
-
- struct igc_mac_addr *mac_table;
-
- u8 rss_indir_tbl[IGC_RETA_SIZE];
-
- unsigned long link_check_timeout;
- struct igc_info ei;
-
- struct ptp_clock *ptp_clock;
- struct ptp_clock_info ptp_caps;
- struct work_struct ptp_tx_work;
- struct sk_buff *ptp_tx_skb;
- struct hwtstamp_config tstamp_config;
- unsigned long ptp_tx_start;
- unsigned long last_rx_ptp_check;
- unsigned long last_rx_timestamp;
- unsigned int ptp_flags;
- /* System time value lock */
- spinlock_t tmreg_lock;
- struct cyclecounter cc;
- struct timecounter tc;
-};
-
/* igc_desc_unused - calculate if we have unused descriptors */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
{
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 5a506440560a..f7fb18d8d8f5 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -212,6 +212,9 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I225_I:
case IGC_DEV_ID_I220_V:
case IGC_DEV_ID_I225_K:
+ case IGC_DEV_ID_I225_K2:
+ case IGC_DEV_ID_I225_LMVP:
+ case IGC_DEV_ID_I225_IT:
case IGC_DEV_ID_I225_BLANK_NVM:
mac->type = igc_i225;
break;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 4ddccccf42cc..af0c03d77a39 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -44,9 +44,6 @@
/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
#define IGC_WUPM_BYTES 128
-/* Physical Func Reset Done Indication */
-#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
@@ -66,8 +63,11 @@
* (RAR[15]) for our directed address used by controllers with
* manageability enabled, allowing us room for 15 multicast addresses.
*/
+#define IGC_RAH_QSEL_MASK 0x000C0000
+#define IGC_RAH_QSEL_SHIFT 18
+#define IGC_RAH_QSEL_ENABLE BIT(28)
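+/* Illustrative sketch (not part of the original patch): to steer frames
+ * matching RAL/RAH[n] to a given rx queue, the queue number is written into
+ * the QSEL field and the enable bit is set, e.g.
+ *
+ *   rah &= ~IGC_RAH_QSEL_MASK;
+ *   rah |= (queue << IGC_RAH_QSEL_SHIFT) | IGC_RAH_QSEL_ENABLE;
+ */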
#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */
-#define IGC_RAH_POOL_1 0x00040000
+
#define IGC_RAL_MAC_ADDR_LEN 4
#define IGC_RAH_MAC_ADDR_LEN 2
@@ -94,8 +94,6 @@
#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
-#define IGC_CONNSW_AUTOSENSE_EN 0x1
-
/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
#define MAX_JUMBO_FRAME_SIZE 0x2600
@@ -377,6 +375,11 @@
#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */
+
+#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
+#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
+
/* Time Sync Interrupt Causes */
#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */
@@ -431,6 +434,14 @@
#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */
+/* Transmit Scheduling */
+#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
+#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
+
+#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
+#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
+#define IGC_TXQCTL_STRICT_END 0x00000004
+
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
@@ -497,7 +508,6 @@
#define IGC_MDIC_READY 0x10000000
#define IGC_MDIC_INT_EN 0x20000000
#define IGC_MDIC_ERROR 0x40000000
-#define IGC_MDIC_DEST 0x80000000
#define IGC_N0_QUEUE -1
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index f530fc29b074..0a8c4a7412a4 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -153,7 +153,7 @@ static void igc_get_regs(struct net_device *netdev,
memset(p, 0, IGC_REGS_LEN * sizeof(u32));
- regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
+ regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
regs_buff[0] = rd32(IGC_CTRL);
@@ -306,6 +306,15 @@ static void igc_get_regs(struct net_device *netdev,
regs_buff[164 + i] = rd32(IGC_TDT(i));
for (i = 0; i < 4; i++)
regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
+
+ /* XXX: Due to a bug a few lines above, RAL and RAH registers are
+ * overwritten. To preserve the ABI, we write these registers again in
+ * regs_buff.
+ */
+ for (i = 0; i < 16; i++)
+ regs_buff[172 + i] = rd32(IGC_RAL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[188 + i] = rd32(IGC_RAH(i));
}
static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -1257,20 +1266,16 @@ int igc_add_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
}
if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
- err = igc_add_mac_steering_filter(adapter,
- input->filter.dst_addr,
- input->action, 0);
- err = min_t(int, err, 0);
+ err = igc_add_mac_filter(adapter, input->filter.dst_addr,
+ input->action, 0);
if (err)
return err;
}
if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
- err = igc_add_mac_steering_filter(adapter,
- input->filter.src_addr,
- input->action,
- IGC_MAC_STATE_SRC_ADDR);
- err = min_t(int, err, 0);
+ err = igc_add_mac_filter(adapter, input->filter.src_addr,
+ input->action,
+ IGC_MAC_STATE_SRC_ADDR);
if (err)
return err;
}
@@ -1324,13 +1329,11 @@ int igc_erase_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
ntohs(input->filter.vlan_tci));
if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
- igc_del_mac_steering_filter(adapter, input->filter.src_addr,
- input->action,
- IGC_MAC_STATE_SRC_ADDR);
+ igc_del_mac_filter(adapter, input->filter.src_addr,
+ IGC_MAC_STATE_SRC_ADDR);
if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
- igc_del_mac_steering_filter(adapter, input->filter.dst_addr,
- input->action, 0);
+ igc_del_mac_filter(adapter, input->filter.dst_addr, 0);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 90ac0e0144d8..af34ae310327 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -21,6 +21,9 @@
#define IGC_DEV_ID_I225_I 0x15F8
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
+#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I225_LMVP 0x5502
+#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
/* Function pointers for the MAC. */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 69fa1ce1f927..9d5f8287c704 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -9,11 +9,13 @@
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
+#include <net/pkt_sched.h>
#include <net/ipv6.h>
#include "igc.h"
#include "igc_hw.h"
+#include "igc_tsn.h"
#define DRV_VERSION "0.0.1-k"
#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
@@ -45,6 +47,9 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
/* required last entry */
{0, }
@@ -106,6 +111,9 @@ void igc_reset(struct igc_adapter *adapter)
/* Re-enable PTP, where applicable. */
igc_ptp_reset(adapter);
+ /* Re-enable TSN offloading, where applicable. */
+ igc_tsn_offload_apply(adapter);
+
igc_get_phy_info(hw);
}
@@ -757,48 +765,74 @@ static void igc_setup_tctl(struct igc_adapter *adapter)
}
/**
- * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
- * @adapter: address of board private structure
- * @index: Index of the RAR entry which need to be synced with MAC table
+ * igc_set_mac_filter_hw() - Set MAC address filter in hardware
+ * @adapter: Pointer to adapter where the filter should be set
+ * @index: Filter index
+ * @addr: Destination MAC address
+ * @queue: If non-negative, queue assignment feature is enabled and frames
+ * matching the filter are enqueued onto 'queue'. Otherwise, queue
+ * assignment is disabled.
*/
-static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
+static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
+ const u8 *addr, int queue)
{
- u8 *addr = adapter->mac_table[index].addr;
+ struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
- u32 rar_low, rar_high;
+ u32 ral, rah;
- /* HW expects these to be in network order when they are plugged
- * into the registers which are little endian. In order to guarantee
- * that ordering we need to do an leXX_to_cpup here in order to be
- * ready for the byteswap that occurs with writel
- */
- rar_low = le32_to_cpup((__le32 *)(addr));
- rar_high = le16_to_cpup((__le16 *)(addr + 4));
+ if (WARN_ON(index >= hw->mac.rar_entry_count))
+ return;
- /* Indicate to hardware the Address is Valid. */
- if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
- if (is_valid_ether_addr(addr))
- rar_high |= IGC_RAH_AV;
+ ral = le32_to_cpup((__le32 *)(addr));
+ rah = le16_to_cpup((__le16 *)(addr + 4));
- rar_high |= IGC_RAH_POOL_1 <<
- adapter->mac_table[index].queue;
+ if (queue >= 0) {
+ rah &= ~IGC_RAH_QSEL_MASK;
+ rah |= (queue << IGC_RAH_QSEL_SHIFT);
+ rah |= IGC_RAH_QSEL_ENABLE;
}
- wr32(IGC_RAL(index), rar_low);
- wrfl();
- wr32(IGC_RAH(index), rar_high);
- wrfl();
+ rah |= IGC_RAH_AV;
+
+ wr32(IGC_RAL(index), ral);
+ wr32(IGC_RAH(index), rah);
+
+ netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
+}
+
+/**
+ * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
+ * @adapter: Pointer to adapter where the filter should be cleared
+ * @index: Filter index
+ */
+static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
+{
+ struct net_device *dev = adapter->netdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ if (WARN_ON(index >= hw->mac.rar_entry_count))
+ return;
+
+ wr32(IGC_RAL(index), 0);
+ wr32(IGC_RAH(index), 0);
+
+ netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}
/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
struct igc_mac_addr *mac_table = &adapter->mac_table[0];
+ struct net_device *dev = adapter->netdev;
+ u8 *addr = adapter->hw.mac.addr;
- ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
+ netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);
+
+ ether_addr_copy(mac_table->addr, addr);
mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ mac_table->queue = -1;
- igc_rar_set_index(adapter, 0);
+ igc_set_mac_filter_hw(adapter, 0, addr, mac_table->queue);
}
/**
@@ -864,6 +898,23 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
return netdev_mc_count(netdev);
}
+static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+{
+ ktime_t cycle_time = adapter->cycle_time;
+ ktime_t base_time = adapter->base_time;
+ u32 launchtime;
+
+ /* FIXME: when using ETF together with taprio, we may have a
+ * case where 'delta' is larger than the cycle_time; this may
+ * cause problems if we don't read the current value of
+ * IGC_BASET, as the value written into the launchtime
+ * descriptor field may be misinterpreted.
+ */
+ div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+
+ return cpu_to_le32(launchtime);
+}
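+
+/* Worked example (illustrative, not from the original patch): with a Qbv
+ * cycle_time of 1 ms and base_time of 0, a requested txtime of 2.3 ms wraps
+ * to a launchtime of 0.3 ms, i.e. the offset of txtime within the current
+ * cycle: launchtime = (txtime - base_time) % cycle_time.
+ */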
+
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
struct igc_tx_buffer *first,
u32 vlan_macip_lens, u32 type_tucmd,
@@ -871,7 +922,6 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
{
struct igc_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
- struct timespec64 ts;
context_desc = IGC_TX_CTXTDESC(tx_ring, i);
@@ -893,9 +943,12 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
* should have been handled by the upper layers.
*/
if (tx_ring->launchtime_enable) {
- ts = ktime_to_timespec64(first->skb->tstamp);
+ struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+ ktime_t txtime = first->skb->tstamp;
+
first->skb->tstamp = ktime_set(0, 0);
- context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
+ context_desc->launch_time = igc_tx_launchtime(adapter,
+ txtime);
} else {
context_desc->launch_time = 0;
}
@@ -2133,129 +2186,148 @@ static void igc_nfc_filter_restore(struct igc_adapter *adapter)
spin_unlock(&adapter->nfc_lock);
}
-/* If the filter to be added and an already existing filter express
- * the same address and address type, it should be possible to only
- * override the other configurations, for example the queue to steer
- * traffic.
- */
-static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
- const u8 *addr, const u8 flags)
+static int igc_find_mac_filter(struct igc_adapter *adapter, const u8 *addr,
+ u8 flags)
{
- if (!(entry->state & IGC_MAC_STATE_IN_USE))
- return true;
+ int max_entries = adapter->hw.mac.rar_entry_count;
+ struct igc_mac_addr *entry;
+ int i;
- if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
- (flags & IGC_MAC_STATE_SRC_ADDR))
- return false;
+ for (i = 0; i < max_entries; i++) {
+ entry = &adapter->mac_table[i];
- if (!ether_addr_equal(addr, entry->addr))
- return false;
+ if (!(entry->state & IGC_MAC_STATE_IN_USE))
+ continue;
+ if (!ether_addr_equal(addr, entry->addr))
+ continue;
+ if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
+ (flags & IGC_MAC_STATE_SRC_ADDR))
+ continue;
- return true;
+ return i;
+ }
+
+ return -1;
}
-/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
- * 'flags' is used to indicate what kind of match is made, match is by
- * default for the destination address, if matching by source address
- * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_add_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
+static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
{
- struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
+ int max_entries = adapter->hw.mac.rar_entry_count;
+ struct igc_mac_addr *entry;
int i;
- if (is_zero_ether_addr(addr))
+ for (i = 0; i < max_entries; i++) {
+ entry = &adapter->mac_table[i];
+
+ if (!(entry->state & IGC_MAC_STATE_IN_USE))
+ return i;
+ }
+
+ return -1;
+}
+
+/**
+ * igc_add_mac_filter() - Add MAC address filter
+ * @adapter: Pointer to adapter where the filter should be added
+ * @addr: MAC address
+ * @queue: If non-negative, queue assignment feature is enabled and frames
+ * matching the filter are enqueued onto 'queue'. Otherwise, queue
+ * assignment is disabled.
+ * @flags: Set IGC_MAC_STATE_SRC_ADDR bit to indicate @addr is a source
+ * address
+ *
+ * Return: 0 in case of success, negative errno code otherwise.
+ */
+int igc_add_mac_filter(struct igc_adapter *adapter, const u8 *addr,
+ const s8 queue, const u8 flags)
+{
+ struct net_device *dev = adapter->netdev;
+ int index;
+
+ if (!is_valid_ether_addr(addr))
return -EINVAL;
+ if (flags & IGC_MAC_STATE_SRC_ADDR)
+ return -ENOTSUPP;
- /* Search for the first empty entry in the MAC table.
- * Do not touch entries at the end of the table reserved for the VF MAC
- * addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
- addr, 0))
- continue;
+ index = igc_find_mac_filter(adapter, addr, flags);
+ if (index >= 0)
+ goto update_queue_assignment;
- ether_addr_copy(adapter->mac_table[i].addr, addr);
- adapter->mac_table[i].queue = queue;
- adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
+ index = igc_get_avail_mac_filter_slot(adapter);
+ if (index < 0)
+ return -ENOSPC;
- igc_rar_set_index(adapter, i);
- return i;
- }
+ netdev_dbg(dev, "Add MAC address filter: index %d address %pM queue %d",
+ index, addr, queue);
+
+ ether_addr_copy(adapter->mac_table[index].addr, addr);
+ adapter->mac_table[index].state |= IGC_MAC_STATE_IN_USE | flags;
+update_queue_assignment:
+ adapter->mac_table[index].queue = queue;
- return -ENOSPC;
+ igc_set_mac_filter_hw(adapter, index, addr, queue);
+ return 0;
}
-/* Remove a MAC filter for 'addr' directing matching traffic to
- * 'queue', 'flags' is used to indicate what kind of match need to be
- * removed, match is by default for the destination address, if
- * matching by source address is to be removed the flag
- * IGC_MAC_STATE_SRC_ADDR can be used.
+/**
+ * igc_del_mac_filter() - Delete MAC address filter
+ * @adapter: Pointer to adapter where the filter should be deleted from
+ * @addr: MAC address
+ * @flags: Set IGC_MAC_STATE_SRC_ADDR bit to indicate @addr is a source
+ * address
+ *
+ * Return: 0 in case of success, negative errno code otherwise.
*/
-static int igc_del_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
+int igc_del_mac_filter(struct igc_adapter *adapter, const u8 *addr,
+ const u8 flags)
{
- struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
+ struct net_device *dev = adapter->netdev;
+ struct igc_mac_addr *entry;
+ int index;
- if (is_zero_ether_addr(addr))
+ if (!is_valid_ether_addr(addr))
return -EINVAL;
- /* Search for matching entry in the MAC table based on given address
- * and queue. Do not touch entries at the end of the table reserved
- * for the VF MAC addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
- continue;
- if (adapter->mac_table[i].state != 0)
- continue;
- if (adapter->mac_table[i].queue != queue)
- continue;
- if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
- continue;
+ index = igc_find_mac_filter(adapter, addr, flags);
+ if (index < 0)
+ return -ENOENT;
- /* When a filter for the default address is "deleted",
- * we return it to its initial configuration
+ entry = &adapter->mac_table[index];
+
+ if (entry->state & IGC_MAC_STATE_DEFAULT) {
+ /* If this is the default filter, we don't actually delete it.
+ * We just reset to its default value i.e. disable queue
+ * assignment.
*/
- if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
- adapter->mac_table[i].state =
- IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
- adapter->mac_table[i].queue = 0;
- } else {
- adapter->mac_table[i].state = 0;
- adapter->mac_table[i].queue = 0;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- }
+ netdev_dbg(dev, "Disable default MAC filter queue assignment");
- igc_rar_set_index(adapter, i);
- return 0;
+ entry->queue = -1;
+ igc_set_mac_filter_hw(adapter, 0, addr, entry->queue);
+ } else {
+ netdev_dbg(dev, "Delete MAC address filter: index %d address %pM",
+ index, addr);
+
+ entry->state = 0;
+ entry->queue = -1;
+ memset(entry->addr, 0, ETH_ALEN);
+ igc_clear_mac_filter_hw(adapter, index);
}
- return -ENOENT;
+ return 0;
}
static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- int ret;
-
- ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
- return min_t(int, ret, 0);
+ return igc_add_mac_filter(adapter, addr, -1, 0);
}
static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
-
- return 0;
+ return igc_del_mac_filter(adapter, addr, 0);
}
/**
@@ -2325,7 +2397,9 @@ static void igc_configure(struct igc_adapter *adapter)
igc_setup_mrqc(adapter);
igc_setup_rctl(adapter);
+ igc_set_default_mac_filter(adapter);
igc_nfc_filter_restore(adapter);
+
igc_configure_tx(adapter);
igc_configure_rx(adapter);
@@ -3458,9 +3532,6 @@ static void igc_nfc_filter_exit(struct igc_adapter *adapter)
hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
igc_erase_filter(adapter, rule);
- hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
- igc_erase_filter(adapter, rule);
-
spin_unlock(&adapter->nfc_lock);
}
@@ -3689,106 +3760,6 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
-/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
- * 'flags' is used to indicate what kind of match is made, match is by
- * default for the destination address, if matching by source address
- * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_add_mac_filter_flags(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue,
- const u8 flags)
-{
- struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for the first empty entry in the MAC table.
- * Do not touch entries at the end of the table reserved for the VF MAC
- * addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
- addr, flags))
- continue;
-
- ether_addr_copy(adapter->mac_table[i].addr, addr);
- adapter->mac_table[i].queue = queue;
- adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags;
-
- igc_rar_set_index(adapter, i);
- return i;
- }
-
- return -ENOSPC;
-}
-
-int igc_add_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags)
-{
- return igc_add_mac_filter_flags(adapter, addr, queue,
- IGC_MAC_STATE_QUEUE_STEERING | flags);
-}
-
-/* Remove a MAC filter for 'addr' directing matching traffic to
- * 'queue', 'flags' is used to indicate what kind of match need to be
- * removed, match is by default for the destination address, if
- * matching by source address is to be removed the flag
- * IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_del_mac_filter_flags(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue,
- const u8 flags)
-{
- struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for matching entry in the MAC table based on given address
- * and queue. Do not touch entries at the end of the table reserved
- * for the VF MAC addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
- continue;
- if ((adapter->mac_table[i].state & flags) != flags)
- continue;
- if (adapter->mac_table[i].queue != queue)
- continue;
- if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
- continue;
-
- /* When a filter for the default address is "deleted",
- * we return it to its initial configuration
- */
- if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
- adapter->mac_table[i].state =
- IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
- } else {
- adapter->mac_table[i].state = 0;
- adapter->mac_table[i].queue = 0;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- }
-
- igc_rar_set_index(adapter, i);
- return 0;
- }
-
- return -ENOENT;
-}
-
-int igc_del_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags)
-{
- return igc_del_mac_filter_flags(adapter, addr, queue,
- IGC_MAC_STATE_QUEUE_STEERING | flags);
-}
-
static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@@ -4009,7 +3980,6 @@ static void igc_watchdog_task(struct work_struct *work)
struct igc_hw *hw = &adapter->hw;
struct igc_phy_info *phy = &hw->phy;
u16 phy_data, retry_count = 20;
- u32 connsw;
u32 link;
int i;
@@ -4022,14 +3992,6 @@ static void igc_watchdog_task(struct work_struct *work)
link = false;
}
- /* Force link down if we have fiber to swap to */
- if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
- if (hw->phy.media_type == igc_media_type_copper) {
- connsw = rd32(IGC_CONNSW);
- if (!(connsw & IGC_CONNSW_AUTOSENSE_EN))
- link = 0;
- }
- }
if (link) {
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4491,6 +4453,158 @@ static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
+static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
+ bool enable)
+{
+ struct igc_ring *ring;
+ int i;
+
+ if (queue < 0 || queue >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[queue];
+ ring->launchtime_enable = enable;
+
+ if (adapter->base_time)
+ return 0;
+
+ adapter->cycle_time = NSEC_PER_SEC;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ring = adapter->tx_ring[i];
+ ring->start_time = 0;
+ ring->end_time = NSEC_PER_SEC;
+ }
+
+ return 0;
+}
+
+static bool validate_schedule(const struct tc_taprio_qopt_offload *qopt)
+{
+ int queue_uses[IGC_MAX_TX_QUEUES] = { };
+ size_t n;
+
+ if (qopt->cycle_time_extension)
+ return false;
+
+ for (n = 0; n < qopt->num_entries; n++) {
+ const struct tc_taprio_sched_entry *e;
+ int i;
+
+ e = &qopt->entries[n];
+
+ /* i225 only supports "global" frame preemption
+ * settings.
+ */
+ if (e->command != TC_TAPRIO_CMD_SET_GATES)
+ return false;
+
+ for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+ if (e->gate_mask & BIT(i))
+ queue_uses[i]++;
+
+ if (queue_uses[i] > 1)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
+ struct tc_etf_qopt_offload *qopt)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+ err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
+ if (err)
+ return err;
+
+ return igc_tsn_offload_apply(adapter);
+}
+
+static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u32 start_time = 0, end_time = 0;
+ size_t n;
+
+ if (!qopt->enable) {
+ adapter->base_time = 0;
+ return 0;
+ }
+
+ if (adapter->base_time)
+ return -EALREADY;
+
+ if (!validate_schedule(qopt))
+ return -EINVAL;
+
+ adapter->cycle_time = qopt->cycle_time;
+ adapter->base_time = qopt->base_time;
+
+ /* FIXME: be a little smarter about cases when the gate for a
+ * queue stays open for more than one entry.
+ */
+ for (n = 0; n < qopt->num_entries; n++) {
+ struct tc_taprio_sched_entry *e = &qopt->entries[n];
+ int i;
+
+ end_time += e->interval;
+
+ for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ if (!(e->gate_mask & BIT(i)))
+ continue;
+
+ ring->start_time = start_time;
+ ring->end_time = end_time;
+ }
+
+ start_time += e->interval;
+ }
+
+ return 0;
+}
+
+static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+ err = igc_save_qbv_schedule(adapter, qopt);
+ if (err)
+ return err;
+
+ return igc_tsn_offload_apply(adapter);
+}
+
+static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return igc_tsn_enable_qbv_scheduling(adapter, type_data);
+
+ case TC_SETUP_QDISC_ETF:
+ return igc_tsn_enable_launchtime(adapter, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -4503,6 +4617,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
.ndo_do_ioctl = igc_ioctl,
+ .ndo_setup_tc = igc_setup_tc,
};
/* PCIe configuration access */
@@ -4726,6 +4841,17 @@ static int igc_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SCTP_CRC;
+ netdev->features |= NETIF_F_HW_TC;
+
+#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
/* setup the private structure */
err = igc_sw_init(adapter);
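As a reference for the hunk above, here is a minimal user-space sketch (not driver code) of how igc_save_qbv_schedule() turns a taprio schedule into per-queue transmission windows: each entry's interval extends the running end_time, and every queue whose bit is set in the entry's gate_mask gets the current [start_time, end_time) window. The schedule values below are made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define NUM_TX_QUEUES 4

struct sched_entry {
	uint32_t gate_mask;	/* bit i set => queue i may transmit */
	uint32_t interval;	/* entry length in nanoseconds */
};

int main(void)
{
	/* Hypothetical 1 ms cycle: queue 0 gets 300 us, queues 1-3 share 700 us */
	struct sched_entry entries[] = {
		{ .gate_mask = 0x1, .interval = 300000 },
		{ .gate_mask = 0xe, .interval = 700000 },
	};
	uint32_t start[NUM_TX_QUEUES] = { 0 }, end[NUM_TX_QUEUES] = { 0 };
	uint32_t start_time = 0, end_time = 0;
	size_t n;
	int i;

	for (n = 0; n < sizeof(entries) / sizeof(entries[0]); n++) {
		end_time += entries[n].interval;

		for (i = 0; i < NUM_TX_QUEUES; i++) {
			if (!(entries[n].gate_mask & (1u << i)))
				continue;
			start[i] = start_time;	/* window opens */
			end[i] = end_time;	/* window closes */
		}

		start_time += entries[n].interval;
	}

	for (i = 0; i < NUM_TX_QUEUES; i++)
		printf("queue %d: window [%u, %u) ns\n", i, start[i], end[i]);

	return 0;
}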
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index d4af53a80f11..6093cde2351c 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -231,6 +231,18 @@
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+/* Transmit Scheduling Registers */
+#define IGC_TQAVCTRL 0x3570
+#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
+#define IGC_BASET_L 0x3314
+#define IGC_BASET_H 0x3318
+#define IGC_QBVCYCLET 0x331C
+#define IGC_QBVCYCLET_S 0x3320
+
+#define IGC_STQT(_n) (0x3324 + 0x4 * (_n))
+#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n))
+#define IGC_DTXMXPKTSZ 0x355C
+
/* System Time Registers */
#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */
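The per-queue Tx scheduling registers added above are indexed with a 4-byte stride from their base offsets. A trivial sketch, reusing the macro definitions from the hunk (the queue count of 4 is assumed for i225):

#include <stdio.h>

#define IGC_TXQCTL(_n)	(0x3344 + 0x4 * (_n))
#define IGC_STQT(_n)	(0x3324 + 0x4 * (_n))
#define IGC_ENDQT(_n)	(0x3334 + 0x4 * (_n))

int main(void)
{
	int i;

	/* Print the register offsets programmed for each Tx queue */
	for (i = 0; i < 4; i++)
		printf("queue %d: TXQCTL=0x%04x STQT=0x%04x ENDQT=0x%04x\n",
		       i, IGC_TXQCTL(i), IGC_STQT(i), IGC_ENDQT(i));
	return 0;
}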
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
new file mode 100644
index 000000000000..174103c4bea6
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Intel Corporation */
+
+#include "igc.h"
+#include "igc_tsn.h"
+
+static bool is_any_launchtime(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ if (ring->launchtime_enable)
+ return true;
+ }
+
+ return false;
+}
+
+/* Returns the TSN specific registers to their default values after
+ * TSN offloading is disabled.
+ */
+static int igc_tsn_disable_offload(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 tqavctrl;
+ int i;
+
+ if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED))
+ return 0;
+
+ adapter->cycle_time = 0;
+
+ wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+
+ tqavctrl = rd32(IGC_TQAVCTRL);
+ tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
+ IGC_TQAVCTRL_ENHANCED_QAV);
+ wr32(IGC_TQAVCTRL, tqavctrl);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ ring->start_time = 0;
+ ring->end_time = 0;
+ ring->launchtime_enable = false;
+
+ wr32(IGC_TXQCTL(i), 0);
+ wr32(IGC_STQT(i), 0);
+ wr32(IGC_ENDQT(i), NSEC_PER_SEC);
+ }
+
+ wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC);
+ wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
+
+ adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
+
+ return 0;
+}
+
+static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 tqavctrl, baset_l, baset_h;
+ u32 sec, nsec, cycle;
+ ktime_t base_time, systim;
+ int i;
+
+ if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)
+ return 0;
+
+ cycle = adapter->cycle_time;
+ base_time = adapter->base_time;
+
+ wr32(IGC_TSAUXC, 0);
+ wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+
+ tqavctrl = rd32(IGC_TQAVCTRL);
+ tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+ wr32(IGC_TQAVCTRL, tqavctrl);
+
+ wr32(IGC_QBVCYCLET_S, cycle);
+ wr32(IGC_QBVCYCLET, cycle);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+ u32 txqctl = 0;
+
+ wr32(IGC_STQT(i), ring->start_time);
+ wr32(IGC_ENDQT(i), ring->end_time);
+
+ if (adapter->base_time) {
+ /* If we have a base_time we are in "taprio"
+ * mode and we need to be strict about the
+ * cycles: only transmit a packet if it can be
+ * completed during that cycle.
+ */
+ txqctl |= IGC_TXQCTL_STRICT_CYCLE |
+ IGC_TXQCTL_STRICT_END;
+ }
+
+ if (ring->launchtime_enable)
+ txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+
+ wr32(IGC_TXQCTL(i), txqctl);
+ }
+
+ nsec = rd32(IGC_SYSTIML);
+ sec = rd32(IGC_SYSTIMH);
+
+ systim = ktime_set(sec, nsec);
+
+ if (ktime_compare(systim, base_time) > 0) {
+ s64 n;
+
+ n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
+ base_time = ktime_add_ns(base_time, (n + 1) * cycle);
+ }
+
+ baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
+
+ wr32(IGC_BASET_H, baset_h);
+ wr32(IGC_BASET_L, baset_l);
+
+ adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED;
+
+ return 0;
+}
+
+int igc_tsn_offload_apply(struct igc_adapter *adapter)
+{
+ bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter);
+
+ if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled)
+ return 0;
+
+ if (!is_any_enabled) {
+ int err = igc_tsn_disable_offload(adapter);
+
+ if (err < 0)
+ return err;
+
+ /* The BASET registers aren't cleared when writing
+ * into them, so force a reset if the interface is
+ * running.
+ */
+ if (netif_running(adapter->netdev))
+ schedule_work(&adapter->reset_task);
+
+ return 0;
+ }
+
+ return igc_tsn_enable_offload(adapter);
+}
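A small stand-alone sketch of the base-time adjustment done in igc_tsn_enable_offload() above: if the requested base_time is already in the past, it is pushed forward by a whole number of cycles so that the first programmed cycle start lands strictly after the current NIC time. The sample values are arbitrary.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t base_time = 1000000;	/* requested start (ns), already in the past */
	int64_t systim = 123456789;	/* current SYSTIM value (ns), made up */
	int64_t cycle = 1000000;	/* cycle time (ns) */

	if (systim > base_time) {
		int64_t n = (systim - base_time) / cycle;

		/* advance by n+1 cycles: the result is strictly after systim */
		base_time += (n + 1) * cycle;
	}

	printf("effective base_time = %lld ns\n", (long long)base_time);
	return 0;
}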
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
new file mode 100644
index 000000000000..f76bc86ddccd
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020 Intel Corporation */
+
+#ifndef _IGC_TSN_H_
+#define _IGC_TSN_H_
+
+int igc_tsn_offload_apply(struct igc_adapter *adapter);
+
+#endif /* _IGC_TSN_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 718931d951bc..eab5934b04f5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2244,19 +2244,30 @@ xdp_out:
return ERR_PTR(-result);
}
+static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
+
static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
unsigned int size)
{
+ unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
#if (PAGE_SIZE < 8192)
- unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
-
rx_buffer->page_offset ^= truesize;
#else
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
- SKB_DATA_ALIGN(size);
-
rx_buffer->page_offset += truesize;
#endif
}
@@ -2290,6 +2301,11 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
+#endif
+
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
@@ -2323,7 +2339,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp.data_hard_start = xdp.data -
ixgbe_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
-
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
+#endif
skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
}
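For context, a user-space sketch of the truesize computation that the new ixgbe_rx_frame_truesize() helper centralizes: with 4K pages the buffer is a fixed half page, while with larger pages it is the cache-aligned payload plus (when build_skb is used) the shared-info area. SKB_DATA_ALIGN, IXGBE_SKB_PAD and the skb_shared_info size are approximated below with stand-in constants.

#include <stdio.h>

#define CACHE_ALIGN(x)	(((x) + 63UL) & ~63UL)	/* stand-in for SKB_DATA_ALIGN */
#define RX_PG_SIZE	4096UL			/* assume order-0 Rx pages */
#define SKB_PAD		192UL			/* stand-in for IXGBE_SKB_PAD */
#define SHINFO_SZ	320UL			/* rough sizeof(struct skb_shared_info) */

static unsigned long frame_truesize(unsigned long page_size, unsigned long size,
				    int build_skb)
{
	if (page_size < 8192)
		return RX_PG_SIZE / 2;		/* fixed half-page, power of two */

	return build_skb ? CACHE_ALIGN(SKB_PAD + size) + CACHE_ALIGN(SHINFO_SZ)
			 : CACHE_ALIGN(size);
}

int main(void)
{
	printf("PAGE_SIZE=4K,  1500B frame: truesize=%lu\n",
	       frame_truesize(4096, 1500, 1));
	printf("PAGE_SIZE=64K, 1500B frame: truesize=%lu\n",
	       frame_truesize(65536, 1500, 1));
	return 0;
}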
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 74b540ebb3dc..a656ee9a1fae 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -431,12 +431,14 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct ixgbe_adapter *adapter = q_vector->adapter;
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+ struct xdp_umem *umem = rx_ring->xsk_umem;
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
struct sk_buff *skb;
struct xdp_buff xdp;
xdp.rxq = &rx_ring->xdp_rxq;
+ xdp.frame_sz = xsk_umem_xdp_frame_sz(umem);
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4622c4ea2e46..a39e2cb384dd 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1095,19 +1095,31 @@ xdp_out:
return ERR_PTR(-result);
}
+static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
+
static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);
+#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
- SKB_DATA_ALIGN(size);
-
rx_buffer->page_offset += truesize;
#endif
}
@@ -1125,6 +1137,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
+#endif
+
while (likely(total_rx_packets < budget)) {
struct ixgbevf_rx_buffer *rx_buffer;
union ixgbe_adv_rx_desc *rx_desc;
@@ -1157,7 +1174,10 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
xdp.data_hard_start = xdp.data -
ixgbevf_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
-
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
+#endif
skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
}
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 900affbdcc0e..1645e4e7ebdb 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -276,7 +276,8 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
return pkts;
}
-static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
{
struct xrx200_priv *priv = netdev_priv(net_dev);
struct xrx200_chan *ch = &priv->chan_tx;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 81d24481b22c..4d4b6243318a 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -666,11 +666,6 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
return 0;
}
-static inline __be16 sum16_as_be(__sum16 sum)
-{
- return (__force __be16)sum;
-}
-
static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
u16 *l4i_chk, u32 *command, int length)
{
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 51889770958d..41d2a0eac5fa 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2148,12 +2148,17 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp,
struct mvneta_stats *stats)
{
- unsigned int len;
+ unsigned int len, sync;
+ struct page *page;
u32 ret, act;
len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
act = bpf_prog_run_xdp(prog, xdp);
+ /* Due to xdp_adjust_tail: DMA sync for_device must cover the max length the CPU touched */
+ sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+ sync = max(sync, len);
+
switch (act) {
case XDP_PASS:
stats->xdp_pass++;
@@ -2164,9 +2169,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog);
if (unlikely(err)) {
ret = MVNETA_XDP_DROPPED;
- page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
} else {
ret = MVNETA_XDP_REDIR;
stats->xdp_redirect++;
@@ -2175,10 +2179,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
}
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
- if (ret != MVNETA_XDP_TX)
- page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ if (ret != MVNETA_XDP_TX) {
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+ }
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2187,8 +2191,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
/* fall through */
case XDP_DROP:
- page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data), len, true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++;
break;
@@ -2320,6 +2324,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
rcu_read_lock();
xdp_prog = READ_ONCE(pp->xdp_prog);
xdp_buf.rxq = &rxq->xdp_rxq;
+ xdp_buf.frame_sz = PAGE_SIZE;
/* Fairness NAPI loop */
while (rx_proc < budget && rx_proc < rx_todo) {
@@ -3561,6 +3566,10 @@ static void mvneta_start_dev(struct mvneta_port *pp)
MVNETA_CAUSE_LINK_CHANGE);
phylink_start(pp->phylink);
+
+ /* We may have called phy_speed_down before */
+ phy_speed_up(pp->dev->phydev);
+
netif_tx_start_all_queues(pp->dev);
}
@@ -3568,6 +3577,9 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
{
unsigned int cpu;
+ if (device_may_wakeup(&pp->dev->dev))
+ phy_speed_down(pp->dev->phydev, false);
+
phylink_stop(pp->phylink);
if (!pp->neta_armada3700) {
@@ -4040,6 +4052,10 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
phylink_ethtool_get_wol(pp->phylink, &wol);
device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+ /* PHY WoL may be enabled but device wakeup disabled */
+ if (wol.supported)
+ device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
+
return err;
}
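A minimal sketch of the new sync-length logic in mvneta_run_xdp() above: because the BPF program may move xdp->data_end (xdp_adjust_tail), the DMA sync done when recycling the page must cover the larger of the frame length before and after the program ran. The lengths below are illustrative only.

#include <stdio.h>

static unsigned int sync_len(unsigned int len_before, unsigned int len_after)
{
	/* sync must cover every byte the CPU may have touched */
	return len_after > len_before ? len_after : len_before;
}

int main(void)
{
	/* e.g. a program that trimmed a 1500-byte frame down to 64 bytes */
	printf("shrunk tail: sync %u bytes\n", sync_len(1500, 64));
	/* e.g. a program that grew the tail by 100 bytes */
	printf("grown tail:  sync %u bytes\n", sync_len(1500, 1600));
	return 0;
}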
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f1d2dea90a8c..5975521a4c86 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -379,40 +379,35 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
(pfvf->hw.cq_ecount_wait - 1));
}
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- gfp_t gfp)
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
{
dma_addr_t iova;
+ u8 *buf;
- /* Check if request can be accommodated in previous allocated page */
- if (pool->page && ((pool->page_offset + pool->rbsize) <=
- (PAGE_SIZE << pool->rbpage_order))) {
- pool->pageref++;
- goto ret;
- }
-
- otx2_get_page(pool);
-
- /* Allocate a new page */
- pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- pool->rbpage_order);
- if (unlikely(!pool->page))
+ buf = napi_alloc_frag(pool->rbsize);
+ if (unlikely(!buf))
return -ENOMEM;
- pool->page_offset = 0;
-ret:
- iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
- pool->rbsize, DMA_FROM_DEVICE);
- if (!iova) {
- if (!pool->page_offset)
- __free_pages(pool->page, pool->rbpage_order);
- pool->page = NULL;
+ iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+ page_frag_free(buf);
return -ENOMEM;
}
- pool->page_offset += pool->rbsize;
+
return iova;
}
+static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+{
+ dma_addr_t addr;
+
+ local_bh_disable();
+ addr = __otx2_alloc_rbuf(pfvf, pool);
+ local_bh_enable();
+ return addr;
+}
+
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -805,7 +800,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
free_ptrs = cq->pool_ptrs;
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, rbpool);
if (bufptr <= 0) {
/* Schedule a WQ if we fail to free at least half of the
* pointers, else enable NAPI for this RQ.
@@ -1064,7 +1059,6 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
return err;
pool->rbsize = buf_size;
- pool->rbpage_order = get_order(buf_size);
/* Initialize this pool's context via AF */
aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
@@ -1152,13 +1146,12 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
return -ENOMEM;
for (ptr = 0; ptr < num_sqbs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, pool);
if (bufptr <= 0)
return bufptr;
otx2_aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
- otx2_get_page(pool);
}
return 0;
@@ -1204,13 +1197,12 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, pool);
if (bufptr <= 0)
return bufptr;
otx2_aura_freeptr(pfvf, pool_id,
bufptr + OTX2_HEAD_ROOM);
}
- otx2_get_page(pool);
}
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 018c283a0ac4..2fa29889522e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -309,7 +309,7 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
default:
blkaddr = BLKADDR_RVUM;
break;
- };
+ }
offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);
@@ -434,18 +434,6 @@ static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
}
-/* Update page ref count */
-static inline void otx2_get_page(struct otx2_pool *pool)
-{
- if (!pool->page)
- return;
-
- if (pool->pageref)
- page_ref_add(pool->page, pool->pageref);
- pool->pageref = 0;
- pool->page = NULL;
-}
-
static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
if (type == AURA_NIX_SQ)
@@ -589,8 +577,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- gfp_t gfp);
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 411e5ea1031e..64786568af0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1856,13 +1856,17 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
num_vec = pci_msix_vec_count(pdev);
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
- if (!hw->irq_name)
+ if (!hw->irq_name) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
sizeof(cpumask_var_t), GFP_KERNEL);
- if (!hw->affinity_mask)
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
/* Map CSRs */
pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 45abe0cd0e7b..b04f5429d72d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -286,7 +286,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, cq->rbpool, GFP_ATOMIC);
+ bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool);
if (unlikely(bufptr <= 0)) {
struct refill_work *work;
struct delayed_work *dwork;
@@ -304,7 +304,6 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
- otx2_get_page(cq->rbpool);
return processed_cqe;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 4ab32d3adb78..da97f2d4416f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -113,11 +113,7 @@ struct otx2_cq_poll {
struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
- u8 rbpage_order;
u16 rbsize;
- u32 page_offset;
- u16 pageref;
- struct page *page;
};
struct otx2_cq_queue {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 09047109d0da..f6a1f8666f95 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -65,7 +65,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
return __raw_readl(eth->base + reg);
}
-u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
u32 val;
@@ -1122,7 +1122,7 @@ static void mtk_stop_queue(struct mtk_eth *eth)
}
}
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index 73eae80e1cb7..ac5468b77488 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -197,6 +197,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
err = devlink_region_snapshot_id_get(devlink, &id);
if (err) {
mlx4_err(dev, "crdump: devlink get snapshot id err %d\n", err);
+ iounmap(cr_space);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 8a5ea2543670..b816154bc79a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1235,7 +1235,6 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
u32 i, rss_rings;
- int err = 0;
rss_rings = priv->prof->rss_rings ?: n;
rss_rings = rounddown_pow_of_two(rss_rings);
@@ -1249,7 +1248,7 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
if (hfunc)
*hfunc = priv->rss_hash_fn;
- return err;
+ return 0;
}
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
@@ -1393,7 +1392,6 @@ static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
struct mlx4_spec_list *spec_l2,
unsigned char *mac)
{
- int err = 0;
__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
@@ -1408,7 +1406,7 @@ static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
list_add_tail(&spec_l2->list, rule_list_h);
- return err;
+ return 0;
}
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 43dcbd8214c6..5bd3cd37d50f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -51,7 +51,8 @@
#include "en_port.h"
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
- XDP_PACKET_HEADROOM))
+ XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
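To make the MLX4_EN_MAX_XDP_MTU change above concrete, here is a sketch of the budget arithmetic on a 4K-page system: the XDP MTU now also has to leave room for the skb_shared_info tailroom. The alignment macro and the shared-info size are stand-ins; the real kernel values depend on the configuration.

#include <stdio.h>

#define PAGE_SZ			4096
#define ETH_HLEN		14
#define VLAN_HLEN		4
#define XDP_PACKET_HEADROOM	256
#define CACHE_ALIGN(x)		(((x) + 63) & ~63)	/* stand-in for SKB_DATA_ALIGN */
#define SHINFO_SZ		320	/* rough sizeof(struct skb_shared_info) */

int main(void)
{
	int old_mtu = PAGE_SZ - ETH_HLEN - 2 * VLAN_HLEN - XDP_PACKET_HEADROOM;
	int new_mtu = old_mtu - CACHE_ALIGN(SHINFO_SZ);

	printf("max XDP MTU without shared-info reservation: %d\n", old_mtu);
	printf("max XDP MTU with    shared-info reservation: %d\n", new_mtu);
	return 0;
}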
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index db3552f2d087..8a10285b0e10 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -683,6 +683,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
xdp.rxq = &ring->xdp_rxq;
+ xdp.frame_sz = priv->frag_info[0].frag_stride;
doorbell_pending = 0;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -946,7 +947,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
if (xdp_tx_cq->xdp_busy) {
clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
- budget);
+ budget) < budget;
xdp_tx_cq->xdp_busy = !clean_complete;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a30edb436f4a..9dff7b086c9f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -392,8 +392,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-bool mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq, int napi_budget)
+int mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -415,7 +415,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
u32 ring_cons;
if (unlikely(!priv->port_up))
- return true;
+ return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -492,7 +492,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
if (cq->type == TX_XDP)
- return done < budget;
+ return done;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
@@ -504,7 +504,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
ring->wake_queue++;
}
- return done < budget;
+ return done;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -524,14 +524,14 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- bool clean_complete;
+ int work_done;
- clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
- if (!clean_complete)
+ work_done = mlx4_en_process_tx_cq(dev, cq, budget);
+ if (work_done >= budget)
return budget;
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
+ if (napi_complete_done(napi, work_done))
+ mlx4_en_arm_cq(priv, cq);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 630f15977f09..9f5603612960 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -737,8 +737,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
-bool mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq, int napi_budget);
+int mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget);
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 6d32915000fc..d3c7dbd7f1d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
# mlx5 core basic
#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
- health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
+ health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
index c13260467750..82b185121edb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
@@ -5,7 +5,6 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include "en.h"
static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
index eddc34e4a762..8a4985d8cbfe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
@@ -58,12 +58,21 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6)
+ u32 *sa_handle)
{
- return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, daddr,
- spi, is_ipv6);
+ __be32 saddr[4] = {}, daddr[4] = {};
+
+ if (!xfrm->attrs.is_ipv6) {
+ saddr[3] = xfrm->attrs.saddr.a4;
+ daddr[3] = xfrm->attrs.daddr.a4;
+ } else {
+ memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
+ memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
+ }
+
+ return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr,
+ daddr, xfrm->attrs.spi,
+ xfrm->attrs.is_ipv6, sa_handle);
}
void mlx5_accel_esp_free_hw_context(void *context)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
index 530e428d46ab..e89747674712 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
@@ -48,9 +48,7 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6);
+ u32 *sa_handle);
void mlx5_accel_esp_free_hw_context(void *context);
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
@@ -64,9 +62,7 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
static inline void *
mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6)
+ u32 *sa_handle)
{
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 818edc63e428..8379b24cb838 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
@@ -91,8 +90,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen)
{
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
- u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
- u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
+ u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
struct mlx5_eq_comp *eq;
int err;
@@ -142,20 +140,17 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
err_cq_add:
mlx5_eq_del_cq(&eq->core, cq);
err_cmd:
- memset(din, 0, sizeof(din));
- memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, din, uid, cq->uid);
- mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
+ mlx5_cmd_exec_in(dev, destroy_cq, din);
return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
- u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
int err;
mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
@@ -164,7 +159,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, in, uid, cq->uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, destroy_cq, in);
if (err)
return err;
@@ -179,20 +174,20 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
EXPORT_SYMBOL(mlx5_core_destroy_cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *out, int outlen)
+ u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {};
MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
MLX5_SET(query_cq_in, in, cqn, cq->cqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_inout(dev, query_cq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_cq);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {};
MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
MLX5_SET(modify_cq_in, in, uid, cq->uid);
@@ -205,7 +200,7 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
u16 cq_period,
u16 cq_max_count)
{
- u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
void *cqc;
MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 04854e5fbcd7..6409090b3ec5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -101,15 +101,15 @@ void mlx5_unregister_debugfs(void)
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
- atomic_set(&dev->num_qps, 0);
-
dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
}
+EXPORT_SYMBOL(mlx5_qp_debugfs_init);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
debugfs_remove_recursive(dev->priv.qp_debugfs);
}
+EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
@@ -202,42 +202,37 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
int index, int *is_str)
{
- int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
- struct mlx5_qp_context *ctx;
+ u32 out[MLX5_ST_SZ_BYTES(query_qp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
u64 param = 0;
- u32 *out;
+ int state;
+ u32 *qpc;
int err;
- int no_sq;
- out = kzalloc(outlen, GFP_KERNEL);
- if (!out)
- return param;
-
- err = mlx5_core_qp_query(dev, qp, out, outlen);
- if (err) {
- mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
- goto out;
- }
+ MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
+ MLX5_SET(query_qp_in, in, qpn, qp->qpn);
+ err = mlx5_cmd_exec_inout(dev, query_qp, in, out);
+ if (err)
+ return 0;
*is_str = 0;
- /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
- ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);
-
+ qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
switch (index) {
case QP_PID:
param = qp->pid;
break;
case QP_STATE:
- param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
+ state = MLX5_GET(qpc, qpc, state);
+ param = (unsigned long)mlx5_qp_state_str(state);
*is_str = 1;
break;
case QP_XPORT:
- param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
+ param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st));
*is_str = 1;
break;
case QP_MTU:
- switch (ctx->mtu_msgmax >> 5) {
+ switch (MLX5_GET(qpc, qpc, mtu)) {
case IB_MTU_256:
param = 256;
break;
@@ -258,46 +253,31 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
}
break;
case QP_N_RECV:
- param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
+ param = 1 << MLX5_GET(qpc, qpc, log_rq_size);
break;
case QP_RECV_SZ:
- param = 1 << ((ctx->rq_size_stride & 7) + 4);
+ param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4);
break;
case QP_N_SEND:
- no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
- if (!no_sq)
- param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
- else
- param = 0;
+ if (!MLX5_GET(qpc, qpc, no_sq))
+ param = 1 << MLX5_GET(qpc, qpc, log_sq_size);
break;
case QP_LOG_PG_SZ:
- param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f;
- param += 12;
+ param = MLX5_GET(qpc, qpc, log_page_size) + 12;
break;
case QP_RQPN:
- param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff;
+ param = MLX5_GET(qpc, qpc, remote_qpn);
break;
}
-out:
- kfree(out);
return param;
}
-static int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- u32 *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
-
- MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
- MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int index)
{
int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
+ u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
u64 param = 0;
void *ctx;
u32 *out;
@@ -307,7 +287,9 @@ static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
if (!out)
return param;
- err = mlx5_core_eq_query(dev, eq, out, outlen);
+ MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
+ MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
+ err = mlx5_cmd_exec_inout(dev, query_eq, in, out);
if (err) {
mlx5_core_warn(dev, "failed to query eq\n");
goto out;
@@ -344,7 +326,7 @@ static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
if (!out)
return param;
- err = mlx5_core_query_cq(dev, cq, out, outlen);
+ err = mlx5_core_query_cq(dev, cq, out);
if (err) {
mlx5_core_warn(dev, "failed to query cq\n");
goto out;
@@ -461,6 +443,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
return err;
}
+EXPORT_SYMBOL(mlx5_debug_qp_add);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
@@ -470,6 +453,7 @@ void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
if (qp->dbg)
rem_res_tree(qp->dbg);
}
+EXPORT_SYMBOL(mlx5_debug_qp_remove);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
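The qp_read_field() conversion above replaces manual byte-swapping on struct mlx5_qp_context with MLX5_GET() accessors; the QPC fields it reads are log-encoded and decode as powers of two with fixed offsets. A tiny sketch with made-up field values:

#include <stdio.h>

int main(void)
{
	/* hypothetical values as read from a QP context */
	unsigned int log_rq_size = 7, log_rq_stride = 2, log_page_size = 0;

	printf("receive queue entries: %u\n", 1u << log_rq_size);
	printf("receive WQE stride:    %u bytes\n", 1u << (log_rq_stride + 4));
	printf("page size:             %u bytes\n", 1u << (log_page_size + 12));
	return 0;
}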
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 5ce6ebbc7f10..a7551274be58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -684,7 +684,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]);
while (block_timestamp > tracer->last_timestamp) {
- /* Check block override if its not the first block */
+ /* Check block override if it's not the first block */
if (!tracer->last_timestamp) {
u64 *ts_event;
/* To avoid block override by the HW in case of buffer
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index d2228e37450f..a894ea98c95a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -8,33 +8,13 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
}
-static int mlx5_peer_pf_enable_hca(struct mlx5_core_dev *dev)
-{
- u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
- u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
-
- MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
- MLX5_SET(enable_hca_in, in, function_id, 0);
- MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
- return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-}
-
-static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev)
-{
- u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
- u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
-
- MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
- MLX5_SET(disable_hca_in, in, function_id, 0);
- MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
{
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
int err;
- err = mlx5_peer_pf_enable_hca(dev);
+ MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
+ err = mlx5_cmd_exec_in(dev, enable_hca, in);
if (err)
mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n",
err);
@@ -44,9 +24,11 @@ static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
{
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
int err;
- err = mlx5_peer_pf_disable_hca(dev);
+ MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+ err = mlx5_cmd_exec_in(dev, disable_hca, in);
if (err) {
mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 23701c0e36ec..26911b15f8fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -339,16 +339,6 @@ struct mlx5e_cq_decomp {
u16 wqe_counter;
} ____cacheline_aligned_in_smp;
-struct mlx5e_tx_wqe_info {
- struct sk_buff *skb;
- u32 num_bytes;
- u8 num_wqebbs;
- u8 num_dma;
-#ifdef CONFIG_MLX5_EN_TLS
- struct page *resync_dump_frag_page;
-#endif
-};
-
enum mlx5e_dma_map_type {
MLX5E_DMA_MAP_SINGLE,
MLX5E_DMA_MAP_PAGE
@@ -370,18 +360,6 @@ enum {
MLX5E_SQ_STATE_PENDING_XSK_TX,
};
-struct mlx5e_sq_wqe_info {
- u8 opcode;
- u8 num_wqebbs;
-
- /* Auxiliary data for different opcodes. */
- union {
- struct {
- struct mlx5e_rq *rq;
- } umr;
- };
-};
-
struct mlx5e_txqsq {
/* data path */
@@ -484,11 +462,6 @@ struct mlx5e_xdp_info_fifo {
u32 mask;
};
-struct mlx5e_xdp_wqe_info {
- u8 num_wqebbs;
- u8 num_pkts;
-};
-
struct mlx5e_xdp_mpwqe {
/* Current MPWQE session */
struct mlx5e_tx_wqe *wqe;
@@ -552,7 +525,7 @@ struct mlx5e_icosq {
/* write@xmit, read@completion */
struct {
- struct mlx5e_sq_wqe_info *ico_wqe;
+ struct mlx5e_icosq_wqe_info *wqe_info;
} db;
/* read only */
@@ -652,6 +625,7 @@ struct mlx5e_rq {
struct {
u16 umem_headroom;
u16 headroom;
+ u32 frame0_sz;
u8 map_dir; /* dma map direction */
} buff;
@@ -919,8 +893,8 @@ void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
@@ -1013,7 +987,7 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
const struct mlx5e_tirc_config *ttconfig,
void *tirc, bool inner);
-void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen);
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
struct mlx5e_xsk_param;
@@ -1103,8 +1077,8 @@ void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif
-int mlx5e_create_tir(struct mlx5_core_dev *mdev,
- struct mlx5e_tir *tir, u32 *in, int inlen);
+int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
+ u32 *in);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 3a199a03d929..7283443868f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -43,7 +43,7 @@ int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
void *cqc;
int err;
- err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out));
+ err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 7cd5b02e0f10..8fe8b4d6ad1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -38,12 +38,11 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
{
- u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
- u32 out[MLX5_ST_SZ_DW(arm_monitor_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
MLX5_SET(arm_monitor_counter_in, in, opcode,
MLX5_CMD_OP_ARM_MONITOR_COUNTER);
- mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(priv->mdev, arm_monitor_counter, in);
}
static void mlx5e_monitor_counters_work(struct work_struct *work)
@@ -66,19 +65,6 @@ static int mlx5e_monitor_event_handler(struct notifier_block *nb,
return NOTIFY_OK;
}
-static void mlx5e_monitor_counter_start(struct mlx5e_priv *priv)
-{
- MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
- MONITOR_COUNTER);
- mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
-}
-
-static void mlx5e_monitor_counter_stop(struct mlx5e_priv *priv)
-{
- mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
- cancel_work_sync(&priv->monitor_counters_work);
-}
-
static int fill_monitor_counter_ppcnt_set1(int cnt, u32 *in)
{
enum mlx5_monitor_counter_ppcnt ppcnt_cnt;
@@ -118,8 +104,7 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
- u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
int q_counter = priv->q_counter;
int cnt = 0;
@@ -136,34 +121,31 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(mdev, set_monitor_counter, in);
}
/* check if mlx5e_monitor_counter_supported before calling this function*/
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
{
INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
- mlx5e_monitor_counter_start(priv);
+ MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
+ MONITOR_COUNTER);
+ mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
+
mlx5e_set_monitor_counter(priv);
mlx5e_monitor_counter_arm(priv);
queue_work(priv->wq, &priv->update_stats_work);
}
-static void mlx5e_monitor_counter_disable(struct mlx5e_priv *priv)
+/* check if mlx5e_monitor_counter_supported before calling this function */
+void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
{
- u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- MLX5_SET(set_monitor_counter_in, in, num_of_counters, 0);
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
- mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
-}
-
-/* check if mlx5e_monitor_counter_supported before calling this function*/
-void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
-{
- mlx5e_monitor_counter_disable(priv);
- mlx5e_monitor_counter_stop(priv);
+ mlx5_cmd_exec_in(priv->mdev, set_monitor_counter, in);
+ mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
+ cancel_work_sync(&priv->monitor_counters_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a172c5e39710..5568ded97e0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -73,9 +73,7 @@ struct mlx5_ct_ft {
struct mlx5_ct_entry {
u16 zone;
struct rhash_head node;
- struct flow_rule *flow_rule;
struct mlx5_fc *counter;
- unsigned long lastuse;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_zone_rule zone_rules[2];
@@ -384,7 +382,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
char *modact;
int err, i;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -603,7 +601,6 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
return -ENOMEM;
entry->zone = ft->zone;
- entry->flow_rule = flow_rule;
entry->cookie = flow->cookie;
entry->restore_cookie = meta_action->ct_metadata.cookie;
@@ -687,7 +684,7 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
return mlx5_tc_ct_block_flow_offload_stats(ft, f);
default:
break;
- };
+ }
return -EOPNOTSUPP;
}
@@ -1131,7 +1128,7 @@ mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
{
bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
- struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *rule = ERR_PTR(-EINVAL);
int err;
if (!ct_priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index f07b1399744e..dce2bbbf9109 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -27,25 +27,30 @@
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+enum mlx5e_icosq_wqe_type {
+ MLX5E_ICOSQ_WQE_NOP,
+ MLX5E_ICOSQ_WQE_UMR_RX,
+};
+
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
-static inline void *
-mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, size_t size, u16 *pi)
+static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
void *wqe;
- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
- memset(wqe, 0, size);
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ memset(wqe, 0, wqe_size);
return wqe;
}
+#define MLX5E_TX_FETCH_WQE(sq, pi) \
+ ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
+
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
@@ -81,6 +86,84 @@ mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
return wqe;
}
+struct mlx5e_tx_wqe_info {
+ struct sk_buff *skb;
+ u32 num_bytes;
+ u8 num_wqebbs;
+ u8 num_dma;
+#ifdef CONFIG_MLX5_EN_TLS
+ struct page *resync_dump_frag_page;
+#endif
+};
+
+static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs < size)) {
+ struct mlx5e_tx_wqe_info *wi, *edge_wi;
+
+ wi = &sq->db.wqe_info[pi];
+ edge_wi = wi + contig_wqebbs;
+
+ /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
+ for (; wi < edge_wi; wi++) {
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = 1,
+ };
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+ sq->stats->nop += contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ }
+
+ return pi;
+}
+
+struct mlx5e_icosq_wqe_info {
+ u8 wqe_type;
+ u8 num_wqebbs;
+
+ /* Auxiliary data for different wqe types. */
+ union {
+ struct {
+ struct mlx5e_rq *rq;
+ } umr;
+ };
+};
+
+static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs < size)) {
+ struct mlx5e_icosq_wqe_info *wi, *edge_wi;
+
+ wi = &sq->db.wqe_info[pi];
+ edge_wi = wi + contig_wqebbs;
+
+ /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
+ for (; wi < edge_wi; wi++) {
+ *wi = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_NOP,
+ .num_wqebbs = 1,
+ };
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ }
+
+ return pi;
+}
+
static inline void
mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
u16 pi, u16 nnops)
@@ -102,7 +185,7 @@ static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
struct mlx5_wqe_ctrl_seg *ctrl)
{
- ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
@@ -189,6 +272,22 @@ static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
}
}
+static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn,
+ struct mlx5_err_cqe *err_cqe)
+{
+ struct mlx5_cqwq *wq = &cq->wq;
+ u32 ci;
+
+ ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
+
+ netdev_err(cq->channel->netdev,
+ "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
+ cq->mcq.cqn, ci, sqn,
+ get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
+ err_cqe->syndrome, err_cqe->vendor_err_synd);
+ mlx5_dump_err_cqe(cq->mdev, err_cqe);
+}
+
/* SW parser related functions */
struct mlx5e_swp_spec {
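
A minimal usage sketch of the PI helpers introduced above, using a hypothetical caller; real users (for example the kTLS code later in this series) pair mlx5e_txqsq_get_next_pi() with a FETCH macro sized for their specific WQE layout.

	/* Hypothetical caller, for illustration only. */
	static void example_post_wqe(struct mlx5e_txqsq *sq, u16 num_wqebbs)
	{
		struct mlx5e_tx_wqe *wqe;
		u16 pi;

		/* Reserve num_wqebbs contiguous WQEBBs; if they would wrap the
		 * WQ fragment edge, the helper pads with NOP WQEs and returns a
		 * pi at the start of the next fragment.
		 */
		pi  = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
		wqe = MLX5E_TX_FETCH_WQE(sq, pi);

		/* ... build wqe->ctrl / wqe->eth / wqe->data segments here ... */

		sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
			.num_wqebbs = num_wqebbs,
		};
		sq->pc += num_wqebbs;
	}

The same reserve-then-fetch pattern is duplicated for ICOSQs and XDP SQs below, each with its own wqe_info type.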
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index f049e0ac308a..761c8979bd41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -137,6 +137,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
if (xsk)
xdp.handle = di->xsk.handle;
xdp.rxq = &rq->xdp_rxq;
+ xdp.frame_sz = rq->buff.frame0_sz;
act = bpf_prog_run_xdp(prog, &xdp);
if (xsk) {
@@ -178,20 +179,43 @@ xdp_abort:
}
}
-static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
+static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
- struct mlx5e_xdpsq_stats *stats = sq->stats;
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi, contig_wqebbs;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs < size)) {
+ struct mlx5e_xdp_wqe_info *wi, *edge_wi;
+
+ wi = &sq->db.wqe_info[pi];
+ edge_wi = wi + contig_wqebbs;
+
+ /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
+ for (; wi < edge_wi; wi++) {
+ *wi = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = 1,
+ .num_pkts = 0,
+ };
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+ sq->stats->nops += contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ }
- if (unlikely(contig_wqebbs < MLX5_SEND_WQE_MAX_WQEBBS))
- mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs);
+ return pi;
+}
+
+static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_xdpsq_stats *stats = sq->stats;
+ u16 pi;
- session->wqe = mlx5e_xdpsq_fetch_wqe(sq, &pi);
+ pi = mlx5e_xdpsq_get_next_pi(sq, MLX5_SEND_WQE_MAX_WQEBBS);
+ session->wqe = MLX5E_TX_FETCH_WQE(sq, pi);
prefetchw(session->wqe->data);
session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
@@ -408,22 +432,15 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
i = 0;
do {
- u16 wqe_counter;
+ struct mlx5e_xdp_wqe_info *wi;
+ u16 wqe_counter, ci;
bool last_wqe;
mlx5_cqwq_pop(&cq->wq);
wqe_counter = be16_to_cpu(cqe->wqe_counter);
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ))
- netdev_WARN_ONCE(sq->channel->netdev,
- "Bad OP in XDPSQ CQE: 0x%x\n",
- get_cqe_opcode(cqe));
-
do {
- struct mlx5e_xdp_wqe_info *wi;
- u16 ci;
-
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
@@ -432,6 +449,15 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
} while (!last_wqe);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ netdev_WARN_ONCE(sq->channel->netdev,
+ "Bad OP in XDPSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+ mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+ (struct mlx5_err_cqe *)cqe);
+ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
+ }
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
if (xsk_frames)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index d7587f40ecae..e2e01f064c1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -137,22 +137,10 @@ mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
}
-static inline void
-mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq,
- u16 pi, u16 nnops)
-{
- struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
-
- edge_wi = wi + nnops;
- /* fill sq frag edge with nops to avoid wqe wrapping two pages */
- for (; wi < edge_wi; wi++) {
- wi->num_wqebbs = 1;
- wi->num_pkts = 0;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- }
-
- sq->stats->nops += nnops;
-}
+struct mlx5e_xdp_wqe_info {
+ u8 num_wqebbs;
+ u8 num_pkts;
+};
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
@@ -186,19 +174,6 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
session->ds_count++;
}
-static inline struct mlx5e_tx_wqe *
-mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_tx_wqe *wqe;
-
- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
- memset(wqe, 0, sizeof(*wqe));
-
- return wqe;
-}
-
static inline void
mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
struct mlx5e_xdp_info *xi)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 3022463f2284..fac145dcf2ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -42,6 +42,8 @@
#include "en/txrx.h"
#if IS_ENABLED(CONFIG_GENEVE)
+#include <net/geneve.h>
+
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
return mlx5_tx_swp_supported(mdev);
@@ -100,33 +102,49 @@ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
udp_hdr(skb)->len = htons(payload_len);
}
-static inline struct sk_buff *
-mlx5e_accel_handle_tx(struct sk_buff *skb,
- struct mlx5e_txqsq *sq,
- struct net_device *dev,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+struct mlx5e_accel_tx_state {
+#ifdef CONFIG_MLX5_EN_TLS
+ struct mlx5e_accel_tx_tls_state tls;
+#endif
+};
+
+static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *state)
{
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ mlx5e_udp_gso_handle_tx_skb(skb);
+
#ifdef CONFIG_MLX5_EN_TLS
if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
- skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
- if (unlikely(!skb))
- return NULL;
+ /* May send SKBs and WQEs. */
+ if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
+ return false;
}
#endif
+ return true;
+}
+
+static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_TLS
+ mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
- skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
- if (unlikely(!skb))
- return NULL;
+ if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb)))
+ return false;
}
#endif
- if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
- mlx5e_udp_gso_handle_tx_skb(skb);
-
- return skb;
+ return true;
}
#endif /* __MLX5E_EN_ACCEL_H__ */
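
The begin/finish split above changes the TX acceleration contract: offload state is carried in mlx5e_accel_tx_state between the two calls instead of being patched into a pre-fetched WQE. A sketch of the calling convention in the xmit path, modelled on mlx5e_xmit(); the function name and its sq argument are illustrative, not part of the patch.

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev,
					struct mlx5e_txqsq *sq)
	{
		struct mlx5e_priv *priv = netdev_priv(dev);
		struct mlx5e_accel_tx_state accel = {};
		struct mlx5e_tx_wqe *wqe;
		u16 pi;

		/* May mangle or consume the SKB (TLS resync, UDP GSO handling). */
		if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
			return NETDEV_TX_OK;

		pi  = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
		wqe = MLX5E_TX_FETCH_WQE(sq, pi);

		/* Stamps the WQE with state recorded in begin (TLS tisn) and
		 * handles IPsec SWP/metadata on the eth segment.
		 */
		if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel)))
			return NETDEV_TX_OK;

		return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
	}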
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 29626c6c9c25..92eb3bad4acd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -75,18 +75,23 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
return ret;
}
-static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
+static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry,
+ unsigned int handle)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5e_ipsec_sa_entry *_sa_entry;
unsigned long flags;
- int ret;
- ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
- if (ret < 0)
- return ret;
+ rcu_read_lock();
+ hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
+ if (_sa_entry->handle == handle) {
+ rcu_read_unlock();
+ return -EEXIST;
+ }
+ rcu_read_unlock();
spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
- sa_entry->handle = ret;
+ sa_entry->handle = handle;
hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
@@ -103,15 +108,6 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}
-static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
- struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-
- /* xfrm already doing sync rcu between del and free callbacks */
-
- ida_simple_remove(&ipsec->halloc, sa_entry->handle);
-}
-
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct xfrm_replay_state_esn *replay_esn;
@@ -199,6 +195,14 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
MLX5_ACCEL_ESP_FLAGS_TUNNEL;
+
+ /* spi */
+ attrs->spi = x->id.spi;
+
+ /* source , destination ips */
+ memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
+ memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
+ attrs->is_ipv6 = (x->props.family != AF_INET);
}
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -284,8 +288,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
struct net_device *netdev = x->xso.dev;
struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_priv *priv;
- __be32 saddr[4] = {0}, daddr[4] = {0}, spi;
- bool is_ipv6 = false;
+ unsigned int sa_handle;
int err;
priv = netdev_priv(netdev);
@@ -303,20 +306,6 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
sa_entry->x = x;
sa_entry->ipsec = priv->ipsec;
- /* Add the SA to handle processed incoming packets before the add SA
- * completion was received
- */
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
- err = mlx5e_ipsec_sadb_rx_add(sa_entry);
- if (err) {
- netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
- goto err_entry;
- }
- } else {
- sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
- mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
- }
-
/* check esn */
mlx5e_ipsec_update_esn_state(sa_entry);
@@ -327,41 +316,38 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
if (IS_ERR(sa_entry->xfrm)) {
err = PTR_ERR(sa_entry->xfrm);
- goto err_sadb_rx;
+ goto err_sa_entry;
}
/* create hw context */
- if (x->props.family == AF_INET) {
- saddr[3] = x->props.saddr.a4;
- daddr[3] = x->id.daddr.a4;
- } else {
- memcpy(saddr, x->props.saddr.a6, sizeof(saddr));
- memcpy(daddr, x->id.daddr.a6, sizeof(daddr));
- is_ipv6 = true;
- }
- spi = x->id.spi;
sa_entry->hw_context =
mlx5_accel_esp_create_hw_context(priv->mdev,
sa_entry->xfrm,
- saddr, daddr, spi,
- is_ipv6);
+ &sa_handle);
if (IS_ERR(sa_entry->hw_context)) {
err = PTR_ERR(sa_entry->hw_context);
goto err_xfrm;
}
+ if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
+ if (err)
+ goto err_hw_ctx;
+ } else {
+ sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
+ mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
+ }
+
x->xso.offload_handle = (unsigned long)sa_entry;
goto out;
+err_hw_ctx:
+ mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
err_xfrm:
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
-err_sadb_rx:
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
- mlx5e_ipsec_sadb_rx_del(sa_entry);
- mlx5e_ipsec_sadb_rx_free(sa_entry);
- }
-err_entry:
+err_sa_entry:
kfree(sa_entry);
+
out:
return err;
}
@@ -390,9 +376,6 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
}
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
- mlx5e_ipsec_sadb_rx_free(sa_entry);
-
kfree(sa_entry);
}
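
With this change the SA handle is assigned by the device (returned through mlx5_accel_esp_create_hw_context()) rather than by a driver-side IDA, so the handle carried in RX packet metadata resolves directly to the xfrm state. A sketch mirroring mlx5e_ipsec_sadb_rx_lookup(); the wrapper name is illustrative.

	static struct xfrm_state *example_rx_lookup(struct mlx5e_ipsec *ipsec,
						    unsigned int handle)
	{
		struct mlx5e_ipsec_sa_entry *sa_entry;
		struct xfrm_state *ret = NULL;

		rcu_read_lock();
		hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
			if (sa_entry->handle == handle) {
				ret = sa_entry->x;
				xfrm_state_hold(ret);
				break;
			}
		rcu_read_unlock();

		return ret;
	}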
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 93bf10e6508c..c85151a1e008 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -109,11 +109,6 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
-int mlx5e_ipsec_get_count(struct mlx5e_priv *priv);
-int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data);
-void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv);
-int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data);
-
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
unsigned int handle);
@@ -136,26 +131,6 @@ static inline void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
}
-static inline int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
-{
- return 0;
-}
-
-static inline int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv,
- uint8_t *data)
-{
- return 0;
-}
-
-static inline void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
-{
-}
-
-static inline int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
-{
- return 0;
-}
-
#endif
#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 0dd17514caae..824b87ac8f9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -233,11 +233,10 @@ static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
ntohs(mdata->content.tx.seq));
}
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_tx_wqe *wqe,
- struct sk_buff *skb)
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
+ struct mlx5_wqe_eth_seg *eseg,
+ struct sk_buff *skb)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_metadata *mdata;
struct mlx5e_ipsec_sa_entry *sa_entry;
@@ -245,7 +244,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct sec_path *sp;
if (!xo)
- return skb;
+ return true;
sp = skb_sec_path(skb);
if (unlikely(sp->len != 1)) {
@@ -276,16 +275,16 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
goto drop;
}
- mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
+ mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
mlx5e_ipsec_set_metadata(skb, mdata, xo);
- return skb;
+ return true;
drop:
kfree_skb(skb);
- return NULL;
+ return false;
}
static inline struct xfrm_state *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index db84500b024f..ba02643586a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -52,9 +52,9 @@ void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_tx_wqe *wqe,
- struct sk_buff *skb);
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
+ struct mlx5_wqe_eth_seg *eseg,
+ struct sk_buff *skb);
#endif /* CONFIG_MLX5_EN_IPSEC */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 6fea59223dc4..6c5c54bcd9be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -38,6 +38,7 @@
#include "accel/ipsec.h"
#include "fpga/sdk.h"
#include "en_accel/ipsec.h"
+#include "fpga/ipsec.h"
static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
@@ -73,61 +74,74 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
-#define NUM_IPSEC_COUNTERS (NUM_IPSEC_HW_COUNTERS + NUM_IPSEC_SW_COUNTERS)
-
-int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
- if (!priv->ipsec)
- return 0;
-
- return NUM_IPSEC_COUNTERS;
+ return NUM_IPSEC_SW_COUNTERS;
}
-int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data)
-{
- unsigned int i, idx = 0;
+static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
- if (!priv->ipsec)
- return 0;
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_sw)
+{
+ unsigned int i;
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_hw_stats_desc[i].format);
+ if (priv->ipsec)
+ for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_ipsec_sw_stats_desc[i].format);
+ return idx;
+}
- for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_sw_stats_desc[i].format);
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
+{
+ int i;
- return NUM_IPSEC_COUNTERS;
+ if (priv->ipsec)
+ for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
+ mlx5e_ipsec_sw_stats_desc, i);
+ return idx;
}
-void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
- int ret;
+ return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+}
- if (!priv->ipsec)
- return;
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
+{
+ int ret = 0;
- ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
- NUM_IPSEC_HW_COUNTERS);
+ if (priv->ipsec)
+ ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
+ NUM_IPSEC_HW_COUNTERS);
if (ret)
memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
}
-int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
{
- int i, idx = 0;
-
- if (!priv->ipsec)
- return 0;
+ unsigned int i;
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
- mlx5e_ipsec_hw_stats_desc, i);
+ if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_ipsec_hw_stats_desc[i].format);
- for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
- mlx5e_ipsec_sw_stats_desc, i);
+ return idx;
+}
- return NUM_IPSEC_COUNTERS;
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
+{
+ int i;
+
+ if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
+ mlx5e_ipsec_hw_stats_desc,
+ i);
+ return idx;
}
+
+MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
+MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
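
The MLX5E_DECLARE_STATS_GRP_OP_* wrappers explain where the otherwise-unbound "data" and "idx" identifiers in the functions above come from: they are parameters declared by the macros. A rough sketch of the declaration helpers (see en_stats.h for the authoritative definitions); MLX5E_DEFINE_STATS_GRP() then bundles these ops into a struct mlx5e_stats_grp so the group plugs into the generic ethtool stats machinery.

	/* Sketch, not verbatim, of the en_stats.h declaration macros. */
	#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
		int mlx5e_grp_##grp##_get_num_stats(struct mlx5e_priv *priv)

	#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
		void mlx5e_grp_##grp##_update_stats(struct mlx5e_priv *priv)

	#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
		int mlx5e_grp_##grp##_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)

	#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
		int mlx5e_grp_##grp##_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)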
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 63116be6b1d6..dabbc5f226ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -9,6 +9,7 @@
#ifdef CONFIG_MLX5_EN_TLS
#include <net/tls.h>
#include "accel/tls.h"
+#include "en_accel/tls_rxtx.h"
#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
(offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
@@ -27,6 +28,14 @@ struct mlx5e_dump_wqe {
struct mlx5_wqe_data_seg data;
};
+#define MLX5E_TLS_FETCH_UMR_WQE(sq, pi) \
+ ((struct mlx5e_umr_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, MLX5E_KTLS_STATIC_UMR_WQE_SZ))
+#define MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi) \
+ ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, MLX5E_KTLS_PROGRESS_WQE_SZ))
+#define MLX5E_TLS_FETCH_DUMP_WQE(sq, pi) \
+ ((struct mlx5e_dump_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, \
+ sizeof(struct mlx5e_dump_wqe)))
+
#define MLX5E_KTLS_DUMP_WQEBBS \
(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
@@ -87,10 +96,9 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe, u16 *pi);
+bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, int datalen,
+ struct mlx5e_accel_tx_tls_state *state);
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 52a56622034a..3cd78d9503c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -108,10 +108,11 @@ static void tx_fill_wi(struct mlx5e_txqsq *sq,
{
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
- memset(wi, 0, sizeof(*wi));
- wi->num_wqebbs = num_wqebbs;
- wi->num_bytes = num_bytes;
- wi->resync_dump_frag_page = page;
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_bytes = num_bytes,
+ .resync_dump_frag_page = page,
+ };
}
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -134,13 +135,14 @@ post_static_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
+ u16 pi, num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS;
struct mlx5e_umr_wqe *umr_wqe;
- u16 pi;
- umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
+ pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
+ umr_wqe = MLX5E_TLS_FETCH_UMR_WQE(sq, pi);
build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
- sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
+ tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
+ sq->pc += num_wqebbs;
}
static void
@@ -148,13 +150,14 @@ post_progress_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
+ u16 pi, num_wqebbs = MLX5E_KTLS_PROGRESS_WQEBBS;
struct mlx5e_tx_wqe *wqe;
- u16 pi;
- wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
+ pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
+ wqe = MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi);
build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
- sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
+ tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
+ sq->pc += num_wqebbs;
}
static void
@@ -163,14 +166,6 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
bool skip_static_post, bool fence_first_post)
{
bool progress_fence = skip_static_post || !fence_first_post;
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 contig_wqebbs_room, pi;
-
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room <
- MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
if (!skip_static_post)
post_static_params(sq, priv_tx, fence_first_post);
@@ -278,7 +273,9 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
int fsz;
u16 pi;
- wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
+ BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
@@ -343,11 +340,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
u32 seq)
{
struct mlx5e_sq_stats *stats = sq->stats;
- struct mlx5_wq_cyc *wq = &sq->wq;
enum mlx5e_ktls_sync_retval ret;
struct tx_sync_info info = {};
- u16 contig_wqebbs_room, pi;
- u8 num_wqebbs;
int i = 0;
ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
@@ -376,13 +370,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
return MLX5E_KTLS_SYNC_DONE;
}
- num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
-
- if (unlikely(contig_wqebbs_room < num_wqebbs))
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
-
for (; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
@@ -422,34 +409,18 @@ err_out:
return MLX5E_KTLS_SYNC_FAIL;
}
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe, u16 *pi)
+bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, int datalen,
+ struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
- struct mlx5_wqe_ctrl_seg *cseg;
- struct tls_context *tls_ctx;
- int datalen;
u32 seq;
- if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
-
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
- if (!datalen)
- goto out;
-
- tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
- goto err_out;
-
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
stats->tls_ctx++;
}
@@ -460,30 +431,28 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
switch (ret) {
case MLX5E_KTLS_SYNC_DONE:
- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
break;
case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
/* fall-through */
- default: /* MLX5E_KTLS_SYNC_FAIL */
+ case MLX5E_KTLS_SYNC_FAIL:
goto err_out;
}
}
priv_tx->expected_seq = seq + datalen;
- cseg = &(*wqe)->ctrl;
- cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
+ state->tls_tisn = priv_tx->tisn;
stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
stats->tls_encrypted_bytes += datalen;
out:
- return skb;
+ return true;
err_out:
dev_kfree_skb_any(skb);
- return NULL;
+ return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index ef1ed15a53b4..05454a843b28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -184,18 +184,17 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
nskb->queue_mapping = skb->queue_mapping;
}
-static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
- struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi,
- struct mlx5e_tls *tls)
+static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
+ struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tls *tls)
{
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
+ struct mlx5e_tx_wqe *wqe;
struct sync_info info;
struct sk_buff *nskb;
int linear_len = 0;
int headln;
+ u16 pi;
int i;
sq->stats->tls_ooo++;
@@ -217,7 +216,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
if (likely(payload <= -info.sync_len))
/* SKB payload doesn't require offload
*/
- return skb;
+ return true;
atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
goto err_out;
@@ -247,20 +246,19 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
- return skb;
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ mlx5e_sq_xmit(sq, nskb, wqe, pi, true);
+
+ return true;
err_out:
dev_kfree_skb_any(skb);
- return NULL;
+ return false;
}
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_tx *context;
@@ -269,41 +267,45 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
int datalen;
u32 skb_seq;
- if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
- skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
- goto out;
- }
-
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
+ return true;
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
- goto out;
+ return true;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != netdev))
- goto out;
+ if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+ goto err_out;
+
+ if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx))
+ return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
skb_seq = ntohl(tcp_hdr(skb)->seq);
context = mlx5e_get_tls_tx_context(tls_ctx);
expected_seq = context->expected_seq;
- if (unlikely(expected_seq != skb_seq)) {
- skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
- goto out;
- }
+ if (unlikely(expected_seq != skb_seq))
+ return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
dev_kfree_skb_any(skb);
- skb = NULL;
- goto out;
+ return false;
}
context->expected_seq = skb_seq + datalen;
-out:
- return skb;
+ return true;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state)
+{
+ cseg->tisn = cpu_to_be32(state->tls_tisn << 8);
}
static int tls_update_resync_sn(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 90bc1f2384c8..a50d0394df0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -40,11 +40,14 @@
#include "en.h"
#include "en/txrx.h"
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi);
+struct mlx5e_accel_tx_tls_state {
+ u32 tls_tisn;
+};
+
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state);
void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
u32 *cqe_bcnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index f7890e0ce96c..af3228b3f303 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -36,12 +36,11 @@
 * Global resources are common to all the netdevices created on the same nic.
*/
-int mlx5e_create_tir(struct mlx5_core_dev *mdev,
- struct mlx5e_tir *tir, u32 *in, int inlen)
+int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in)
{
int err;
- err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn);
+ err = mlx5_core_create_tir(mdev, in, &tir->tirn);
if (err)
return err;
@@ -167,7 +166,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
tirn = tir->tirn;
- err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+ err = mlx5_core_modify_tir(mdev, tirn, in);
if (err)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 6d703ddee4e2..0279bb7246e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -432,7 +432,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
*cur_params = new_channels.params;
- mlx5e_num_channels_changed(priv);
+ err = mlx5e_num_channels_changed(priv);
goto out;
}
@@ -1204,7 +1204,7 @@ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
}
if (hash_changed)
- mlx5e_modify_tirs_hash(priv, in, inlen);
+ mlx5e_modify_tirs_hash(priv, in);
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3bc2ac3d53fc..83c9b2bbc4af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -858,7 +858,7 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
goto out;
priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
- mlx5e_modify_tirs_hash(priv, in, inlen);
+ mlx5e_modify_tirs_hash(priv, in);
out:
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b314adf438da..0e4ca08ddca9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -233,7 +233,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- cseg->imm = rq->mkey_be;
+ cseg->umr_mkey = rq->mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
ucseg->xlt_octowords =
@@ -462,6 +462,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
+
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_wq_destroy;
@@ -485,6 +487,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
num_xsk_frames = wq_sz << rq->wqe.info.log_num_frags;
rq->wqe.info = rqp->frags_info;
+ rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
+
rq->wqe.frags =
kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
(wq_sz << rq->wqe.info.log_num_frags)),
@@ -522,6 +526,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
}
if (xsk) {
+ rq->buff.frame0_sz = xsk_umem_xdp_frame_sz(umem);
+
err = mlx5e_xsk_resize_reuseq(umem, num_xsk_frames);
if (unlikely(err)) {
mlx5_core_err(mdev, "Unable to allocate the Reuse Ring for %u frames\n",
@@ -721,7 +727,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
- err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
@@ -752,7 +758,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
MLX5_SET(rqc, rqc, scatter_fcs, enable);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
- err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
@@ -781,7 +787,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
MLX5_SET(rqc, rqc, vsd, vsd);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
- err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
@@ -1027,17 +1033,17 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
- kvfree(sq->db.ico_wqe);
+ kvfree(sq->db.wqe_info);
}
static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ size_t size;
- sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
- sizeof(*sq->db.ico_wqe)),
- GFP_KERNEL, numa);
- if (!sq->db.ico_wqe)
+ size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
+ sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
+ if (!sq->db.wqe_info)
return -ENOMEM;
return 0;
@@ -1259,7 +1265,7 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
}
- err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
+ err = mlx5_core_modify_sq(mdev, sqn, in);
kvfree(in);
@@ -1364,13 +1370,12 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
/* last doorbell out, godspeed .. */
if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- struct mlx5e_tx_wqe_info *wi;
struct mlx5e_tx_wqe *nop;
- wi = &sq->db.wqe_info[pi];
+ sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = 1,
+ };
- memset(wi, 0, sizeof(*wi));
- wi->num_wqebbs = 1;
nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
}
@@ -1482,20 +1487,21 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
/* Pre initialize fixed WQE fields */
for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[i];
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
struct mlx5_wqe_data_seg *dseg;
+ sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = 1,
+ .num_pkts = 1,
+ };
+
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
dseg->lkey = sq->mkey_be;
-
- wi->num_wqebbs = 1;
- wi->num_pkts = 1;
}
}
@@ -2698,7 +2704,7 @@ static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
ttconfig->rx_hash_fields = rx_hash_fields;
}
-void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in)
{
void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
struct mlx5e_rss_params *rss = &priv->rss_params;
@@ -2714,7 +2720,7 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
mlx5e_update_rx_hash_fields(&ttconfig, tt,
rss->rx_hash_fields[tt]);
mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
- mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+ mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
}
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
@@ -2725,8 +2731,7 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
mlx5e_update_rx_hash_fields(&ttconfig, tt,
rss->rx_hash_fields[tt]);
mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
- mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
- inlen);
+ mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in);
}
}
@@ -2752,15 +2757,13 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
- inlen);
+ err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
if (err)
goto free_in;
}
for (ix = 0; ix < priv->max_nch; ix++) {
- err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
- in, inlen);
+ err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in);
if (err)
goto free_in;
}
@@ -2839,11 +2842,8 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
ETH_MAX_MTU);
}
-static void mlx5e_netdev_set_tcs(struct net_device *netdev)
+static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- int nch = priv->channels.params.num_channels;
- int ntc = priv->channels.params.num_tc;
int tc;
netdev_reset_tc(netdev);
@@ -2860,15 +2860,47 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
-static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv, u16 count)
+static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
{
- int num_txqs = count * priv->channels.params.num_tc;
- int num_rxqs = count * priv->profile->rq_groups;
struct net_device *netdev = priv->netdev;
+ int num_txqs, num_rxqs, nch, ntc;
+ int old_num_txqs, old_ntc;
+ int err;
+
+ old_num_txqs = netdev->real_num_tx_queues;
+ old_ntc = netdev->num_tc;
+
+ nch = priv->channels.params.num_channels;
+ ntc = priv->channels.params.num_tc;
+ num_txqs = nch * ntc;
+ num_rxqs = nch * priv->profile->rq_groups;
+
+ mlx5e_netdev_set_tcs(netdev, nch, ntc);
+
+ err = netif_set_real_num_tx_queues(netdev, num_txqs);
+ if (err) {
+ netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+ goto err_tcs;
+ }
+ err = netif_set_real_num_rx_queues(netdev, num_rxqs);
+ if (err) {
+ netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
+ goto err_txqs;
+ }
+
+ return 0;
+
+err_txqs:
+ /* netif_set_real_num_rx_queues could fail only when nch increased. Only
+ * one of nch and ntc is changed in this function. That means, the call
+ * to netif_set_real_num_tx_queues below should not fail, because it
+ * decreases the number of TX queues.
+ */
+ WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
- mlx5e_netdev_set_tcs(netdev);
- netif_set_real_num_tx_queues(netdev, num_txqs);
- netif_set_real_num_rx_queues(netdev, num_rxqs);
+err_tcs:
+ mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc);
+ return err;
}
static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
@@ -2895,8 +2927,12 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
u16 count = priv->channels.params.num_channels;
+ int err;
+
+ err = mlx5e_update_netdev_queues(priv);
+ if (err)
+ return err;
- mlx5e_update_netdev_queues(priv, count);
mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
if (!netif_is_rxfh_configured(priv->netdev))
@@ -3214,7 +3250,7 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
if (mlx5_lag_is_lacp_owner(mdev))
MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
- return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn);
+ return mlx5_core_create_tis(mdev, in, tisn);
}
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
@@ -3332,7 +3368,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
tir = &priv->indir_tir[tt];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_indir_tir_ctx(priv, tt, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+ err = mlx5e_create_tir(priv->mdev, tir, in);
if (err) {
mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
goto err_destroy_inner_tirs;
@@ -3347,7 +3383,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
tir = &priv->inner_indir_tir[i];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+ err = mlx5e_create_tir(priv->mdev, tir, in);
if (err) {
mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
goto err_destroy_inner_tirs;
@@ -3390,7 +3426,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
tir = &tirs[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+ err = mlx5e_create_tir(priv->mdev, tir, in);
if (unlikely(err))
goto err_destroy_ch_tirs;
}
@@ -5002,29 +5038,40 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
- if (err) {
- mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
- priv->q_counter = 0;
- }
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
+ if (!err)
+ priv->q_counter =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
- err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
- if (err) {
- mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
- priv->drop_rq_q_counter = 0;
- }
+ err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
+ if (!err)
+ priv->drop_rq_q_counter =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
- if (priv->q_counter)
- mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
- if (priv->drop_rq_q_counter)
- mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ if (priv->q_counter) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ priv->q_counter);
+ mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
+ }
+
+ if (priv->drop_rq_q_counter) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ priv->drop_rq_q_counter);
+ mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
+ }
}
static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
@@ -5363,9 +5410,11 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
*/
if (take_rtnl)
rtnl_lock();
- mlx5e_num_channels_changed(priv);
+ err = mlx5e_num_channels_changed(priv);
if (take_rtnl)
rtnl_unlock();
+ if (err)
+ goto out;
err = profile->init_tx(priv);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f372e94948fd..1eac7a53d56f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -2051,26 +2051,22 @@ static int register_devlink_port(struct mlx5_core_dev *dev,
return 0;
mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
+ dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
pfnum = PCI_FUNC(dev->pdev->devfn);
- if (rep->vport == MLX5_VPORT_UPLINK) {
+ if (rep->vport == MLX5_VPORT_UPLINK)
devlink_port_attrs_set(&rpriv->dl_port,
DEVLINK_PORT_FLAVOUR_PHYSICAL,
pfnum, false, 0,
&ppid.id[0], ppid.id_len);
- dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
- } else if (rep->vport == MLX5_VPORT_PF) {
+ else if (rep->vport == MLX5_VPORT_PF)
devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
pfnum);
- dl_port_index = rep->vport;
- } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
- rpriv->rep->vport)) {
+ else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
pfnum, rep->vport - 1);
- dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
- }
return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e2beb89c1832..821f94beda7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -468,22 +468,6 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
mlx5_wq_ll_update_db_record(wq);
}
-static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
- struct mlx5_wq_cyc *wq,
- u16 pi, u16 nnops)
-{
- struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
-
- edge_wi = wi + nnops;
-
- /* fill sq frag edge with nops to avoid wqe wrapping two pages */
- for (; wi < edge_wi; wi++) {
- wi->opcode = MLX5_OPCODE_NOP;
- wi->num_wqebbs = 1;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- }
-}
-
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@ -492,7 +476,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
- u16 pi, contig_wqebbs_room;
+ u16 pi;
int err;
int i;
@@ -502,13 +486,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
goto err;
}
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
- mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- }
-
+ pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
@@ -527,9 +505,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
MLX5_OPCODE_UMR);
umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
- sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
- sq->db.ico_wqe[pi].umr.rq = rq;
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
+ .num_wqebbs = MLX5E_UMR_WQEBBS,
+ .umr.rq = rq,
+ };
+
sq->pc += MLX5E_UMR_WQEBBS;
sq->doorbell_cseg = &umr_wqe->ctrl;
@@ -618,33 +599,38 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
do {
- struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_icosq_wqe_info *wi;
u16 ci;
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- wi = &sq->db.ico_wqe[ci];
+ wi = &sq->db.wqe_info[ci];
sqcc += wi->num_wqebbs;
if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
+ mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+ (struct mlx5_err_cqe *)cqe);
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(cq->channel->priv->wq, &sq->recover_work);
break;
}
- if (likely(wi->opcode == MLX5_OPCODE_UMR))
+ switch (wi->wqe_type) {
+ case MLX5E_ICOSQ_WQE_UMR_RX:
wi->umr.rq->mpwqe.umr_completed++;
- else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
+ break;
+ case MLX5E_ICOSQ_WQE_NOP:
+ break;
+ default:
netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- wi->opcode);
-
+ "Bad WQE type in ICOSQ WQE info: 0x%x\n",
+ wi->wqe_type);
+ }
} while (!last_wqe);
-
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
sq->cc = sqcc;
@@ -1084,6 +1070,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
if (consumed)
return NULL; /* page/packet was consumed by XDP */
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
if (unlikely(!skb))
return NULL;
@@ -1385,6 +1372,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL; /* page/packet was consumed by XDP */
}
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
if (unlikely(!skb))
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 30b216d9284c..f009fe09e99b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -32,8 +32,8 @@
#include "lib/mlx5.h"
#include "en.h"
-#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
+#include "en_accel/en_accel.h"
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
@@ -411,18 +411,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
- u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
+ int ret;
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+
+ if (priv->q_counter) {
+ MLX5_SET(query_q_counter_in, in, counter_set_id,
+ priv->q_counter);
+ ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
+ if (!ret)
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
+ out, out_of_buffer);
+ }
- if (priv->q_counter &&
- !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
- sizeof(out)))
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
- out, out_of_buffer);
- if (priv->drop_rq_q_counter &&
- !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
- out, sizeof(out)))
- qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
- out_of_buffer);
+ if (priv->drop_rq_q_counter) {
+ MLX5_SET(query_q_counter_in, in, counter_set_id,
+ priv->drop_rq_q_counter);
+ ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
+ if (!ret)
+ qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
+ out, out_of_buffer);
+ }
}
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
@@ -480,18 +491,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
- int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
- u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
return;
- MLX5_SET(query_vnic_env_in, in, opcode,
- MLX5_CMD_OP_QUERY_VNIC_ENV);
- MLX5_SET(query_vnic_env_in, in, op_mod, 0);
- MLX5_SET(query_vnic_env_in, in, other_vport, 0);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+ MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
+ mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
@@ -566,15 +573,12 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
- int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
- MLX5_SET(query_vport_counter_in, in, op_mod, 0);
- MLX5_SET(query_vport_counter_in, in, other_vport, 0);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+ mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}
#define PPORT_802_3_OFF(c) \
@@ -1424,27 +1428,6 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
-static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec)
-{
- return mlx5e_ipsec_get_count(priv);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec)
-{
- return idx + mlx5e_ipsec_get_strings(priv,
- data + idx * ETH_GSTRING_LEN);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec)
-{
- return idx + mlx5e_ipsec_get_stats(priv, data + idx);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec)
-{
- mlx5e_ipsec_update_stats(priv);
-}
-
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
return mlx5e_tls_get_count(priv);
@@ -1714,7 +1697,6 @@ MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
-static MLX5E_DEFINE_STATS_GRP(ipsec, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
/* The stats groups order is opposite to the update_stats() order calls */
@@ -1731,7 +1713,10 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(pcie),
&MLX5E_STATS_GRP(per_prio),
&MLX5E_STATS_GRP(pme),
- &MLX5E_STATS_GRP(ipsec),
+#ifdef CONFIG_MLX5_EN_IPSEC
+ &MLX5E_STATS_GRP(ipsec_sw),
+ &MLX5E_STATS_GRP(ipsec_hw),
+#endif
&MLX5E_STATS_GRP(tls),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
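Note on the conversion pattern above: the inferred-size command wrappers that replace the explicit mlx5_cmd_exec() length arguments can be sketched roughly as below. This is a minimal illustration built from the MLX5_ST_SZ_* helpers already used in this patch, not the authoritative macro bodies (those live in include/linux/mlx5/driver.h and may differ in detail).

/*
 * Rough sketch (not the exact upstream definitions): the ifc_cmd token is
 * pasted onto the _in/_out struct names, so the input/output lengths are
 * derived from the mlx5_ifc layouts instead of being passed by every caller.
 */
#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)			\
	mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in),		\
		      out, MLX5_ST_SZ_BYTES(ifc_cmd##_out))

#define mlx5_cmd_exec_in(dev, ifc_cmd, in)				\
	({								\
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};		\
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);		\
	})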
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 092b39ffa32a..2b83ba990714 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -390,5 +390,7 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index a574c588269a..a050808f2128 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -61,7 +61,7 @@
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"
-#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
struct mlx5_nic_flow_attr {
u32 action;
@@ -171,6 +171,11 @@ struct tunnel_match_key {
int filter_ifindex;
};
+struct tunnel_match_enc_opts {
+ struct flow_dissector_key_enc_opts key;
+ struct flow_dissector_key_enc_opts mask;
+};
+
/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
* Upper TUNNEL_INFO_BITS for general tunnel info.
* Lower ENC_OPTS_BITS bits for enc_opts.
@@ -568,7 +573,7 @@ struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
- u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
void *tirc;
int err;
@@ -582,7 +587,7 @@ static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
- err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
+ err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
if (err)
goto create_tir_err;
@@ -666,7 +671,7 @@ static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
err = mlx5_core_create_tir(hp->func_mdev, in,
- MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
+ &hp->indir_tirn[tt]);
if (err) {
mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
goto err_destroy_tirs;
@@ -1092,7 +1097,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (IS_ERR(priv->fs.tc.t)) {
mutex_unlock(&priv->fs.tc.t_lock);
NL_SET_ERR_MSG_MOD(extack,
- "Failed to create tc offload table\n");
+ "Failed to create tc offload table");
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
return PTR_ERR(priv->fs.tc.t);
@@ -1824,9 +1829,7 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
*dont_care = false;
if (opt->opt_class != U16_MAX ||
- opt->type != U8_MAX ||
- memchr_inv(opt->opt_data, 0xFF,
- opt->length * 4)) {
+ opt->type != U8_MAX) {
NL_SET_ERR_MSG(extack,
"Partial match of tunnel options in chain > 0 isn't supported");
netdev_warn(priv->netdev,
@@ -1863,6 +1866,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct flow_match_enc_opts enc_opts_match;
+ struct tunnel_match_enc_opts tun_enc_opts;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
struct tunnel_match_key tunnel_key;
@@ -1905,8 +1909,14 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
goto err_enc_opts;
if (!enc_opts_is_dont_care) {
+ memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
+ memcpy(&tun_enc_opts.key, enc_opts_match.key,
+ sizeof(*enc_opts_match.key));
+ memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
+ sizeof(*enc_opts_match.mask));
+
err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
- enc_opts_match.key, &enc_opts_id);
+ &tun_enc_opts, &enc_opts_id);
if (err)
goto err_enc_opts;
}
@@ -2661,7 +2671,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
set_vals = &hdrs[0].vals;
add_vals = &hdrs[1].vals;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
@@ -2794,7 +2804,7 @@ int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
return 0;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
namespace);
@@ -4707,7 +4717,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
- const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts);
+ const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *priv;
struct mapping_ctx *mapping;
@@ -4802,7 +4812,7 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
u32 tunnel_id)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct flow_dissector_key_enc_opts enc_opts = {};
+ struct tunnel_match_enc_opts enc_opts = {};
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
struct metadata_dst *tun_dst;
@@ -4840,7 +4850,7 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
}
}
- tun_dst = tun_rx_dst(enc_opts.len);
+ tun_dst = tun_rx_dst(enc_opts.key.len);
if (!tun_dst) {
WARN_ON_ONCE(true);
return false;
@@ -4854,9 +4864,11 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
key32_to_tunnel_id(key.enc_key_id.keyid),
TUNNEL_KEY);
- if (enc_opts.len)
- ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data,
- enc_opts.len, enc_opts.dst_opt_type);
+ if (enc_opts.key.len)
+ ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
+ enc_opts.key.data,
+ enc_opts.key.len,
+ enc_opts.key.dst_opt_type);
skb_dst_set(skb, (struct dst_entry *)tun_dst);
dev = dev_get_by_index(&init_net, key.filter_ifindex);
@@ -4893,7 +4905,7 @@ bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
reg_c0 = 0;
- reg_c1 = be32_to_cpu(cqe->imm_inval_pkey);
+ reg_c1 = be32_to_cpu(cqe->ft_metadata);
if (!reg_c0)
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index fd6b2a1898c5..f79454746d0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -265,8 +265,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
-netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wqe_ctrl_seg *cseg;
@@ -324,7 +324,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
#endif
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
#ifdef CONFIG_MLX5_EN_IPSEC
wqe->eth = cur_eth;
#endif
@@ -372,47 +373,38 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
num_dma, wi, cseg, xmit_more);
- return NETDEV_TX_OK;
+ return;
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_accel_tx_state accel = {};
struct mlx5e_tx_wqe *wqe;
struct mlx5e_txqsq *sq;
u16 pi;
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
- wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
- /* might send skbs and update wqe and pi */
- skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
- if (unlikely(!skb))
- return NETDEV_TX_OK;
+ /* May send SKBs and WQEs. */
+ if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
+ goto out;
- return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
-}
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
-static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
- struct mlx5_err_cqe *err_cqe)
-{
- struct mlx5_cqwq *wq = &sq->cq.wq;
- u32 ci;
+ /* May update the WQE, but may not post other WQEs. */
+ if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel)))
+ goto out;
- ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
+ mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
- netdev_err(sq->channel->netdev,
- "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
- sq->cq.mcq.cqn, ci, sq->sqn,
- get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
- err_cqe->syndrome, err_cqe->vendor_err_synd);
- mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
+out:
+ return NETDEV_TX_OK;
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
@@ -501,7 +493,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
&sq->state)) {
- mlx5e_dump_error_cqe(sq,
+ mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
queue_work(cq->channel->priv->wq,
@@ -582,11 +574,9 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_av *av, u32 dqpn, u32 dqkey,
- bool xmit_more)
+void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5i_tx_wqe *wqe;
struct mlx5_wqe_datagram_seg *datagram;
@@ -596,9 +586,9 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 headlen, ihs, pi, contig_wqebbs_room;
u16 ds_cnt, ds_cnt_inl = 0;
u8 num_wqebbs, opcode;
+ u16 headlen, ihs, pi;
u32 num_bytes;
int num_dma;
__be16 mss;
@@ -634,14 +624,8 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room < num_wqebbs)) {
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- }
-
- mlx5i_sq_fetch_wqe(sq, &wqe, pi);
+ pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
+ wqe = MLX5I_SQ_FETCH_WQE(sq, pi);
/* fill wqe */
wi = &sq->db.wqe_info[pi];
@@ -669,12 +653,10 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
num_dma, wi, cseg, xmit_more);
- return NETDEV_TX_OK;
+ return;
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index acb20215a33b..8480278f2ee2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,11 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
struct mlx5e_tx_wqe *nopwqe;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
- sq->db.ico_wqe[pi].num_wqebbs = 1;
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_NOP,
+ .num_wqebbs = 1,
+ };
+
nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index cccea3a8eddd..4d974b5405b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -36,7 +36,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
-#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
@@ -102,12 +101,11 @@ struct mlx5_eq_table {
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
- u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};
MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
MLX5_SET(destroy_eq_in, in, eq_number, eqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_eq, in);
}
/* caller must eventually call mlx5_cq_put on the returned cq */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
index 029001040737..d5bf908dfecd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
@@ -274,7 +274,7 @@ mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
static int
create_fdb_chain_restore(struct fdb_chain *fdb_chain)
{
- char modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)];
+ char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
struct mlx5_eswitch *esw = fdb_chain->esw;
struct mlx5_modify_hdr *mod_hdr;
u32 index;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7f618a443bfd..c5eb4e7754a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -84,8 +84,7 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
- int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
- int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
void *nic_vport_ctx;
MLX5_SET(modify_nic_vport_context_in, in,
@@ -108,40 +107,24 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
}
/* E-Switch vport context HW commands */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *in, int inlen)
+ bool other_vport, void *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
-
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
-
- MLX5_SET(query_esw_vport_context_in, in, opcode,
- MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
- MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
u16 vlan, u8 qos, u8 set_flags)
{
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
@@ -170,8 +153,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in,
field_select.vport_cvlan_insert, 1);
- return mlx5_eswitch_modify_esw_vport_context(dev, vport, true,
- in, sizeof(in));
+ return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}
/* E-Switch FDB */
@@ -1901,7 +1883,7 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
MLX5_SET(query_esw_functions_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out);
if (!err)
return out;
@@ -2783,8 +2765,8 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
- struct mlx5_vport_drop_stats stats = {0};
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
+ struct mlx5_vport_drop_stats stats = {};
int err = 0;
u32 *out;
@@ -2801,7 +2783,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
- err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
+ err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
if (err)
goto free_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index c1848b57f61c..4a1c6c78bb14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -329,11 +329,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *in, int inlen);
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *out, int outlen);
+ bool other_vport, void *in);
struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 5d9def18ae3a..57ac2ef52e80 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -784,7 +784,8 @@ static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
+ u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
u8 curr, wanted;
int err;
@@ -792,8 +793,9 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
- err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
- out, sizeof(out));
+ MLX5_SET(query_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+ err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
if (err)
return err;
@@ -808,14 +810,12 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
else
curr &= ~wanted;
- MLX5_SET(modify_esw_vport_context_in, in,
+ MLX5_SET(modify_esw_vport_context_in, min,
esw_vport_context.fdb_to_vport_reg_c_id, curr);
-
- MLX5_SET(modify_esw_vport_context_in, in,
+ MLX5_SET(modify_esw_vport_context_in, min,
field_select.fdb_to_vport_reg_c_id, 1);
- err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
- sizeof(in));
+ err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
if (!err) {
if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
@@ -1468,7 +1468,7 @@ query_vports:
out:
*mode = mlx5_mode;
return 0;
-}
+}
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
@@ -1484,7 +1484,7 @@ static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
- u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
+ u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
@@ -1894,7 +1894,7 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_flow_act flow_act = {};
int err = 0;
u32 key;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
index c0fd2212e890..9a37077152aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -31,7 +31,6 @@
*/
#include <linux/etherdevice.h>
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
@@ -143,15 +142,15 @@ int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query)
int mlx5_fpga_create_qp(struct mlx5_core_dev *dev, void *fpga_qpc,
u32 *fpga_qpn)
{
- u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)];
+ u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {};
int ret;
MLX5_SET(fpga_create_qp_in, in, opcode, MLX5_CMD_OP_FPGA_CREATE_QP);
memcpy(MLX5_ADDR_OF(fpga_create_qp_in, in, fpga_qpc), fpga_qpc,
MLX5_FLD_SZ_BYTES(fpga_create_qp_in, fpga_qpc));
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, fpga_create_qp, in, out);
if (ret)
return ret;
@@ -165,8 +164,7 @@ int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
enum mlx5_fpga_qpc_field_select fields,
void *fpga_qpc)
{
- u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_modify_qp_out)];
+ u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)] = {};
MLX5_SET(fpga_modify_qp_in, in, opcode, MLX5_CMD_OP_FPGA_MODIFY_QP);
MLX5_SET(fpga_modify_qp_in, in, field_select, fields);
@@ -174,20 +172,20 @@ int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
memcpy(MLX5_ADDR_OF(fpga_modify_qp_in, in, fpga_qpc), fpga_qpc,
MLX5_FLD_SZ_BYTES(fpga_modify_qp_in, fpga_qpc));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, fpga_modify_qp, in);
}
int mlx5_fpga_query_qp(struct mlx5_core_dev *dev,
u32 fpga_qpn, void *fpga_qpc)
{
- u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_query_qp_out)];
+ u32 out[MLX5_ST_SZ_DW(fpga_query_qp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)] = {};
int ret;
MLX5_SET(fpga_query_qp_in, in, opcode, MLX5_CMD_OP_FPGA_QUERY_QP);
MLX5_SET(fpga_query_qp_in, in, fpga_qpn, fpga_qpn);
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, fpga_query_qp, in, out);
if (ret)
return ret;
@@ -198,20 +196,19 @@ int mlx5_fpga_query_qp(struct mlx5_core_dev *dev,
int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn)
{
- u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_destroy_qp_out)];
+ u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)] = {};
MLX5_SET(fpga_destroy_qp_in, in, opcode, MLX5_CMD_OP_FPGA_DESTROY_QP);
MLX5_SET(fpga_destroy_qp_in, in, fpga_qpn, fpga_qpn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, fpga_destroy_qp, in);
}
int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
bool clear, struct mlx5_fpga_qp_counters *data)
{
- u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)];
+ u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)] = {};
int ret;
MLX5_SET(fpga_query_qp_counters_in, in, opcode,
@@ -219,7 +216,7 @@ int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
MLX5_SET(fpga_query_qp_counters_in, in, clear, clear);
MLX5_SET(fpga_query_qp_counters_in, in, fpga_qpn, fpga_qpn);
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, fpga_query_qp_counters, in, out);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 61021133029e..182d3ac3e73f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -165,7 +165,7 @@ static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
MLX5_OPCODE_SEND);
- ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8));
+ ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.qpn << 8));
conn->qp.sq.pc++;
conn->qp.sq.bufs[ix] = buf;
@@ -362,23 +362,6 @@ static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
}
-static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,
- enum mlx5_event event)
-{
- struct mlx5_fpga_conn *conn;
-
- conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
- mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);
-}
-
-static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event)
-{
- struct mlx5_fpga_conn *conn;
-
- conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp);
- mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn);
-}
-
static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
unsigned int budget)
{
@@ -493,7 +476,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
*conn->cq.mcq.arm_db = 0;
conn->cq.mcq.vector = 0;
conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
- conn->cq.mcq.event = mlx5_fpga_conn_cq_event;
conn->cq.mcq.irqn = irqn;
conn->cq.mcq.uar = fdev->conn_res.uar;
tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
@@ -534,8 +516,9 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
unsigned int tx_size, unsigned int rx_size)
{
struct mlx5_fpga_device *fdev = conn->fdev;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0};
+ u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
void *in = NULL, *qpc;
int err, inlen;
@@ -600,12 +583,13 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
- err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
if (err)
goto err_sq_bufs;
- conn->qp.mqp.event = mlx5_fpga_conn_event;
- mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn);
+ conn->qp.qpn = MLX5_GET(create_qp_out, out, qpn);
+ mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.qpn);
goto out;
@@ -658,7 +642,13 @@ static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
{
- mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
+ struct mlx5_core_dev *dev = conn->fdev->mdev;
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, conn->qp.qpn);
+ mlx5_cmd_exec_in(dev, destroy_qp, in);
+
mlx5_fpga_conn_free_recv_bufs(conn);
mlx5_fpga_conn_flush_send_bufs(conn);
kvfree(conn->qp.sq.bufs);
@@ -666,30 +656,29 @@ static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
mlx5_wq_destroy(&conn->qp.wq_ctrl);
}
-static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
{
struct mlx5_core_dev *mdev = conn->fdev->mdev;
+ u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};
+
+ mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.qpn);
- mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn);
+ MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
+ MLX5_SET(qp_2rst_in, in, qpn, conn->qp.qpn);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL,
- &conn->qp.mqp);
+ return mlx5_cmd_exec_in(mdev, qp_2rst, in);
}
-static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
{
+ u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
struct mlx5_fpga_device *fdev = conn->fdev;
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 *qpc = NULL;
- int err;
+ u32 *qpc;
- mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn);
+ mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.qpn);
- qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
- if (!qpc) {
- err = -ENOMEM;
- goto out;
- }
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
@@ -700,32 +689,22 @@ static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
- &conn->qp.mqp);
- if (err) {
- mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
- goto out;
- }
+ MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, in, qpn, conn->qp.qpn);
-out:
- kfree(qpc);
- return err;
+ return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
}
-static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
{
+ u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
struct mlx5_fpga_device *fdev = conn->fdev;
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 *qpc = NULL;
- int err;
+ u32 *qpc;
mlx5_fpga_dbg(conn->fdev, "QP RTR\n");
- qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
- if (!qpc) {
- err = -ENOMEM;
- goto out;
- }
+ qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);
MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
@@ -745,33 +724,22 @@ static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
- &conn->qp.mqp);
- if (err) {
- mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
- goto out;
- }
+ MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, in, qpn, conn->qp.qpn);
-out:
- kfree(qpc);
- return err;
+ return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}
-static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
{
struct mlx5_fpga_device *fdev = conn->fdev;
+ u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 *qpc = NULL;
- u32 opt_mask;
- int err;
+ u32 *qpc;
mlx5_fpga_dbg(conn->fdev, "QP RTS\n");
- qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
- if (!qpc) {
- err = -ENOMEM;
- goto out;
- }
+ qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);
MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
@@ -781,17 +749,11 @@ static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, retry_count, 7);
MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
- opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT;
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
- &conn->qp.mqp);
- if (err) {
- mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
- goto out;
- }
+ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, conn->qp.qpn);
+ MLX5_SET(rtr2rts_qp_in, in, opt_param_mask, MLX5_QP_OPTPAR_RNR_TIMEOUT);
-out:
- kfree(qpc);
- return err;
+ return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
}
static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
@@ -931,7 +893,7 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
- MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.qpn);
MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);
@@ -972,19 +934,11 @@ out:
void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
{
- struct mlx5_fpga_device *fdev = conn->fdev;
- struct mlx5_core_dev *mdev = fdev->mdev;
- int err = 0;
-
conn->qp.active = false;
tasklet_disable(&conn->cq.tasklet);
synchronize_irq(conn->cq.mcq.irqn);
mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL,
- &conn->qp.mqp);
- if (err)
- mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err);
mlx5_fpga_conn_destroy_qp(conn);
mlx5_fpga_conn_destroy_cq(conn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
index 634ae10e287b..5116e869a6e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
@@ -65,7 +65,7 @@ struct mlx5_fpga_conn {
int sgid_index;
struct mlx5_wq_qp wq;
struct mlx5_wq_ctrl wq_ctrl;
- struct mlx5_core_qp mqp;
+ u32 qpn;
struct {
spinlock_t lock; /* Protects all SQ state */
unsigned int pc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index b794888fa3ba..b463787d6ca1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -65,6 +65,7 @@ struct mlx5_fpga_esp_xfrm;
struct mlx5_fpga_ipsec_sa_ctx {
struct rhash_head hash;
struct mlx5_ifc_fpga_ipsec_sa hw_sa;
+ u32 sa_handle;
struct mlx5_core_dev *dev;
struct mlx5_fpga_esp_xfrm *fpga_xfrm;
};
@@ -119,6 +120,8 @@ struct mlx5_fpga_ipsec {
*/
struct rb_root rules_rb;
struct mutex rules_rb_lock; /* rules lock */
+
+ struct ida halloc;
};
static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
@@ -602,7 +605,7 @@ static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
const u32 *match_c,
const u32 *match_v)
{
- u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
+ u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev);
bool ipv6_flow;
ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);
@@ -666,7 +669,8 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
- const __be32 spi, bool is_ipv6)
+ const __be32 spi, bool is_ipv6,
+ u32 *sa_handle)
{
struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
@@ -704,6 +708,17 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
goto exists;
}
+ if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
+ err = ida_simple_get(&fipsec->halloc, 1, 0, GFP_KERNEL);
+ if (err < 0) {
+ context = ERR_PTR(err);
+ goto exists;
+ }
+
+ sa_ctx->sa_handle = err;
+ if (sa_handle)
+ *sa_handle = sa_ctx->sa_handle;
+ }
/* This is unbounded fpga_xfrm, try to add to hash */
mutex_lock(&fipsec->sa_hash_lock);
@@ -744,7 +759,8 @@ delete_hash:
rhash_sa));
unlock_hash:
mutex_unlock(&fipsec->sa_hash_lock);
-
+ if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
exists:
mutex_unlock(&fpga_xfrm->lock);
kfree(sa_ctx);
@@ -816,7 +832,7 @@ mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
/* create */
return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
saddr, daddr,
- spi, is_ipv6);
+ spi, is_ipv6, NULL);
}
static void
@@ -836,6 +852,10 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
return;
}
+ if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
+ MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+
mutex_lock(&fipsec->sa_hash_lock);
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
rhash_sa));
@@ -1299,6 +1319,8 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
goto err_destroy_hash;
}
+ ida_init(&fdev->ipsec->halloc);
+
return 0;
err_destroy_hash:
@@ -1331,6 +1353,7 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
if (!mlx5_fpga_is_ipsec_device(mdev))
return;
+ ida_destroy(&fdev->ipsec->halloc);
destroy_rules_rb(&fdev->ipsec->rules_rb);
rhashtable_destroy(&fdev->ipsec->sa_hash);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
index 382985e65b48..9ba637f0f0f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
@@ -37,6 +37,7 @@
#include "accel/ipsec.h"
#include "fs_cmd.h"
+#ifdef CONFIG_MLX5_FPGA_IPSEC
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
@@ -46,7 +47,8 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
- const __be32 spi, bool is_ipv6);
+ const __be32 spi, bool is_ipv6,
+ u32 *sa_handle);
void mlx5_fpga_ipsec_delete_sa_ctx(void *context);
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
@@ -63,5 +65,17 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
+#else
+static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
-#endif /* __MLX5_FPGA_SADB_H__ */
+static inline const struct mlx5_flow_cmds *
+mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
+{
+ return mlx5_fs_cmd_get_default(type);
+}
+
+#endif /* CONFIG_MLX5_FPGA_IPSEC */
+#endif /* __MLX5_FPGA_IPSEC_H__ */
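With the CONFIG_MLX5_FPGA_IPSEC stubs added above, call sites outside the FPGA code can test for IPsec offload without carrying their own ifdefs. A hypothetical caller (illustration only, not part of this patch) would look like:

/* Illustration only: mlx5_fpga_ipsec_device_caps() resolves to the stub
 * returning 0 when CONFIG_MLX5_FPGA_IPSEC is disabled, so this compiles
 * and behaves correctly either way.
 */
static bool example_has_fpga_ipsec(struct mlx5_core_dev *mdev)
{
	return mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE;
}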
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 90048697b2ff..1a8e826ac86b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -155,8 +155,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
{
- u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
@@ -167,13 +166,10 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
- if (disconnect) {
+ if (disconnect)
MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
- MLX5_SET(set_flow_table_root_in, in, table_id, 0);
- } else {
- MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
+ else
MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
- }
MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
if (ft->vport) {
@@ -181,7 +177,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_flow_table_root, in);
}
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
@@ -192,8 +188,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
- u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
int err;
@@ -239,7 +235,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
break;
}
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
if (!err)
ft->id = MLX5_GET(create_flow_table_out, out,
table_id);
@@ -249,8 +245,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
- u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_table_in, in, opcode,
@@ -262,15 +257,14 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_flow_table, in);
}
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(modify_flow_table_in, in, opcode,
@@ -310,7 +304,7 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
}
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}
static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
@@ -318,8 +312,7 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
u32 *in,
struct mlx5_flow_group *fg)
{
- u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
struct mlx5_core_dev *dev = ns->dev;
int err;
@@ -332,7 +325,7 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_group_in, in, other_vport, 1);
}
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
if (!err)
fg->id = MLX5_GET(create_flow_group_out, out,
group_id);
@@ -343,8 +336,7 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
- u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_group_in, in, opcode,
@@ -357,7 +349,7 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
@@ -600,8 +592,7 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
- u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
@@ -613,22 +604,22 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
MLX5_SET(delete_fte_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, delete_fte, in);
}
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
u32 *id)
{
- u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
int err;
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
if (!err)
*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
return err;
@@ -641,21 +632,20 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};
MLX5_SET(dealloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes)
{
u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
+ MLX5_ST_SZ_BYTES(traffic_counter)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
void *stats;
int err = 0;
@@ -683,11 +673,10 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out)
{
int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
- MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
@@ -700,7 +689,7 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
- u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
struct mlx5_core_dev *dev = ns->dev;
void *packet_reformat_context_in;
int max_encap_size;
@@ -732,7 +721,6 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
reformat_data);
inlen = reformat - (void *)in + size;
- memset(in, 0, inlen);
MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
@@ -741,7 +729,6 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
reformat_type, reformat_type);
memcpy(reformat, reformat_data, size);
- memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
@@ -753,17 +740,15 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat *pkt_reformat)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
- memset(in, 0, sizeof(in));
MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
pkt_reformat->id);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
@@ -771,7 +756,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
void *modify_actions,
struct mlx5_modify_hdr *modify_hdr)
{
- u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
+ u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
int max_actions, actions_size, inlen, err;
struct mlx5_core_dev *dev = ns->dev;
void *actions_in;
@@ -806,7 +791,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
return -EOPNOTSUPP;
}
- actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
+ actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
in = kzalloc(inlen, GFP_KERNEL);
@@ -821,7 +806,6 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
memcpy(actions_in, modify_actions, actions_size);
- memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
@@ -832,17 +816,15 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_modify_hdr *modify_hdr)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
+ u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
- memset(in, 0, sizeof(in));
MLX5_SET(dealloc_modify_header_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
modify_hdr->id);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d5defe09339a..2da45e9b9b6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2359,7 +2359,7 @@ static struct mlx5_flow_root_namespace
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_namespace *ns;
- if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
+ if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
(table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
@@ -2943,7 +2943,8 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
+ if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 90e3d0233101..a5fbe7343508 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -31,7 +31,6 @@
*/
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/eswitch.h>
#include <linux/module.h>
#include "mlx5_core.h"
@@ -68,26 +67,19 @@ enum {
MCQI_FW_STORED_VERSION = 1,
};
-static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
- int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
-
- MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
int mlx5_query_board_id(struct mlx5_core_dev *dev)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
int err;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- err = mlx5_cmd_query_adapter(dev, out, outlen);
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+ err = mlx5_cmd_exec_inout(dev, query_adapter, in, out);
if (err)
goto out;
@@ -106,13 +98,15 @@ int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
int err;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- err = mlx5_cmd_query_adapter(mdev, out, outlen);
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+ err = mlx5_cmd_exec_inout(mdev, query_adapter, in, out);
if (err)
goto out;
@@ -260,8 +254,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
{
- u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {};
int i;
MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
@@ -272,16 +265,15 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
sw_owner_id[i]);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, init_hca, in);
}
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
- u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, teardown_hca, in);
}
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
@@ -316,8 +308,8 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
- u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
int state;
int ret;
@@ -330,7 +322,7 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
MLX5_SET(teardown_hca_in, in, profile,
MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, teardown_hca, in, out);
if (ret)
return ret;
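All of these conversions are expressed through the mlx5_ifc accessor macros; a rough gloss of the ones used in this diff, hedged because the real versions in include/linux/mlx5/device.h do bit-level packing against autogenerated layouts:

/* Approximate meaning only, not the real bit-packing implementations:
 *   MLX5_ST_SZ_BYTES(s) / MLX5_ST_SZ_DW(s) - size of ifc layout 's' in
 *                                            bytes / 32-bit words
 *   MLX5_SET(s, p, f, v)   - write field 'f' of layout 's' in buffer 'p'
 *   MLX5_GET(s, p, f)      - read field 'f' (MLX5_GET64 for 64-bit fields)
 *   MLX5_ADDR_OF(s, p, f)  - pointer to sub-struct 'f' inside 'p'
 */

These are what let every command above be built in a plain u32 array with no per-command C struct.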
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index f99e1752d4e5..c0cfbab15fe9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -36,7 +36,6 @@
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 673aaa815f57..035bd21e5d4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -160,45 +160,54 @@ int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5i_priv *ipriv = priv->ppriv;
- struct mlx5_core_qp *qp = &ipriv->qp;
- struct mlx5_qp_context *context;
int ret;
- /* QP states */
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
+ {
+ u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
+ u32 *qpc;
- context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
- context->pri_path.port = 1;
- context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
- context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
- goto err_qp_modify_to_err;
- }
- memset(context, 0, sizeof(*context));
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
+ ipriv->pkey_index);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+ MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
- goto err_qp_modify_to_err;
+ MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
+ ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
+ if (ret)
+ goto err_qp_modify_to_err;
}
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
- goto err_qp_modify_to_err;
+ {
+ u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
+
+ MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
+ ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
+ if (ret)
+ goto err_qp_modify_to_err;
+ }
+ {
+ u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
+
+ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
+ ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
+ if (ret)
+ goto err_qp_modify_to_err;
}
-
- kfree(context);
return 0;
err_qp_modify_to_err:
- mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, &context, qp);
- kfree(context);
+ {
+ u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};
+
+ MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
+ MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
+ mlx5_cmd_exec_in(mdev, qp_2err, in);
+ }
return ret;
}
@@ -206,30 +215,24 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_qp_context context;
- int err;
+ u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
- &ipriv->qp);
- if (err)
- mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
+ MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
+ MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
+ mlx5_cmd_exec_in(mdev, qp_2rst, in);
}
#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2
-int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
- u32 *in = NULL;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
+ struct mlx5i_priv *ipriv = priv->ppriv;
void *addr_path;
int ret = 0;
- int inlen;
void *qpc;
- inlen = MLX5_ST_SZ_BYTES(create_qp_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
@@ -240,20 +243,23 @@ int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp
MLX5_SET(ads, addr_path, vhca_port_num, 1);
MLX5_SET(ads, addr_path, grh, 1);
- ret = mlx5_core_create_qp(mdev, qp, in, inlen);
- if (ret) {
- mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret);
- goto out;
- }
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
+ if (ret)
+ return ret;
-out:
- kvfree(in);
- return ret;
+ ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);
+
+ return 0;
}
-void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
{
- mlx5_core_destroy_qp(mdev, qp);
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qpn);
+ mlx5_cmd_exec_in(mdev, destroy_qp, in);
}
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
@@ -273,13 +279,13 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
struct mlx5i_priv *ipriv = priv->ppriv;
int err;
- err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
+ err = mlx5i_create_underlay_qp(priv);
if (err) {
mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
return err;
}
- err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0][0]);
+ err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
goto err_destroy_underlay_qp;
@@ -288,7 +294,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return 0;
err_destroy_underlay_qp:
- mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
return err;
}
@@ -297,7 +303,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
struct mlx5i_priv *ipriv = priv->ppriv;
mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
- mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
@@ -500,12 +506,12 @@ int mlx5i_dev_init(struct net_device *dev)
struct mlx5i_priv *ipriv = priv->ppriv;
/* Set dev address using underlay QP */
- dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff;
- dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff;
- dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;
+ dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff;
+ dev->dev_addr[2] = (ipriv->qpn >> 8) & 0xff;
+ dev->dev_addr[3] = (ipriv->qpn) & 0xff;
/* Add QPN to net-device mapping to HT */
- mlx5i_pkey_add_qpn(dev ,ipriv->qp.qpn);
+ mlx5i_pkey_add_qpn(dev, ipriv->qpn);
return 0;
}
@@ -532,7 +538,7 @@ void mlx5i_dev_cleanup(struct net_device *dev)
mlx5i_uninit_underlay_qp(priv);
/* Delete QPN to net-device mapping from HT */
- mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
+ mlx5i_pkey_del_qpn(dev, ipriv->qpn);
}
static int mlx5i_open(struct net_device *netdev)
@@ -552,7 +558,7 @@ static int mlx5i_open(struct net_device *netdev)
goto err_clear_state_opened_flag;
}
- err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
if (err) {
mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
goto err_reset_qp;
@@ -569,7 +575,7 @@ static int mlx5i_open(struct net_device *netdev)
return 0;
err_remove_fs_underlay_qp:
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_reset_qp:
mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
@@ -595,7 +601,7 @@ static int mlx5i_close(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &epriv->state);
netif_carrier_off(epriv->netdev);
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
mlx5e_deactivate_priv_channels(epriv);
mlx5e_close_channels(&epriv->channels);
mlx5i_uninit_underlay_qp(epriv);
@@ -614,11 +620,12 @@ static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
struct mlx5i_priv *ipriv = epriv->ppriv;
int err;
- mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
- err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
+ mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
+ gid->raw);
+ err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
if (err)
mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
- ipriv->qp.qpn, gid->raw);
+ ipriv->qpn, gid->raw);
if (set_qkey) {
mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
@@ -637,12 +644,13 @@ static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
struct mlx5i_priv *ipriv = epriv->ppriv;
int err;
- mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
+ mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
+ gid->raw);
- err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
+ err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
if (err)
mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
- ipriv->qp.qpn, gid->raw);
+ ipriv->qpn, gid->raw);
return err;
}
@@ -655,7 +663,9 @@ static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
struct mlx5_ib_ah *mah = to_mah(address);
struct mlx5i_priv *ipriv = epriv->ppriv;
- return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+ mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+
+ return NETDEV_TX_OK;
}
static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
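With the underlay QP reduced to a bare ipriv->qpn, the lifecycle in this file becomes: create the QP and record its number, walk it through the RST->INIT->RTR->RTS transitions, and attach that number to the RX steering tables. A condensed usage sketch stitched together from the functions changed above (error handling trimmed; the wrapper function itself is illustrative, not part of the patch):

/* Illustrative wrapper only; the individual calls are the ones added above. */
static int mlx5i_underlay_bringup_example(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;
	int err;

	err = mlx5i_create_underlay_qp(priv);	/* fills ipriv->qpn */
	if (err)
		return err;

	err = mlx5i_init_underlay_qp(priv);	/* RST -> INIT -> RTR -> RTS */
	if (err)
		goto err_destroy;

	err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qpn);
	if (err)
		goto err_uninit;

	return 0;

err_uninit:
	mlx5i_uninit_underlay_qp(priv);
err_destroy:
	mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
	return err;
}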
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index de7e01a027bb..c4aa47018c0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -51,7 +51,7 @@ extern const struct ethtool_ops mlx5i_pkey_ethtool_ops;
/* ipoib rdma netdev's private data structure */
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
- struct mlx5_core_qp qp;
+ u32 qpn;
bool sub_interface;
u32 qkey;
u16 pkey_index;
@@ -62,8 +62,8 @@ struct mlx5i_priv {
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
/* Underlay QP create/destroy functions */
-int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
-void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
+int mlx5i_create_underlay_qp(struct mlx5e_priv *priv);
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn);
/* Underlay QP state modification init/uninit functions */
int mlx5i_init_underlay_qp(struct mlx5e_priv *priv);
@@ -110,19 +110,11 @@ struct mlx5i_tx_wqe {
struct mlx5_wqe_data_seg data[];
};
-static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
- struct mlx5i_tx_wqe **wqe,
- u16 pi)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
+#define MLX5I_SQ_FETCH_WQE(sq, pi) \
+ ((struct mlx5i_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5i_tx_wqe)))
- *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- memset(*wqe, 0, sizeof(**wqe));
-}
-
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_av *av, u32 dqpn, u32 dqkey,
- bool xmit_more);
+void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
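The removed mlx5i_sq_fetch_wqe() fetched the WQE from the cyclic work queue and zeroed the whole struct; MLX5I_SQ_FETCH_WQE() now delegates that to a common mlx5e_fetch_wqe() parameterized by size. A plausible shape for that shared helper, inferred from the inline it replaces rather than quoted from the tree:

/* Sketch inferred from the removed inline; the real mlx5e_fetch_wqe()
 * in the mlx5e driver headers may differ in detail.
 */
static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi,
				    size_t wqe_size)
{
	void *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	memset(wqe, 0, wqe_size);
	return wqe;
}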
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 96e64187c089..b9af37ad40bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -204,13 +204,13 @@ static int mlx5i_pkey_open(struct net_device *netdev)
goto err_release_lock;
}
- err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
if (err) {
mlx5_core_warn(mdev, "attach child underlay qp to ft failed, %d\n", err);
goto err_unint_underlay_qp;
}
- err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0][0]);
+ err = mlx5i_create_tis(mdev, ipriv->qpn, &epriv->tisn[0][0]);
if (err) {
mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
goto err_remove_rx_uderlay_qp;
@@ -230,7 +230,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
err_clear_state_opened_flag:
mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
err_remove_rx_uderlay_qp:
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_unint_underlay_qp:
mlx5i_uninit_underlay_qp(epriv);
err_release_lock:
@@ -253,7 +253,7 @@ static int mlx5i_pkey_close(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
netif_carrier_off(priv->netdev);
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
mlx5i_uninit_underlay_qp(priv);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
@@ -307,23 +307,20 @@ static void mlx5i_pkey_cleanup(struct mlx5e_priv *priv)
static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv)
{
- struct mlx5i_priv *ipriv = priv->ppriv;
int err;
- err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
- if (err) {
+ err = mlx5i_create_underlay_qp(priv);
+ if (err)
mlx5_core_warn(priv->mdev, "create child underlay QP failed, %d\n", err);
- return err;
- }
- return 0;
+ return err;
}
static void mlx5i_pkey_cleanup_tx(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
- mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}
static int mlx5i_pkey_init_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 93052b07c76c..874c70e8cc54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -42,13 +42,12 @@
* Beware of lock dependencies (preferably, no locks should be acquired
* under it).
*/
-static DEFINE_MUTEX(lag_mutex);
+static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
u8 remap_port2)
{
- u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
@@ -56,14 +55,13 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, create_lag, in);
}
static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
u8 remap_port2)
{
- u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
@@ -72,52 +70,29 @@ static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};
-
- MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
-
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_lag, in);
}
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
- u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {};
MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, create_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
- u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {};
MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
-static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
- bool reset, void *out, int out_size)
-{
- u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
-
- MLX5_SET(query_cong_statistics_in, in, opcode,
- MLX5_CMD_OP_QUERY_CONG_STATISTICS);
- MLX5_SET(query_cong_statistics_in, in, clear, reset);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev)
{
@@ -232,12 +207,14 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
int err;
ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
- err = mlx5_cmd_destroy_lag(dev0);
+ MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
+ err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
if (err) {
if (roce_lag) {
mlx5_core_err(dev0,
@@ -297,9 +274,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!dev0 || !dev1)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
tracker = ldev->tracker;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -481,9 +458,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
break;
}
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev->tracker = tracker;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
if (changed)
mlx5_queue_bond_work(ldev, 0);
@@ -525,7 +502,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
if (fn >= MLX5_MAX_PORTS)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev->pf[fn].dev = dev;
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -533,7 +510,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
dev->priv.lag = ldev;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
}
static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -548,11 +525,11 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
if (i == MLX5_MAX_PORTS)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
dev->priv.lag = NULL;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
}
/* Must be called with intf_mutex held */
@@ -630,10 +607,10 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -644,10 +621,10 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_active(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -658,10 +635,10 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -687,7 +664,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -704,12 +681,36 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);
unlock:
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
+u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ struct net_device *slave)
+{
+ struct mlx5_lag *ldev;
+ u8 port = 0;
+
+ spin_lock(&lag_lock);
+ ldev = mlx5_lag_dev_get(dev);
+ if (!(ldev && __mlx5_lag_is_roce(ldev)))
+ goto unlock;
+
+ if (ldev->pf[MLX5_LAG_P1].netdev == slave)
+ port = MLX5_LAG_P1;
+ else
+ port = MLX5_LAG_P2;
+
+ port = ldev->v2p_map[port];
+
+unlock:
+ spin_unlock(&lag_lock);
+ return port;
+}
+EXPORT_SYMBOL(mlx5_lag_get_slave_port);
+
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
@@ -746,7 +747,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (ldev && __mlx5_lag_is_roce(ldev)) {
num_ports = MLX5_MAX_PORTS;
@@ -756,18 +757,23 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
+ spin_unlock(&lag_lock);
for (i = 0; i < num_ports; ++i) {
- ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
+ u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
+
+ MLX5_SET(query_cong_statistics_in, in, opcode,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS);
+ ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
+ out);
if (ret)
- goto unlock;
+ goto free;
for (j = 0; j < num_counters; ++j)
values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
}
-unlock:
- mutex_unlock(&lag_mutex);
+free:
kvfree(out);
return ret;
}
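The new mlx5_lag_get_slave_port() export maps a bonding slave netdev to the physical port currently selected for it via ldev->v2p_map, and returns 0 when the device is not in RoCE LAG mode (the unlock path above leaves port at its initial value). A hedged usage sketch from a hypothetical RDMA-side caller, names illustrative only:

/* Hypothetical consumer; only mlx5_lag_get_slave_port() comes from this patch. */
static u8 example_tx_port_for_slave(struct mlx5_core_dev *mdev,
				    struct net_device *slave)
{
	u8 port = mlx5_lag_get_slave_port(mdev, slave);

	/* port == 0 means no active RoCE LAG; treat it as "use the default". */
	return port ? port : 1;
}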
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 6cbccba56f70..3d5e57ff558c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -90,7 +90,8 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
}
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
- u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id)
+ u64 length, u32 log_alignment, u16 uid,
+ phys_addr_t *addr, u32 *obj_id)
{
u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
@@ -99,6 +100,7 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
unsigned long *block_map;
u64 icm_start_addr;
u32 log_icm_size;
+ u64 align_mask;
u32 max_blocks;
u64 block_idx;
void *sw_icm;
@@ -136,11 +138,14 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
return -EOPNOTSUPP;
max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ if (log_alignment < MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+ log_alignment = MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+ align_mask = BIT(log_alignment - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) - 1;
+
spin_lock(&dm->lock);
- block_idx = bitmap_find_next_zero_area(block_map,
- max_blocks,
- 0,
- num_blocks, 0);
+ block_idx = bitmap_find_next_zero_area(block_map, max_blocks, 0,
+ num_blocks, align_mask);
if (block_idx < max_blocks)
bitmap_set(block_map,
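mlx5_dm_sw_icm_alloc() now honours a caller-supplied log_alignment, clamped to the ICM block size and converted into the align_mask that bitmap_find_next_zero_area() expects, expressed in units of blocks. A small worked sketch of the same computation, with hypothetical numbers:

/* Hypothetical helper mirroring the align_mask computation above. */
static unsigned long sw_icm_align_mask(u32 log_alignment, u32 log_block_sz)
{
	if (log_alignment < log_block_sz)
		log_alignment = log_block_sz;

	/* e.g. log_alignment 16 with 4 KiB blocks (log 12): mask = 0xf,
	 * so allocations land on a 64 KiB (16-block) boundary; values at
	 * or below the block size collapse to mask 0 (block granularity).
	 */
	return BIT(log_alignment - log_block_sz) - 1;
}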
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 4be4d2d36218..4aaca7400fb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -27,7 +27,6 @@ struct mlx5_eq {
__be32 __iomem *doorbell;
u32 cons_index;
struct mlx5_frag_buf buf;
- int size;
unsigned int vecidx;
unsigned int irqn;
u8 eqn;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
index 7722a3f9bb68..a68738c8f4bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -124,8 +124,7 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
const u8 *mac, bool vlan, u16 vlan_id, u8 port_num)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
- u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {};
void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
char *addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, in_addr,
source_l3_address);
@@ -153,6 +152,6 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_roce_address, in);
}
EXPORT_SYMBOL(mlx5_core_roce_gid_set);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 3118e8d66407..fd8449ff9e17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -40,8 +40,7 @@
/* HW L2 Table (MPFS) management */
static int set_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac)
{
- u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {};
u8 *in_mac_addr;
MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
@@ -50,17 +49,16 @@ static int set_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac)
in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
ether_addr_copy(&in_mac_addr[2], mac);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_l2_table_entry, in);
}
static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
- u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {};
MLX5_SET(delete_l2_table_entry_in, in, opcode, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, delete_l2_table_entry, in);
}
/* UC L2 table hash node */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
index 48b5c847b642..8809a65ecefb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
@@ -4,7 +4,6 @@
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/port_tun.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index 148b55c3db7a..82c766a95165 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -60,24 +60,22 @@ static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {};
MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, add_vxlan_udp_dport, in);
}
static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {};
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in);
}
static struct mlx5_vxlan_port*
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 7af4210c1b96..742ba012c234 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -206,8 +206,7 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
driver_version);
- u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
- u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
+ u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
int remaining_size = driver_ver_sz;
char *string;
@@ -234,7 +233,7 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
MLX5_SET(set_driver_version_in, in, opcode,
MLX5_CMD_OP_SET_DRIVER_VERSION);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, set_driver_version, in);
}
static int set_dma_caps(struct pci_dev *pdev)
@@ -366,7 +365,7 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
if (err) {
mlx5_core_warn(dev,
"QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
@@ -407,30 +406,25 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}
-static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
+static int set_caps(struct mlx5_core_dev *dev, void *in, int opmod)
{
- u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
-
MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
- return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_hca_cap, in);
}
-static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
+static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
{
- void *set_ctx;
void *set_hca_cap;
- int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
int req_endianness;
int err;
- if (MLX5_CAP_GEN(dev, atomic)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
- if (err)
- return err;
- } else {
+ if (!MLX5_CAP_GEN(dev, atomic))
return 0;
- }
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
+ if (err)
+ return err;
req_endianness =
MLX5_CAP_ATOMIC(dev,
@@ -439,27 +433,18 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
return 0;
- set_ctx = kzalloc(set_sz, GFP_KERNEL);
- if (!set_ctx)
- return -ENOMEM;
-
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
/* Set requestor to host endianness */
MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
- err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
-
- kfree(set_ctx);
- return err;
+ return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
}
-static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
+static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
- void *set_ctx;
- int set_sz;
bool do_set = false;
int err;
@@ -471,11 +456,6 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
if (err)
return err;
- set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
- set_ctx = kzalloc(set_sz, GFP_KERNEL);
- if (!set_ctx)
- return -ENOMEM;
-
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
MLX5_ST_SZ_BYTES(odp_cap));
@@ -504,30 +484,21 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
- if (do_set)
- err = set_caps(dev, set_ctx, set_sz,
- MLX5_SET_HCA_CAP_OP_MOD_ODP);
-
- kfree(set_ctx);
+ if (!do_set)
+ return 0;
- return err;
+ return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}
-static int handle_hca_cap(struct mlx5_core_dev *dev)
+static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
- void *set_ctx = NULL;
struct mlx5_profile *prof = dev->profile;
- int err = -ENOMEM;
- int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
void *set_hca_cap;
-
- set_ctx = kzalloc(set_sz, GFP_KERNEL);
- if (!set_ctx)
- goto query_ex;
+ int err;
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
if (err)
- goto query_ex;
+ return err;
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
capability);
@@ -578,37 +549,76 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
num_vhca_ports,
MLX5_CAP_GEN_MAX(dev, num_vhca_ports));
- err = set_caps(dev, set_ctx, set_sz,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+ if (MLX5_CAP_GEN_MAX(dev, release_all_pages))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1);
-query_ex:
- kfree(set_ctx);
+ return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+}
+
+static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
+{
+ void *set_hca_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, roce))
+ return 0;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_ROCE(dev, sw_r_roce_src_udp_port) ||
+ !MLX5_CAP_ROCE_MAX(dev, sw_r_roce_src_udp_port))
+ return 0;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
+ memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE],
+ MLX5_ST_SZ_BYTES(roce_cap));
+ MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);
+
+ err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE);
return err;
}
static int set_hca_cap(struct mlx5_core_dev *dev)
{
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *set_ctx;
int err;
- err = handle_hca_cap(dev);
+ set_ctx = kzalloc(set_sz, GFP_KERNEL);
+ if (!set_ctx)
+ return -ENOMEM;
+
+ err = handle_hca_cap(dev, set_ctx);
if (err) {
mlx5_core_err(dev, "handle_hca_cap failed\n");
goto out;
}
- err = handle_hca_cap_atomic(dev);
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_atomic(dev, set_ctx);
if (err) {
mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
goto out;
}
- err = handle_hca_cap_odp(dev);
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_odp(dev, set_ctx);
if (err) {
mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
goto out;
}
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_roce(dev, set_ctx);
+ if (err) {
+ mlx5_core_err(dev, "handle_hca_cap_roce failed\n");
+ goto out;
+ }
+
out:
+ kfree(set_ctx);
return err;
}
@@ -642,26 +652,24 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, func_id);
MLX5_SET(enable_hca_in, in, embedded_cpu_function,
dev->caps.embedded_cpu);
- return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, enable_hca, in);
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, func_id);
MLX5_SET(enable_hca_in, in, embedded_cpu_function,
dev->caps.embedded_cpu);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, disable_hca, in);
}
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
@@ -686,14 +694,13 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
- u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
- u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {};
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {};
u32 sup_issi;
int err;
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
- err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
- query_out, sizeof(query_out));
+ err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
if (err) {
u32 syndrome;
u8 status;
@@ -713,13 +720,11 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
if (sup_issi & (1 << 1)) {
- u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
- u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {};
MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
MLX5_SET(set_issi_in, set_in, current_issi, 1);
- err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
- set_out, sizeof(set_out));
+ err = mlx5_cmd_exec_in(dev, set_issi, set_in);
if (err) {
mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
err);
@@ -782,7 +787,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
}
mlx5_pci_vsc_init(dev);
-
+ dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
return 0;
err_clr_master:
@@ -836,8 +841,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_cq_debugfs_init(dev);
- mlx5_init_qp_table(dev);
-
mlx5_init_reserved_gids(dev);
mlx5_init_clock(dev);
@@ -896,7 +899,6 @@ err_rl_cleanup:
err_tables_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
- mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
err_eq_cleanup:
@@ -924,7 +926,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
- mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
@@ -1180,7 +1181,6 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
{
int err = 0;
- dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "interface is up, NOP\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index ba2b09cc192f..e019d68062d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -33,34 +33,31 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
void *gid;
MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
}
EXPORT_SYMBOL(mlx5_core_attach_mcg);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
void *gid;
MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
}
EXPORT_SYMBOL(mlx5_core_detach_mcg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 366f2cbfc6db..9eb51f06d3ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -33,14 +33,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen)
{
- u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+ u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
u32 mkey_index;
void *mkc;
int err;
@@ -66,19 +65,18 @@ EXPORT_SYMBOL(mlx5_core_create_mkey);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey)
{
- u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_mkey, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {};
memset(out, 0, outlen);
MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
@@ -100,8 +98,8 @@ static inline u32 mlx5_get_psv(u32 *out, int psv_index)
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index)
{
- u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {};
int i, err;
if (npsvs > MLX5_MAX_PSVS)
@@ -111,7 +109,7 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
MLX5_SET(create_psv_in, in, pd, pdn);
MLX5_SET(create_psv_in, in, num_psv, npsvs);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_psv, in, out);
if (err)
return err;
@@ -124,11 +122,10 @@ EXPORT_SYMBOL(mlx5_core_create_psv);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
{
- u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {};
MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV);
MLX5_SET(destroy_psv_in, in, psvn, psv_num);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_psv, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_psv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 91bd258ecf1b..8ce78f42dfc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -35,7 +35,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/eq.h"
@@ -51,6 +50,7 @@ struct mlx5_pages_req {
u8 ec_function;
s32 npages;
struct work_struct work;
+ u8 release_all;
};
struct fw_page {
@@ -136,8 +136,8 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
s32 *npages, int boot)
{
- u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
int err;
MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
@@ -146,7 +146,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
if (err)
return err;
@@ -182,25 +182,17 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
-static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp)
{
- struct fw_page *fwp;
- int n;
-
- fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
- if (!fwp) {
- mlx5_core_warn(dev, "page not found\n");
- return;
- }
+ int n = (fwp->addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
- n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
fwp->free_count++;
set_bit(n, &fwp->bitmask);
if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
rb_erase(&fwp->rb_node, &dev->priv.page_root);
if (fwp->free_count != 1)
list_del(&fwp->list);
- dma_unmap_page(dev->device, addr & MLX5_U64_4K_PAGE_MASK,
+ dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
@@ -209,6 +201,18 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
}
}
+static void free_addr(struct mlx5_core_dev *dev, u64 addr)
+{
+ struct fw_page *fwp;
+
+ fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
+ if (!fwp) {
+ mlx5_core_warn_rl(dev, "page not found\n");
+ return;
+ }
+ free_fwp(dev, fwp);
+}
+
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
struct device *device = dev->device;
@@ -257,8 +261,7 @@ err_mapping:
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
- u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int err;
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
@@ -266,7 +269,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, manage_pages, in);
if (err)
mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
func_id, err);
@@ -331,7 +334,7 @@ retry:
out_4k:
for (i--; i >= 0; i--)
- free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
+ free_addr(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
kvfree(in);
if (notify_fail)
@@ -339,6 +342,33 @@ out_free:
return err;
}
+static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+ bool ec_function)
+{
+ struct rb_node *p;
+ int npages = 0;
+
+ p = rb_first(&dev->priv.page_root);
+ while (p) {
+ struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
+
+ p = rb_next(p);
+ if (fwp->func_id != func_id)
+ continue;
+ free_fwp(dev, fwp);
+ npages++;
+ }
+
+ dev->priv.fw_pages -= npages;
+ if (func_id)
+ dev->priv.vfs_pages -= npages;
+ else if (mlx5_core_is_ecpf(dev) && !ec_function)
+ dev->priv.peer_pf_pages -= npages;
+
+ mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
+ npages, ec_function, func_id);
+}
+
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 *in, int in_size, u32 *out, int out_size)
{
@@ -374,7 +404,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed, bool ec_function)
{
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
- u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
u32 *out;
int err;
@@ -410,7 +440,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
}
for (i = 0; i < num_claimed; i++)
- free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+ free_addr(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
if (nclaimed)
*nclaimed = num_claimed;
@@ -432,7 +462,9 @@ static void pages_work_handler(struct work_struct *work)
struct mlx5_core_dev *dev = req->dev;
int err = 0;
- if (req->npages < 0)
+ if (req->release_all)
+ release_all_pages(dev, req->func_id, req->ec_function);
+ else if (req->npages < 0)
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
req->ec_function);
else if (req->npages > 0)
@@ -447,6 +479,7 @@ static void pages_work_handler(struct work_struct *work)
enum {
EC_FUNCTION_MASK = 0x8000,
+ RELEASE_ALL_PAGES_MASK = 0x4000,
};
static int req_pages_handler(struct notifier_block *nb,
@@ -457,6 +490,7 @@ static int req_pages_handler(struct notifier_block *nb,
struct mlx5_priv *priv;
struct mlx5_eqe *eqe;
bool ec_function;
+ bool release_all;
u16 func_id;
s32 npages;
@@ -467,8 +501,10 @@ static int req_pages_handler(struct notifier_block *nb,
func_id = be16_to_cpu(eqe->data.req_pages.func_id);
npages = be32_to_cpu(eqe->data.req_pages.num_pages);
ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
- mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
- func_id, npages);
+ release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
+ RELEASE_ALL_PAGES_MASK;
+ mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
+ func_id, npages, release_all);
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
mlx5_core_warn(dev, "failed to allocate pages request\n");
@@ -479,6 +515,7 @@ static int req_pages_handler(struct notifier_block *nb,
req->func_id = func_id;
req->npages = npages;
req->ec_function = ec_function;
+ req->release_all = release_all;
INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work);
return NOTIFY_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
index bd830d8d6c5f..aabc53ad8bdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -33,17 +33,16 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
- u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
int err;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_pd, in, out);
if (!err)
*pdn = MLX5_GET(alloc_pd_out, out, pd);
return err;
@@ -52,11 +51,10 @@ EXPORT_SYMBOL(mlx5_core_alloc_pd);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
MLX5_SET(dealloc_pd_in, in, pd, pdn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, dealloc_pd, in);
}
EXPORT_SYMBOL(mlx5_core_dealloc_pd);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index cc262b30aed5..9f829e68fc73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -763,24 +763,23 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {};
MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, set_wol_rol, in);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {};
int err;
MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_wol_rol, in, out);
if (!err)
*wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index f3b29d9ade1f..99039c47ef33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -33,15 +33,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
/* Scheduling element fw management */
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *ctx, u32 *element_id)
{
- u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
void *schedc;
int err;
@@ -53,7 +52,7 @@ int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
hierarchy);
memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_scheduling_element, in, out);
if (err)
return err;
@@ -66,8 +65,7 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *ctx, u32 element_id,
u32 modify_bitmask)
{
- u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {};
void *schedc;
schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
@@ -82,14 +80,13 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
hierarchy);
memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_scheduling_element, in);
}
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id)
{
- u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {};
MLX5_SET(destroy_scheduling_element_in, in, opcode,
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
@@ -98,7 +95,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
hierarchy);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_scheduling_element, in);
}
static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
@@ -145,8 +142,7 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
struct mlx5_rl_entry *entry, bool set)
{
- u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
void *pp_context;
pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
@@ -156,7 +152,7 @@ static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
if (set)
memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_pp_rate_limit, in);
}
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 461b39376daf..6bd34b293007 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -18,7 +18,7 @@ int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
if (err)
return err;
@@ -51,7 +51,7 @@ int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
HCA_CAP_OPMOD_GET_CUR);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
if (err) {
kfree(out);
return err;
@@ -141,7 +141,7 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(query_flow_table_in, in, table_type, type);
MLX5_SET(query_flow_table_in, in, table_id, table_id);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
if (err)
return err;
@@ -158,12 +158,11 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
{
- u32 out[MLX5_ST_SZ_DW(sync_steering_out)] = {};
u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, sync_steering, in);
}
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
@@ -214,14 +213,13 @@ int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
u32 table_type,
u32 table_id)
{
- u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {};
u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, table_type);
MLX5_SET(delete_fte_in, in, table_id, table_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, delete_fte, in);
}
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
@@ -263,7 +261,6 @@ out:
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
u32 modify_header_id)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
MLX5_SET(dealloc_modify_header_context_in, in, opcode,
@@ -271,7 +268,7 @@ int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
modify_header_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
}
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
@@ -292,7 +289,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_group_in, in, table_type, table_type);
MLX5_SET(create_flow_group_in, in, table_id, table_id);
- err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
if (err)
goto out;
@@ -309,14 +306,14 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
u32 group_id)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {};
- MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET(destroy_flow_group_in, in, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
}
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
@@ -360,7 +357,7 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
attr->reformat_en);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
if (err)
return err;
@@ -379,7 +376,6 @@ int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
u32 table_id,
u32 table_type)
{
- u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {};
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
MLX5_SET(destroy_flow_table_in, in, opcode,
@@ -387,7 +383,7 @@ int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
@@ -434,7 +430,6 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
u32 reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
@@ -442,7 +437,7 @@ void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
reformat_id);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
@@ -458,7 +453,7 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
MLX5_SET(query_roce_address_in, in, roce_address_index, index);
MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 30d2d7376f56..cc33515b9aba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -95,13 +95,12 @@ static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
}
static struct mlx5dr_icm_mr *
-dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
- enum mlx5_sw_icm_type type,
- size_t align_base)
+dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
struct mlx5_core_dev *mdev = pool->dmn->mdev;
+ enum mlx5_sw_icm_type dm_type;
struct mlx5dr_icm_mr *icm_mr;
- size_t align_diff;
+ size_t log_align_base;
int err;
icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
@@ -111,14 +110,22 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
icm_mr->pool = pool;
INIT_LIST_HEAD(&icm_mr->mr_list);
- icm_mr->dm.type = type;
-
- /* 2^log_biggest_table * entry-size * double-for-alignment */
icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
- pool->icm_type) * 2;
+ pool->icm_type);
+
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ dm_type = MLX5_SW_ICM_TYPE_STEERING;
+ log_align_base = ilog2(icm_mr->dm.length);
+ } else {
+ dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+ /* Align base is 64B */
+ log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
+ }
+ icm_mr->dm.type = dm_type;
- err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
- &icm_mr->dm.addr, &icm_mr->dm.obj_id);
+ err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
+ log_align_base, 0, &icm_mr->dm.addr,
+ &icm_mr->dm.obj_id);
if (err) {
mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
goto free_icm_mr;
@@ -137,15 +144,18 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
icm_mr->icm_start_addr = icm_mr->dm.addr;
- /* align_base is always a power of 2 */
- align_diff = icm_mr->icm_start_addr & (align_base - 1);
- if (align_diff)
- icm_mr->used_length = align_base - align_diff;
+ if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) {
+ mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n",
+ log_align_base);
+ goto free_mkey;
+ }
list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list);
return icm_mr;
+free_mkey:
+ mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
free_dm:
mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
icm_mr->dm.addr, icm_mr->dm.obj_id);
@@ -200,24 +210,11 @@ static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
struct mlx5dr_icm_pool *pool = bucket->pool;
struct mlx5dr_icm_mr *icm_mr = NULL;
struct mlx5dr_icm_chunk *chunk;
- enum mlx5_sw_icm_type dm_type;
- size_t align_base;
int i, err = 0;
mr_req_size = bucket->num_of_entries * bucket->entry_size;
mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
pool->icm_type);
-
- if (pool->icm_type == DR_ICM_TYPE_STE) {
- dm_type = MLX5_SW_ICM_TYPE_STEERING;
- /* Align base is the biggest chunk size / row size */
- align_base = mr_row_size;
- } else {
- dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
- /* Align base is 64B */
- align_base = DR_ICM_MODIFY_HDR_ALIGN_BASE;
- }
-
mutex_lock(&pool->mr_mutex);
if (!list_empty(&pool->icm_mr_list)) {
icm_mr = list_last_entry(&pool->icm_mr_list,
@@ -228,7 +225,7 @@ static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
}
if (!icm_mr || mr_free_size < mr_row_size) {
- icm_mr = dr_icm_pool_mr_create(pool, dm_type, align_base);
+ icm_mr = dr_icm_pool_mr_create(pool);
if (!icm_mr) {
err = -ENOMEM;
goto out_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 18719acb7e54..b8d97d44be7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -100,14 +100,10 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
return err == CQ_POLL_ERR ? err : npolled;
}
-static void dr_qp_event(struct mlx5_core_qp *mqp, int event)
-{
- pr_info("DR QP event %u on QP #%u\n", event, mqp->qpn);
-}
-
static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
struct dr_qp_init_attr *attr)
{
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
struct mlx5_wq_param wqp;
struct mlx5dr_qp *dr_qp;
@@ -180,14 +176,12 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
(__be64 *)MLX5_ADDR_OF(create_qp_in,
in, pas));
- err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, inlen);
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ dr_qp->qpn = MLX5_GET(create_qp_out, out, qpn);
kfree(in);
-
- if (err) {
- mlx5_core_warn(mdev, " Can't create QP\n");
+ if (err)
goto err_in;
- }
- dr_qp->mqp.event = dr_qp_event;
dr_qp->uar = attr->uar;
return dr_qp;
@@ -204,7 +198,12 @@ err_wq:
static void dr_destroy_qp(struct mlx5_core_dev *mdev,
struct mlx5dr_qp *dr_qp)
{
- mlx5_core_destroy_qp(mdev, &dr_qp->mqp);
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, dr_qp->qpn);
+ mlx5_cmd_exec_in(mdev, destroy_qp, in);
+
kfree(dr_qp->sq.wqe_head);
mlx5_wq_destroy(&dr_qp->wq_ctrl);
kfree(dr_qp);
@@ -242,7 +241,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
MLX5_WQE_CTRL_CQ_UPDATE : 0;
wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) |
opcode);
- wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->mqp.qpn << 8);
+ wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8);
wq_raddr = (void *)(wq_ctrl + 1);
wq_raddr->raddr = cpu_to_be64(remote_addr);
wq_raddr->rkey = cpu_to_be32(rkey);
@@ -585,8 +584,10 @@ static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
MLX5_SET(qpc, qpc, rre, 1);
MLX5_SET(qpc, qpc, rwe, 1);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
- &dr_qp->mqp);
+ MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, in, qpn, dr_qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
}
static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
@@ -598,14 +599,15 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);
- MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->mqp.qpn);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
- MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, qpc,
- &dr_qp->mqp);
+ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
}
static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
@@ -617,7 +619,7 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);
- MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->mqp.qpn);
+ MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);
MLX5_SET(qpc, qpc, mtu, attr->mtu);
MLX5_SET(qpc, qpc, log_msg_max, DR_CHUNK_SIZE_MAX - 1);
@@ -636,8 +638,10 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
MLX5_SET(qpc, qpc, min_rnr_nak, 1);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
- &dr_qp->mqp);
+ MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}
static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
@@ -663,7 +667,7 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return ret;
rtr_attr.mtu = mtu;
- rtr_attr.qp_num = dr_qp->mqp.qpn;
+ rtr_attr.qp_num = dr_qp->qpn;
rtr_attr.min_rnr_timer = 12;
rtr_attr.port_num = port;
rtr_attr.sgid_index = gid_index;
@@ -689,12 +693,6 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return 0;
}
-static void dr_cq_event(struct mlx5_core_cq *mcq,
- enum mlx5_event event)
-{
- pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
-}
-
static void dr_cq_complete(struct mlx5_core_cq *mcq,
struct mlx5_eqe *eqe)
{
@@ -761,7 +759,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
- cq->mcq.event = dr_cq_event;
cq->mcq.comp = dr_cq_complete;
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 3fa739951b34..984783238baa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -990,7 +990,7 @@ struct mlx5dr_qp {
struct mlx5_wq_qp wq;
struct mlx5_uars_page *uar;
struct mlx5_wq_ctrl wq_ctrl;
- struct mlx5_core_qp mqp;
+ u32 qpn;
struct {
unsigned int pc;
unsigned int cc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 3b3f5b9d4f95..8887b2440c7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -576,7 +576,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_action *action;
size_t actions_sz;
- actions_sz = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) *
+ actions_sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) *
num_actions;
action = mlx5dr_action_create_modify_header(dr_domain, 0,
actions_sz,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index b1068500f1df..01cc00ad8acf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -36,14 +36,14 @@
int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
{
- u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
int err;
MLX5_SET(alloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out,
transport_domain);
@@ -54,19 +54,18 @@ EXPORT_SYMBOL(mlx5_core_alloc_transport_domain);
void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
MLX5_SET(dealloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}
EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain);
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
{
- u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {};
int err;
MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
@@ -78,44 +77,39 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
}
EXPORT_SYMBOL(mlx5_core_create_rq);
-int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
-
MLX5_SET(modify_rq_in, in, rqn, rqn);
MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_rq, in);
}
EXPORT_SYMBOL(mlx5_core_modify_rq);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_rq, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq);
int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
- int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
+ u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {};
MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
MLX5_SET(query_rq_in, in, rqn, rqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_inout(dev, query_rq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_rq);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
- u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
int err;
MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
@@ -126,34 +120,30 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
return err;
}
-int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
-
MLX5_SET(modify_sq_in, in, sqn, sqn);
MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_sq, in);
}
EXPORT_SYMBOL(mlx5_core_modify_sq);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_sq, in);
}
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {0};
- int outlen = MLX5_ST_SZ_BYTES(query_sq_out);
+ u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {};
MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
MLX5_SET(query_sq_in, in, sqn, sqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_inout(dev, query_sq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_sq);
@@ -182,24 +172,13 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state);
-int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
- u32 *in, int inlen,
- u32 *out, int outlen)
-{
- MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
-
- return mlx5_cmd_exec(dev, in, inlen, out, outlen);
-}
-EXPORT_SYMBOL(mlx5_core_create_tir_out);
-
-int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tirn)
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn)
{
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
int err;
- err = mlx5_core_create_tir_out(dev, in, inlen,
- out, sizeof(out));
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev, create_tir, in, out);
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
@@ -207,35 +186,30 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
}
EXPORT_SYMBOL(mlx5_core_create_tir);
-int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
- int inlen)
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0};
-
MLX5_SET(modify_tir_in, in, tirn, tirn);
MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_tir, in);
}
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tir, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_tir);
-int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tisn)
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn)
{
- u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
int err;
MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_tis, in, out);
if (!err)
*tisn = MLX5_GET(create_tis_out, out, tisn);
@@ -243,33 +217,29 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
}
EXPORT_SYMBOL(mlx5_core_create_tis);
-int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
- int inlen)
+int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_tis_out)] = {0};
-
MLX5_SET(modify_tis_in, in, tisn, tisn);
MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_tis, in);
}
EXPORT_SYMBOL(mlx5_core_modify_tis);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tis, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_tis);
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn)
{
- u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
int err;
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
@@ -284,7 +254,7 @@ EXPORT_SYMBOL(mlx5_core_create_rqt);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {};
MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
@@ -293,12 +263,11 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_rqt, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
@@ -383,7 +352,7 @@ static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
int curr_state, int next_state,
u16 peer_vhca, u32 peer_sq)
{
- u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
void *rqc;
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
@@ -396,8 +365,7 @@ static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
- return mlx5_core_modify_rq(func_mdev, rqn,
- in, MLX5_ST_SZ_BYTES(modify_rq_in));
+ return mlx5_core_modify_rq(func_mdev, rqn, in);
}
static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
@@ -417,8 +385,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
MLX5_SET(modify_sq_in, in, sq_state, curr_state);
MLX5_SET(sqc, sqc, state, next_state);
- return mlx5_core_modify_sq(peer_mdev, sqn,
- in, MLX5_ST_SZ_BYTES(modify_sq_in));
+ return mlx5_core_modify_sq(peer_mdev, sqn, in);
}
static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 0d006224d7b0..da481a7c12f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -34,17 +34,16 @@
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
- u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
int err;
MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
if (!err)
*uarn = MLX5_GET(alloc_uar_out, out, uar);
return err;
@@ -53,12 +52,11 @@ EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};
MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
MLX5_SET(dealloc_uar_in, in, uar, uarn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 23f879da9104..c107d92dc118 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -40,10 +40,11 @@
/* Mutex to hold while enabling or disabling RoCE */
static DEFINE_MUTEX(mlx5_roce_en_lock);
-static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport, u32 *out, int outlen)
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
- u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
+ int err;
MLX5_SET(query_vport_state_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_STATE);
@@ -52,14 +53,9 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
if (vport)
MLX5_SET(query_vport_state_in, in, other_vport, 1);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
-{
- u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
-
- _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
+ if (err)
+ return 0;
return MLX5_GET(query_vport_state_out, out, state);
}
@@ -67,8 +63,7 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 other_vport, u8 state)
{
- u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
MLX5_SET(modify_vport_state_in, in, opcode,
MLX5_CMD_OP_MODIFY_VPORT_STATE);
@@ -77,13 +72,13 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
MLX5_SET(modify_vport_state_in, in, admin_state, state);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
}
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
- u32 *out, int outlen)
+ u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
@@ -91,26 +86,16 @@ static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
- int inlen)
-{
- u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
-
- MLX5_SET(modify_nic_vport_context_in, in, opcode,
- MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 *min_inline)
{
- u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
int err;
- err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
+ err = mlx5_query_nic_vport_context(mdev, vport, out);
if (!err)
*min_inline = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.min_wqe_inline_mode);
@@ -139,8 +124,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline)
{
- u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
- int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
void *nic_vport_ctx;
MLX5_SET(modify_nic_vport_context_in, in,
@@ -152,23 +136,20 @@ int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
in, nic_vport_context);
MLX5_SET(nic_vport_context, nic_vport_ctx,
min_wqe_inline_mode, min_inline);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- return mlx5_modify_nic_vport_context(mdev, in, inlen);
+ return mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
}
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, bool other, u8 *addr)
{
- int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
u8 *out_addr;
- u32 *out;
int err;
- out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
nic_vport_context.permanent_address);
@@ -177,11 +158,10 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(query_nic_vport_context_in, in, other_vport, other);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+ err = mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
if (!err)
ether_addr_copy(addr, &out_addr[2]);
- kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
@@ -216,8 +196,10 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
permanent_address);
ether_addr_copy(&perm_mac[2], addr);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -235,7 +217,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
if (!err)
*mtu = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.mtu);
@@ -257,8 +239,10 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
return err;
@@ -292,7 +276,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
req_list_size = max_list_size;
}
- out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
+ out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
out = kzalloc(out_sz, GFP_KERNEL);
@@ -332,7 +316,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int list_size)
{
- u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
void *nic_vport_ctx;
int max_list_size;
int in_sz;
@@ -350,7 +334,6 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
- memset(out, 0, sizeof(out));
in = kzalloc(in_sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -442,7 +425,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ mlx5_query_nic_vport_context(mdev, 0, out);
*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.system_image_guid);
@@ -462,7 +445,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ mlx5_query_nic_vport_context(mdev, 0, out);
*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);
@@ -498,8 +481,10 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -516,7 +501,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ mlx5_query_nic_vport_context(mdev, 0, out);
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);
@@ -664,7 +649,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *rep)
{
int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
- int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
+ int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {};
int is_group_manager;
void *out;
void *ctx;
@@ -691,7 +676,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
if (MLX5_CAP_GEN(dev, num_ports) == 2)
MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec_inout(dev, query_hca_vport_context, in, out);
if (err)
goto ex;
@@ -788,7 +773,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
+ err = mlx5_query_nic_vport_context(mdev, vport, out);
if (err)
goto out;
@@ -825,8 +810,10 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
nic_vport_context.promisc_mc, promisc_mc);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.promisc_all, promisc_all);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -865,8 +852,10 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
MLX5_SET(modify_nic_vport_context_in, in,
field_select.disable_uc_local_lb, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
if (!err)
mlx5_core_dbg(mdev, "%s local_lb\n",
@@ -888,7 +877,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
if (err)
goto out;
@@ -925,8 +914,10 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
state);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -965,16 +956,15 @@ int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
mutex_unlock(&mlx5_roce_en_lock);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
+EXPORT_SYMBOL(mlx5_nic_vport_disable_roce);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
- int vf, u8 port_num, void *out,
- size_t out_sz)
+ int vf, u8 port_num, void *out)
{
- int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
- int is_group_manager;
- void *in;
- int err;
+ int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
+ int is_group_manager;
+ void *in;
+ int err;
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
in = kvzalloc(in_sz, GFP_KERNEL);
@@ -997,7 +987,7 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
if (MLX5_CAP_GEN(dev, num_ports) == 2)
MLX5_SET(query_vport_counter_in, in, port_num, port_num);
- err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ err = mlx5_cmd_exec_inout(dev, query_vport_counter, in, out);
free:
kvfree(in);
return err;
@@ -1008,8 +998,8 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
u8 other_vport, u64 *rx_discard_vport_down,
u64 *tx_discard_vport_down)
{
- u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
int err;
MLX5_SET(query_vnic_env_in, in, opcode,
@@ -1018,7 +1008,7 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
MLX5_SET(query_vnic_env_in, in, vport_number, vport);
MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
if (err)
return err;
@@ -1035,11 +1025,10 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *req)
{
int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
- u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
int is_group_manager;
+ void *ctx;
void *in;
int err;
- void *ctx;
mlx5_core_dbg(dev, "vf %d\n", vf);
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
@@ -1047,7 +1036,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
if (!in)
return -ENOMEM;
- memset(out, 0, sizeof(out));
MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
if (other_vport) {
if (is_group_manager) {
@@ -1074,7 +1062,7 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
req->cap_mask1_perm);
- err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in);
ex:
kfree(in);
return err;
@@ -1103,8 +1091,10 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliation_criteria,
MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
if (err)
mlx5_nic_vport_disable_roce(port_mdev);
@@ -1129,8 +1119,10 @@ int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
nic_vport_context.affiliated_vhca_id, 0);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliation_criteria, 0);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
if (!err)
mlx5_nic_vport_disable_roce(port_mdev);
@@ -1170,4 +1162,4 @@ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
}
-EXPORT_SYMBOL(mlx5_eswitch_get_total_vports);
+EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
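With the out_sz argument dropped from mlx5_core_query_vport_counter(), callers are expected to hand in a buffer sized for the full query_vport_counter_out layout; the helper now sizes the command from the layout itself. A hypothetical caller sketch (function and variable names are illustrative, not from this patch):

static int query_vf_counters(struct mlx5_core_dev *dev, int vf)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_counter_out)] = {};
	int err;

	/* other_vport = 1, port_num = 1: query the given VF's vport counters */
	err = mlx5_core_query_vport_counter(dev, 1, vf, 1, out);
	if (err)
		return err;

	/* individual counters are then read from "out" with MLX5_GET/MLX5_GET64 */
	return 0;
}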
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 046a0cb82ed8..7a04c626a2aa 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -76,7 +76,7 @@ static int mlxfw_fsm_state_err(struct mlxfw_dev *mlxfw_dev,
case MLXFW_FSM_STATE_ERR_MAX:
MLXFW_ERR_MSG(mlxfw_dev, extack, "unknown error", err);
break;
- };
+ }
return mlxfw_fsm_state_errno[fsm_state_err];
};
@@ -159,7 +159,7 @@ mlxfw_fsm_reactivate_err(struct mlxfw_dev *mlxfw_dev,
case MLXFW_FSM_REACTIVATE_STATUS_MAX:
MLXFW_REACT_ERR("unexpected error", err);
break;
- };
+ }
return -EREMOTEIO;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 0e86a581d45b..4aeabb35c943 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -21,6 +21,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_acl_atcam.o spectrum_acl_erp.o \
spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
spectrum_acl_bloom_filter.o spectrum_acl.o \
+ spectrum_flow.o spectrum_matchall.o \
spectrum_flower.o spectrum_cnt.o \
spectrum_fid.o spectrum_ipip.o \
spectrum_acl_flex_actions.o \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 9b39b8e70519..3c3db1c874b6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3203,7 +3203,7 @@ MLXSW_ITEM32_INDEXED(reg, iedr, rec_type, MLXSW_REG_IEDR_BASE_LEN, 24, 8,
* Size of entries to be deleted. The unit is 1 entry, regardless of entry type.
* Access: OP
*/
-MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 11,
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 13,
MLXSW_REG_IEDR_REC_LEN, 0x00, false);
/* reg_iedr_rec_index_start
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 24ca8d5bc564..f78bde8bc16e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -25,9 +25,7 @@
#include <linux/log2.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
-#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
-#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>
#include "spectrum.h"
@@ -582,16 +580,6 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool enable, u32 rate)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char mpsc_pl[MLXSW_REG_MPSC_LEN];
-
- mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
-}
-
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up)
{
@@ -1362,412 +1350,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
}
-static struct mlxsw_sp_port_mall_tc_entry *
-mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
- unsigned long cookie) {
- struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
-
- list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
- if (mall_tc_entry->cookie == cookie)
- return mall_tc_entry;
-
- return NULL;
-}
-
-static int
-mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
- const struct flow_action_entry *act,
- bool ingress)
-{
- enum mlxsw_sp_span_type span_type;
-
- if (!act->dev) {
- netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
- return -EINVAL;
- }
-
- mirror->ingress = ingress;
- span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
- true, &mirror->span_id);
-}
-
-static void
-mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
-{
- enum mlxsw_sp_span_type span_type;
-
- span_type = mirror->ingress ?
- MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
- span_type, true);
-}
-
-static int
-mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *cls,
- const struct flow_action_entry *act,
- bool ingress)
-{
- int err;
-
- if (!mlxsw_sp_port->sample)
- return -EOPNOTSUPP;
- if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
- netdev_err(mlxsw_sp_port->dev, "sample already active\n");
- return -EEXIST;
- }
- if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
- netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
- return -EOPNOTSUPP;
- }
-
- rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
- act->sample.psample_group);
- mlxsw_sp_port->sample->truncate = act->sample.truncate;
- mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
- mlxsw_sp_port->sample->rate = act->sample.rate;
-
- err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
- if (err)
- goto err_port_sample_set;
- return 0;
-
-err_port_sample_set:
- RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
- return err;
-}
-
-static void
-mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- if (!mlxsw_sp_port->sample)
- return;
-
- mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
- RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
-}
-
-static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f,
- bool ingress)
-{
- struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
- __be16 protocol = f->common.protocol;
- struct flow_action_entry *act;
- int err;
-
- if (!flow_offload_has_one_action(&f->rule->action)) {
- netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
- return -EOPNOTSUPP;
- }
-
- mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
- if (!mall_tc_entry)
- return -ENOMEM;
- mall_tc_entry->cookie = f->cookie;
-
- act = &f->rule->action.entries[0];
-
- if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
- struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
-
- mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
- mirror = &mall_tc_entry->mirror;
- err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
- mirror, act,
- ingress);
- } else if (act->id == FLOW_ACTION_SAMPLE &&
- protocol == htons(ETH_P_ALL)) {
- mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
- err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
- act, ingress);
- } else {
- err = -EOPNOTSUPP;
- }
-
- if (err)
- goto err_add_action;
-
- list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
- return 0;
-
-err_add_action:
- kfree(mall_tc_entry);
- return err;
-}
-
-static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f)
-{
- struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
-
- mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
- f->cookie);
- if (!mall_tc_entry) {
- netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
- return;
- }
- list_del(&mall_tc_entry->list);
-
- switch (mall_tc_entry->type) {
- case MLXSW_SP_PORT_MALL_MIRROR:
- mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
- &mall_tc_entry->mirror);
- break;
- case MLXSW_SP_PORT_MALL_SAMPLE:
- mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
- break;
- default:
- WARN_ON(1);
- }
-
- kfree(mall_tc_entry);
-}
-
-static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f,
- bool ingress)
-{
- switch (f->command) {
- case TC_CLSMATCHALL_REPLACE:
- return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
- ingress);
- case TC_CLSMATCHALL_DESTROY:
- mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
-mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
- struct flow_cls_offload *f)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
-
- switch (f->command) {
- case FLOW_CLS_REPLACE:
- return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
- case FLOW_CLS_DESTROY:
- mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
- return 0;
- case FLOW_CLS_STATS:
- return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
- case FLOW_CLS_TMPLT_CREATE:
- return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
- case FLOW_CLS_TMPLT_DESTROY:
- mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
- void *type_data,
- void *cb_priv, bool ingress)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSMATCHALL:
- if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
- type_data))
- return -EOPNOTSUPP;
-
- return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
- ingress);
- case TC_SETUP_CLSFLOWER:
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
- void *type_data,
- void *cb_priv)
-{
- return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
- cb_priv, true);
-}
-
-static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
- void *type_data,
- void *cb_priv)
-{
- return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
- cb_priv, false);
-}
-
-static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
- void *type_data, void *cb_priv)
-{
- struct mlxsw_sp_acl_block *acl_block = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSMATCHALL:
- return 0;
- case TC_SETUP_CLSFLOWER:
- if (mlxsw_sp_acl_block_disabled(acl_block))
- return -EOPNOTSUPP;
-
- return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
-{
- struct mlxsw_sp_acl_block *acl_block = cb_priv;
-
- mlxsw_sp_acl_block_destroy(acl_block);
-}
-
-static LIST_HEAD(mlxsw_sp_block_cb_list);
-
-static int
-mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f, bool ingress)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_acl_block *acl_block;
- struct flow_block_cb *block_cb;
- bool register_block = false;
- int err;
-
- block_cb = flow_block_cb_lookup(f->block,
- mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp);
- if (!block_cb) {
- acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
- if (!acl_block)
- return -ENOMEM;
- block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp, acl_block,
- mlxsw_sp_tc_block_flower_release);
- if (IS_ERR(block_cb)) {
- mlxsw_sp_acl_block_destroy(acl_block);
- err = PTR_ERR(block_cb);
- goto err_cb_register;
- }
- register_block = true;
- } else {
- acl_block = flow_block_cb_priv(block_cb);
- }
- flow_block_cb_incref(block_cb);
- err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
- mlxsw_sp_port, ingress, f->extack);
- if (err)
- goto err_block_bind;
-
- if (ingress)
- mlxsw_sp_port->ing_acl_block = acl_block;
- else
- mlxsw_sp_port->eg_acl_block = acl_block;
-
- if (register_block) {
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
- }
-
- return 0;
-
-err_block_bind:
- if (!flow_block_cb_decref(block_cb))
- flow_block_cb_free(block_cb);
-err_cb_register:
- return err;
-}
-
-static void
-mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f, bool ingress)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_acl_block *acl_block;
- struct flow_block_cb *block_cb;
- int err;
-
- block_cb = flow_block_cb_lookup(f->block,
- mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp);
- if (!block_cb)
- return;
-
- if (ingress)
- mlxsw_sp_port->ing_acl_block = NULL;
- else
- mlxsw_sp_port->eg_acl_block = NULL;
-
- acl_block = flow_block_cb_priv(block_cb);
- err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
- mlxsw_sp_port, ingress);
- if (!err && !flow_block_cb_decref(block_cb)) {
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
- }
-}
-
-static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f)
-{
- struct flow_block_cb *block_cb;
- flow_setup_cb_t *cb;
- bool ingress;
- int err;
-
- if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
- cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
- ingress = true;
- } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
- cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
- ingress = false;
- } else {
- return -EOPNOTSUPP;
- }
-
- f->driver_block_list = &mlxsw_sp_block_cb_list;
-
- switch (f->command) {
- case FLOW_BLOCK_BIND:
- if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
- &mlxsw_sp_block_cb_list))
- return -EBUSY;
-
- block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
- mlxsw_sp_port, NULL);
- if (IS_ERR(block_cb))
- return PTR_ERR(block_cb);
- err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
- ingress);
- if (err) {
- flow_block_cb_free(block_cb);
- return err;
- }
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
- return 0;
- case FLOW_BLOCK_UNBIND:
- mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
- f, ingress);
- block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
- if (!block_cb)
- return -ENOENT;
-
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -1791,23 +1373,21 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
if (!enable) {
- if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
- mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
- !list_empty(&mlxsw_sp_port->mall_tc_list)) {
+ if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
+ mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
- mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
- mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
+ mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
+ mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
} else {
- mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
- mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
+ mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
+ mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
}
return 0;
}
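
The hw_tc hunk above keeps the existing guard but routes it through the renamed flow-block helpers: the feature cannot be switched off while either bound block still holds offloaded rules, and while it is off the block's disable_count makes the offload callback refuse new requests. Below is a minimal standalone sketch of that counter gating — illustrative only, not part of the patch, with hypothetical demo_* names:

/* Illustrative only -- not driver code. Models the rule_count/disable_count
 * gating used by mlxsw_sp_feature_hw_tc() above, with hypothetical names.
 */
#include <errno.h>
#include <stdbool.h>

struct demo_block {
	unsigned int rule_count;	/* offloaded tc rules on this block */
	unsigned int disable_count;	/* >0: refuse new offload requests */
};

static int demo_hw_tc_set(struct demo_block *ing, struct demo_block *eg,
			  bool enable)
{
	if (!enable) {
		if ((ing && ing->rule_count) || (eg && eg->rule_count))
			return -EINVAL;		/* active offloaded filters */
		if (ing)
			ing->disable_count++;
		if (eg)
			eg->disable_count++;
	} else {
		if (ing)
			ing->disable_count--;
		if (eg)
			eg->disable_count--;
	}
	return 0;
}

In practice this path is typically reached by toggling the hw-tc-offload feature with ethtool -K on the port netdevice.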
@@ -3695,7 +3275,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mapping = *port_mapping;
mlxsw_sp_port->link.autoneg = 1;
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
- INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
mlxsw_sp_port->pcpu_stats =
netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
@@ -3704,13 +3283,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_stats;
}
- mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
- GFP_KERNEL);
- if (!mlxsw_sp_port->sample) {
- err = -ENOMEM;
- goto err_alloc_sample;
- }
-
INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
&update_stats_cache);
@@ -3897,8 +3469,6 @@ err_dev_addr_init:
err_port_swid_set:
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
- kfree(mlxsw_sp_port->sample);
-err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
free_netdev(dev);
@@ -3926,7 +3496,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
- kfree(mlxsw_sp_port->sample);
free_percpu(mlxsw_sp_port->pcpu_stats);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
free_netdev(mlxsw_sp_port->dev);
@@ -4413,7 +3982,7 @@ static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
{
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
- struct psample_group *psample_group;
+ struct mlxsw_sp_port_sample *sample;
u32 size;
if (unlikely(!mlxsw_sp_port)) {
@@ -4421,22 +3990,14 @@ static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
local_port);
goto out;
}
- if (unlikely(!mlxsw_sp_port->sample)) {
- dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
- local_port);
- goto out;
- }
-
- size = mlxsw_sp_port->sample->truncate ?
- mlxsw_sp_port->sample->trunc_size : skb->len;
rcu_read_lock();
- psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
- if (!psample_group)
+ sample = rcu_dereference(mlxsw_sp_port->sample);
+ if (!sample)
goto out_unlock;
- psample_sample_packet(psample_group, skb, size,
- mlxsw_sp_port->dev->ifindex, 0,
- mlxsw_sp_port->sample->rate);
+ size = sample->truncate ? sample->trunc_size : skb->len;
+ psample_sample_packet(sample->psample_group, skb, size,
+ mlxsw_sp_port->dev->ifindex, 0, sample->rate);
out_unlock:
rcu_read_unlock();
out:
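
With the per-port sample state now published as a single __rcu pointer (see the spectrum.h hunk below), the RX trap handler above needs only one rcu_dereference() and no longer special-cases ports without sample support. A minimal kernel-style sketch of the publish/read/teardown pairing this relies on — illustrative only, not part of the patch, with hypothetical demo_* names:

/* Illustrative only -- not driver code; hypothetical names. Assumes the
 * writer side is serialised (RTNL in the driver above).
 */
#include <linux/rcupdate.h>
#include <linux/types.h>

struct demo_sample {
	u32 rate;
};

struct demo_port {
	struct demo_sample __rcu *sample;
};

static void demo_sample_enable(struct demo_port *port, struct demo_sample *s)
{
	/* Publish only after 's' is fully initialised. */
	rcu_assign_pointer(port->sample, s);
}

static void demo_sample_disable(struct demo_port *port)
{
	RCU_INIT_POINTER(port->sample, NULL);
	/* The driver frees the containing entry with kfree_rcu(), so readers
	 * that already picked up the old pointer can still finish.
	 */
}

static u32 demo_rx_rate(struct demo_port *port)
{
	struct demo_sample *s;
	u32 rate = 0;

	rcu_read_lock();
	s = rcu_dereference(port->sample);
	if (s)
		rate = s->rate;
	rcu_read_unlock();
	return rate;
}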
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index ca56e72cb4b7..147a5634244b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -109,25 +109,6 @@ struct mlxsw_sp_mid {
unsigned long *ports_in_mid; /* bits array */
};
-enum mlxsw_sp_port_mall_action_type {
- MLXSW_SP_PORT_MALL_MIRROR,
- MLXSW_SP_PORT_MALL_SAMPLE,
-};
-
-struct mlxsw_sp_port_mall_mirror_tc_entry {
- int span_id;
- bool ingress;
-};
-
-struct mlxsw_sp_port_mall_tc_entry {
- struct list_head list;
- unsigned long cookie;
- enum mlxsw_sp_port_mall_action_type type;
- union {
- struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
- };
-};
-
struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
@@ -211,7 +192,7 @@ struct mlxsw_sp_port_pcpu_stats {
};
struct mlxsw_sp_port_sample {
- struct psample_group __rcu *psample_group;
+ struct psample_group *psample_group;
u32 trunc_size;
u32 rate;
bool truncate;
@@ -274,21 +255,19 @@ struct mlxsw_sp_port {
* the same localport can have
* different mapping.
*/
- /* TC handles */
- struct list_head mall_tc_list;
struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ
struct rtnl_link_stats64 stats;
struct mlxsw_sp_port_xstats xstats;
struct delayed_work update_dw;
} periodic_hw_stats;
- struct mlxsw_sp_port_sample *sample;
+ struct mlxsw_sp_port_sample __rcu *sample;
struct list_head vlans_list;
struct mlxsw_sp_port_vlan *default_vlan;
struct mlxsw_sp_qdisc_state *qdisc;
unsigned acl_rule_count;
- struct mlxsw_sp_acl_block *ing_acl_block;
- struct mlxsw_sp_acl_block *eg_acl_block;
+ struct mlxsw_sp_flow_block *ing_flow_block;
+ struct mlxsw_sp_flow_block *eg_flow_block;
struct {
struct delayed_work shaper_dw;
struct hwtstamp_config hwtstamp_config;
@@ -654,17 +633,14 @@ struct mlxsw_sp_acl_rule_info {
unsigned int counter_index;
};
-struct mlxsw_sp_acl_block;
-struct mlxsw_sp_acl_ruleset;
-
-/* spectrum_acl.c */
-enum mlxsw_sp_acl_profile {
- MLXSW_SP_ACL_PROFILE_FLOWER,
- MLXSW_SP_ACL_PROFILE_MR,
-};
-
-struct mlxsw_sp_acl_block {
+/* spectrum_flow.c */
+struct mlxsw_sp_flow_block {
struct list_head binding_list;
+ struct {
+ struct list_head list;
+ unsigned int min_prio;
+ unsigned int max_prio;
+ } mall;
struct mlxsw_sp_acl_ruleset *ruleset_zero;
struct mlxsw_sp *mlxsw_sp;
unsigned int rule_count;
@@ -676,40 +652,100 @@ struct mlxsw_sp_acl_block {
struct net *net;
};
+struct mlxsw_sp_flow_block_binding {
+ struct list_head list;
+ struct net_device *dev;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ bool ingress;
+};
+
+static inline struct mlxsw_sp *
+mlxsw_sp_flow_block_mlxsw_sp(struct mlxsw_sp_flow_block *block)
+{
+ return block->mlxsw_sp;
+}
+
+static inline unsigned int
+mlxsw_sp_flow_block_rule_count(const struct mlxsw_sp_flow_block *block)
+{
+ return block ? block->rule_count : 0;
+}
+
+static inline void
+mlxsw_sp_flow_block_disable_inc(struct mlxsw_sp_flow_block *block)
+{
+ if (block)
+ block->disable_count++;
+}
+
+static inline void
+mlxsw_sp_flow_block_disable_dec(struct mlxsw_sp_flow_block *block)
+{
+ if (block)
+ block->disable_count--;
+}
+
+static inline bool
+mlxsw_sp_flow_block_disabled(const struct mlxsw_sp_flow_block *block)
+{
+ return block->disable_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_egress_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->egress_binding_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_ingress_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ingress_binding_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_mixed_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ingress_binding_count && block->egress_binding_count;
+}
+
+struct mlxsw_sp_flow_block *mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net);
+void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block);
+int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f);
+
+/* spectrum_acl.c */
+struct mlxsw_sp_acl_ruleset;
+
+enum mlxsw_sp_acl_profile {
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+ MLXSW_SP_ACL_PROFILE_MR,
+};
+
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
-struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
-unsigned int
-mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block);
-void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
-void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
-bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block);
-struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
- struct net *net);
-void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
-int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress,
- struct netlink_ext_ack *extack);
-int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
-bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block);
-bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block);
-bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block);
+
+int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding);
+void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile,
struct mlxsw_afk_element_usage *tmplt_elusage);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
+void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
@@ -736,7 +772,7 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct net_device *out_dev,
struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
@@ -857,22 +893,39 @@ extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
+/* spectrum_matchall.c */
+int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
+ unsigned int *p_min_prio, unsigned int *p_max_prio);
+
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
+int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ u32 chain_index, unsigned int *p_min_prio,
+ unsigned int *p_max_prio);
/* spectrum_qdisc.c */
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
index e31ec75ac035..a11d911302f1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -9,7 +9,7 @@
struct mlxsw_sp2_mr_tcam {
struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_acl_block *acl_block;
+ struct mlxsw_sp_flow_block *flow_block;
struct mlxsw_sp_acl_ruleset *ruleset4;
struct mlxsw_sp_acl_ruleset *ruleset6;
};
@@ -61,7 +61,7 @@ static int mlxsw_sp2_mr_tcam_ipv4_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
mlxsw_sp2_mr_tcam_usage_ipv4,
ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv4));
mr_tcam->ruleset4 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
- mr_tcam->acl_block,
+ mr_tcam->flow_block,
MLXSW_SP_L3_PROTO_IPV4,
MLXSW_SP_ACL_PROFILE_MR,
&elusage);
@@ -111,7 +111,7 @@ static int mlxsw_sp2_mr_tcam_ipv6_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
mlxsw_sp2_mr_tcam_usage_ipv6,
ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv6));
mr_tcam->ruleset6 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
- mr_tcam->acl_block,
+ mr_tcam->flow_block,
MLXSW_SP_L3_PROTO_IPV6,
MLXSW_SP_ACL_PROFILE_MR,
&elusage);
@@ -289,8 +289,8 @@ static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
int err;
mr_tcam->mlxsw_sp = mlxsw_sp;
- mr_tcam->acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, NULL);
- if (!mr_tcam->acl_block)
+ mr_tcam->flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, NULL);
+ if (!mr_tcam->flow_block)
return -ENOMEM;
err = mlxsw_sp2_mr_tcam_ipv4_init(mr_tcam);
@@ -306,7 +306,7 @@ static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
err_ipv6_init:
mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
err_ipv4_init:
- mlxsw_sp_acl_block_destroy(mr_tcam->acl_block);
+ mlxsw_sp_flow_block_destroy(mr_tcam->flow_block);
return err;
}
@@ -316,7 +316,7 @@ static void mlxsw_sp2_mr_tcam_fini(void *priv)
mlxsw_sp2_mr_tcam_ipv6_fini(mr_tcam);
mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
- mlxsw_sp_acl_block_destroy(mr_tcam->acl_block);
+ mlxsw_sp_flow_block_destroy(mr_tcam->flow_block);
}
const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 01cff711bbd2..47da9ee0045d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -40,15 +40,8 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk;
}
-struct mlxsw_sp_acl_block_binding {
- struct list_head list;
- struct net_device *dev;
- struct mlxsw_sp_port *mlxsw_sp_port;
- bool ingress;
-};
-
struct mlxsw_sp_acl_ruleset_ht_key {
- struct mlxsw_sp_acl_block *block;
+ struct mlxsw_sp_flow_block *block;
u32 chain_index;
const struct mlxsw_sp_acl_profile_ops *ops;
};
@@ -58,6 +51,8 @@ struct mlxsw_sp_acl_ruleset {
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
unsigned int ref_count;
+ unsigned int min_prio;
+ unsigned int max_prio;
unsigned long priv[];
/* priv has to be always the last item */
};
@@ -94,49 +89,6 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp->acl->dummy_fid;
}
-struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
-{
- return block->mlxsw_sp;
-}
-
-unsigned int
-mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block)
-{
- return block ? block->rule_count : 0;
-}
-
-void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
-{
- if (block)
- block->disable_count++;
-}
-
-void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
-{
- if (block)
- block->disable_count--;
-}
-
-bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block)
-{
- return block->disable_count;
-}
-
-bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->egress_binding_count;
-}
-
-bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->ingress_binding_count;
-}
-
-bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->ingress_binding_count && block->egress_binding_count;
-}
-
static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
@@ -144,10 +96,9 @@ mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
return ruleset->ref_count == 2;
}
-static int
-mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_acl_block_binding *binding)
+int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding)
{
struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
@@ -156,10 +107,9 @@ mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
binding->mlxsw_sp_port, binding->ingress);
}
-static void
-mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_acl_block_binding *binding)
+void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding)
{
struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
@@ -168,18 +118,12 @@ mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
binding->mlxsw_sp_port, binding->ingress);
}
-static bool
-mlxsw_sp_acl_ruleset_block_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->ruleset_zero;
-}
-
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- struct mlxsw_sp_acl_block *block)
+ struct mlxsw_sp_flow_block *block)
{
- struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_flow_block_binding *binding;
int err;
block->ruleset_zero = ruleset;
@@ -202,122 +146,18 @@ rollback:
static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- struct mlxsw_sp_acl_block *block)
+ struct mlxsw_sp_flow_block *block)
{
- struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_flow_block_binding *binding;
list_for_each_entry(binding, &block->binding_list, list)
mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
block->ruleset_zero = NULL;
}
-struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
- struct net *net)
-{
- struct mlxsw_sp_acl_block *block;
-
- block = kzalloc(sizeof(*block), GFP_KERNEL);
- if (!block)
- return NULL;
- INIT_LIST_HEAD(&block->binding_list);
- block->mlxsw_sp = mlxsw_sp;
- block->net = net;
- return block;
-}
-
-void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
-{
- WARN_ON(!list_empty(&block->binding_list));
- kfree(block);
-}
-
-static struct mlxsw_sp_acl_block_binding *
-mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
-{
- struct mlxsw_sp_acl_block_binding *binding;
-
- list_for_each_entry(binding, &block->binding_list, list)
- if (binding->mlxsw_sp_port == mlxsw_sp_port &&
- binding->ingress == ingress)
- return binding;
- return NULL;
-}
-
-int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_acl_block_binding *binding;
- int err;
-
- if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
- return -EEXIST;
-
- if (ingress && block->ingress_blocker_rule_count) {
- NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
- return -EOPNOTSUPP;
- }
-
- if (!ingress && block->egress_blocker_rule_count) {
- NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
- return -EOPNOTSUPP;
- }
-
- binding = kzalloc(sizeof(*binding), GFP_KERNEL);
- if (!binding)
- return -ENOMEM;
- binding->mlxsw_sp_port = mlxsw_sp_port;
- binding->ingress = ingress;
-
- if (mlxsw_sp_acl_ruleset_block_bound(block)) {
- err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
- if (err)
- goto err_ruleset_bind;
- }
-
- if (ingress)
- block->ingress_binding_count++;
- else
- block->egress_binding_count++;
- list_add(&binding->list, &block->binding_list);
- return 0;
-
-err_ruleset_bind:
- kfree(binding);
- return err;
-}
-
-int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress)
-{
- struct mlxsw_sp_acl_block_binding *binding;
-
- binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
- if (!binding)
- return -ENOENT;
-
- list_del(&binding->list);
-
- if (ingress)
- block->ingress_binding_count--;
- else
- block->egress_binding_count--;
-
- if (mlxsw_sp_acl_ruleset_block_bound(block))
- mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
-
- kfree(binding);
- return 0;
-}
-
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops,
struct mlxsw_afk_element_usage *tmplt_elusage)
{
@@ -340,7 +180,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_init;
err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
- tmplt_elusage);
+ tmplt_elusage, &ruleset->min_prio,
+ &ruleset->max_prio);
if (err)
goto err_ops_ruleset_add;
@@ -388,7 +229,7 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
@@ -403,7 +244,7 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
@@ -421,7 +262,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile,
struct mlxsw_afk_element_usage *tmplt_elusage)
{
@@ -455,6 +296,14 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
return ops->ruleset_group_id(ruleset->priv);
}
+void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ *p_min_prio = ruleset->min_prio;
+ *p_max_prio = ruleset->max_prio;
+}
+
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
struct mlxsw_afa_block *afa_block)
@@ -584,11 +433,11 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct net_device *out_dev,
struct netlink_ext_ack *extack)
{
- struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_flow_block_binding *binding;
struct mlxsw_sp_port *in_port;
if (!list_is_singular(&block->binding_list)) {
@@ -596,7 +445,7 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
binding = list_first_entry(&block->binding_list,
- struct mlxsw_sp_acl_block_binding, list);
+ struct mlxsw_sp_flow_block_binding, list);
in_port = binding->mlxsw_sp_port;
return mlxsw_afa_block_append_mirror(rulei->act_block,
@@ -818,7 +667,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
int err;
err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
@@ -862,18 +711,17 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
- ruleset->ht_key.block->rule_count--;
+ block->rule_count--;
mutex_lock(&mlxsw_sp->acl->rules_lock);
list_del(&rule->list);
mutex_unlock(&mlxsw_sp->acl->rules_lock);
if (!ruleset->ht_key.chain_index &&
mlxsw_sp_acl_ruleset_is_singular(ruleset))
- mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
- ruleset->ht_key.block);
+ mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
ops->rule_del(mlxsw_sp, rule->priv);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index e47d1d286e93..73d56012654b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -136,28 +136,35 @@ mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
const struct net_device *out_dev,
bool ingress, int *p_span_id)
{
- struct mlxsw_sp_port *in_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = priv;
- enum mlxsw_sp_span_type type;
+ int err;
- type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- in_port = mlxsw_sp->ports[local_in_port];
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, out_dev, p_span_id);
+ if (err)
+ return err;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_in_port];
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
+ if (err)
+ goto err_analyzed_port_get;
- return mlxsw_sp_span_mirror_add(in_port, out_dev, type,
- false, p_span_id);
+ return 0;
+
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, *p_span_id);
+ return err;
}
static void
mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
{
+ struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_port *in_port;
- enum mlxsw_sp_span_type type;
-
- type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- in_port = mlxsw_sp->ports[local_in_port];
- mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
+ mlxsw_sp_port = mlxsw_sp->ports[local_in_port];
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
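
The rewritten mirror hooks above replace the one-shot mlxsw_sp_span_mirror_add()/del() pair with the SPAN agent API: two resources are acquired in order and unwound in reverse on failure (the err_analyzed_port_get label), and the matchall code added later in this patch uses the same shape with a third step for the trigger binding. A generic, self-contained sketch of that acquire-in-order / release-in-reverse ladder — illustrative only, not part of the patch; the demo_* helpers are hypothetical stand-ins:

/* Illustrative only -- not driver code. demo_get_*()/demo_put_*() stand in
 * for "get SPAN agent", "get analyzed port" and "bind trigger".
 */
struct demo_ctx {
	int dummy;
};

static int demo_get_agent(struct demo_ctx *ctx)    { (void)ctx; return 0; }
static void demo_put_agent(struct demo_ctx *ctx)   { (void)ctx; }
static int demo_get_port(struct demo_ctx *ctx)     { (void)ctx; return 0; }
static void demo_put_port(struct demo_ctx *ctx)    { (void)ctx; }
static int demo_bind_trigger(struct demo_ctx *ctx) { (void)ctx; return 0; }

static int demo_mirror_add(struct demo_ctx *ctx)
{
	int err;

	err = demo_get_agent(ctx);
	if (err)
		return err;

	err = demo_get_port(ctx);
	if (err)
		goto err_get_port;

	err = demo_bind_trigger(ctx);
	if (err)
		goto err_bind_trigger;

	return 0;

err_bind_trigger:
	demo_put_port(ctx);
err_get_port:
	demo_put_agent(ctx);
	return err;
}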
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index a6e30e020b5c..5c020403342f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -179,6 +179,8 @@ struct mlxsw_sp_acl_tcam_vgroup {
bool tmplt_elusage_set;
struct mlxsw_afk_element_usage tmplt_elusage;
bool vregion_rehash_enabled;
+ unsigned int *p_min_prio;
+ unsigned int *p_max_prio;
};
struct mlxsw_sp_acl_tcam_rehash_ctx {
@@ -316,13 +318,17 @@ mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_pattern *patterns,
unsigned int patterns_count,
struct mlxsw_afk_element_usage *tmplt_elusage,
- bool vregion_rehash_enabled)
+ bool vregion_rehash_enabled,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
int err;
vgroup->patterns = patterns;
vgroup->patterns_count = patterns_count;
vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
+ vgroup->p_min_prio = p_min_prio;
+ vgroup->p_max_prio = p_max_prio;
if (tmplt_elusage) {
vgroup->tmplt_elusage_set = true;
@@ -416,6 +422,21 @@ mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
return vchunk->priority;
}
+static void
+mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+
+ if (list_empty(&vgroup->vregion_list))
+ return;
+ vregion = list_first_entry(&vgroup->vregion_list,
+ typeof(*vregion), list);
+ *vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
+ vregion = list_last_entry(&vgroup->vregion_list,
+ typeof(*vregion), list);
+ *vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
+}
+
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
@@ -1035,6 +1056,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
}
list_add_tail(&vchunk->list, pos);
mutex_unlock(&vregion->lock);
+ mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
return vchunk;
@@ -1066,6 +1088,7 @@ mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_vchunk_ht_params);
mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
kfree(vchunk);
+ mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
}
static struct mlxsw_sp_acl_tcam_vchunk *
@@ -1582,14 +1605,17 @@ static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage)
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
- tmplt_elusage, true);
+ tmplt_elusage, true,
+ p_min_prio, p_max_prio);
}
static void
@@ -1698,7 +1724,9 @@ static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage)
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
int err;
@@ -1706,7 +1734,8 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
- tmplt_elusage, false);
+ tmplt_elusage, false,
+ p_min_prio, p_max_prio);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 96437992b102..a41df10ade9b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -42,7 +42,8 @@ struct mlxsw_sp_acl_profile_ops {
size_t ruleset_priv_size;
int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage);
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio, unsigned int *p_max_prio);
void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
new file mode 100644
index 000000000000..47b66f347ff1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <net/net_namespace.h>
+
+#include "spectrum.h"
+
+struct mlxsw_sp_flow_block *
+mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp, struct net *net)
+{
+ struct mlxsw_sp_flow_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->binding_list);
+ INIT_LIST_HEAD(&block->mall.list);
+ block->mlxsw_sp = mlxsw_sp;
+ block->net = net;
+ return block;
+}
+
+void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block)
+{
+ WARN_ON(!list_empty(&block->binding_list));
+ kfree(block);
+}
+
+static struct mlxsw_sp_flow_block_binding *
+mlxsw_sp_flow_block_lookup(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+ binding->ingress == ingress)
+ return binding;
+ return NULL;
+}
+
+static bool
+mlxsw_sp_flow_block_ruleset_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ int err;
+
+ if (WARN_ON(mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress)))
+ return -EEXIST;
+
+ if (ingress && block->ingress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ingress && block->egress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port);
+ if (err)
+ return err;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding) {
+ err = -ENOMEM;
+ goto err_binding_alloc;
+ }
+ binding->mlxsw_sp_port = mlxsw_sp_port;
+ binding->ingress = ingress;
+
+ if (mlxsw_sp_flow_block_ruleset_bound(block)) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ if (ingress)
+ block->ingress_binding_count++;
+ else
+ block->egress_binding_count++;
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+err_binding_alloc:
+ mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
+
+ return err;
+}
+
+static int mlxsw_sp_flow_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+
+ binding = mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (ingress)
+ block->ingress_binding_count--;
+ else
+ block->egress_binding_count--;
+
+ if (mlxsw_sp_flow_block_ruleset_bound(block))
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+ kfree(binding);
+
+ mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
+
+ return 0;
+}
+
+static int mlxsw_sp_flow_block_mall_cb(struct mlxsw_sp_flow_block *flow_block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlxsw_sp_mall_replace(mlxsw_sp, flow_block, f);
+ case TC_CLSMATCHALL_DESTROY:
+ mlxsw_sp_mall_destroy(flow_block, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_flow_block_flower_cb(struct mlxsw_sp_flow_block *flow_block,
+ struct flow_cls_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return mlxsw_sp_flower_replace(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_DESTROY:
+ mlxsw_sp_flower_destroy(mlxsw_sp, flow_block, f);
+ return 0;
+ case FLOW_CLS_STATS:
+ return mlxsw_sp_flower_stats(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_TMPLT_CREATE:
+ return mlxsw_sp_flower_tmplt_create(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_TMPLT_DESTROY:
+ mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, flow_block, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_flow_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct mlxsw_sp_flow_block *flow_block = cb_priv;
+
+ if (mlxsw_sp_flow_block_disabled(flow_block))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return mlxsw_sp_flow_block_mall_cb(flow_block, type_data);
+ case TC_SETUP_CLSFLOWER:
+ return mlxsw_sp_flow_block_flower_cb(flow_block, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlxsw_sp_tc_block_release(void *cb_priv)
+{
+ struct mlxsw_sp_flow_block *flow_block = cb_priv;
+
+ mlxsw_sp_flow_block_destroy(flow_block);
+}
+
+static LIST_HEAD(mlxsw_sp_block_cb_list);
+
+static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_flow_block *flow_block;
+ struct flow_block_cb *block_cb;
+ bool register_block = false;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
+ mlxsw_sp);
+ if (!block_cb) {
+ flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, f->net);
+ if (!flow_block)
+ return -ENOMEM;
+ block_cb = flow_block_cb_alloc(mlxsw_sp_flow_block_cb,
+ mlxsw_sp, flow_block,
+ mlxsw_sp_tc_block_release);
+ if (IS_ERR(block_cb)) {
+ mlxsw_sp_flow_block_destroy(flow_block);
+ err = PTR_ERR(block_cb);
+ goto err_cb_register;
+ }
+ register_block = true;
+ } else {
+ flow_block = flow_block_cb_priv(block_cb);
+ }
+ flow_block_cb_incref(block_cb);
+ err = mlxsw_sp_flow_block_bind(mlxsw_sp, flow_block,
+ mlxsw_sp_port, ingress, f->extack);
+ if (err)
+ goto err_block_bind;
+
+ if (ingress)
+ mlxsw_sp_port->ing_flow_block = flow_block;
+ else
+ mlxsw_sp_port->eg_flow_block = flow_block;
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
+ }
+
+ return 0;
+
+err_block_bind:
+ if (!flow_block_cb_decref(block_cb))
+ flow_block_cb_free(block_cb);
+err_cb_register:
+ return err;
+}
+
+static void mlxsw_sp_setup_tc_block_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_flow_block *flow_block;
+ struct flow_block_cb *block_cb;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
+ mlxsw_sp);
+ if (!block_cb)
+ return;
+
+ if (ingress)
+ mlxsw_sp_port->ing_flow_block = NULL;
+ else
+ mlxsw_sp_port->eg_flow_block = NULL;
+
+ flow_block = flow_block_cb_priv(block_cb);
+ err = mlxsw_sp_flow_block_unbind(mlxsw_sp, flow_block,
+ mlxsw_sp_port, ingress);
+ if (!err && !flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+}
+
+int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f)
+{
+ bool ingress;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ ingress = true;
+ else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ ingress = false;
+ else
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &mlxsw_sp_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return mlxsw_sp_setup_tc_block_bind(mlxsw_sp_port, f, ingress);
+ case FLOW_BLOCK_UNBIND:
+ mlxsw_sp_setup_tc_block_unbind(mlxsw_sp_port, f, ingress);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
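
The new spectrum_flow.c above collapses the old per-port matchall callback and per-block flower callback into a single mlxsw_sp_flow_block_cb() per shared block: the first port to bind allocates the flow block and registers the callback, later binders only take a reference via flow_block_cb_incref(), and the callback is unregistered when the last binder drops it. This is what lets several ports share one hardware block through tc's shared-block mechanism (the ingress_block/egress_block qdisc options). A tiny standalone model of that first-binder/last-unbinder bookkeeping — illustrative only, not part of the patch, hypothetical names:

/* Illustrative only -- not driver code. Models the shared-block refcounting
 * in mlxsw_sp_setup_tc_block_bind()/_unbind() above.
 */
#include <stdbool.h>

struct demo_shared {
	unsigned int refcnt;
	bool registered;	/* flow_block_cb_add()/_remove() in the driver */
};

static void demo_block_bind(struct demo_shared *shared)
{
	if (!shared->refcnt)
		shared->registered = true;	/* first binder registers */
	shared->refcnt++;
}

static void demo_block_unbind(struct demo_shared *shared)
{
	if (!shared->refcnt)
		return;
	if (!--shared->refcnt)
		shared->registered = false;	/* last binder unregisters */
}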
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 890b078851c9..b286fe158820 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -15,7 +15,7 @@
#include "core_acl_flex_keys.h"
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct flow_action *flow_action,
struct netlink_ext_ack *extack)
@@ -54,11 +54,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
case FLOW_ACTION_DROP: {
bool ingress;
- if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
+ if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
return -EOPNOTSUPP;
}
- ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
+ ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
act->cookie, extack);
if (err) {
@@ -107,7 +107,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid;
u16 fid_index;
- if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ if (mlxsw_sp_flow_block_is_egress_bound(block)) {
NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
return -EOPNOTSUPP;
}
@@ -191,7 +191,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
struct flow_cls_offload *f,
- struct mlxsw_sp_acl_block *block)
+ struct mlxsw_sp_flow_block *block)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -372,7 +372,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct flow_cls_offload *f)
{
@@ -461,7 +461,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
- if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ if (mlxsw_sp_flow_block_is_egress_bound(block)) {
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP;
}
@@ -505,8 +505,36 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
f->common.extack);
}
+static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+ unsigned int mall_min_prio;
+ unsigned int mall_max_prio;
+ int err;
+
+ err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
+ &mall_min_prio, &mall_max_prio);
+ if (err) {
+ if (err == -ENOENT)
+ /* No matchall filters installed on this chain. */
+ return 0;
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+ return err;
+ }
+ if (ingress && f->common.prio <= mall_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ if (!ingress && f->common.prio >= mall_max_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_rule_info *rulei;
@@ -514,6 +542,10 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule;
int err;
+ err = mlxsw_sp_flower_mall_prio_check(block, f);
+ if (err)
+ return err;
+
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
@@ -553,7 +585,7 @@ err_rule_create:
}
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -575,7 +607,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
}
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
@@ -612,7 +644,7 @@ err_rule_get_stats:
}
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -633,7 +665,7 @@ int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
}
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -647,3 +679,23 @@ void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
+
+int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ u32 chain_index, unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+
+ ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
+ chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER);
+ if (IS_ERR(ruleset))
+ /* In case there are no flower rules, the caller
+ * receives -ENOENT to indicate there is no need
+ * to check the priorities.
+ */
+ return PTR_ERR(ruleset);
+ mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
+ return 0;
+}
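
The flower side above and the matchall side below now cross-check each other's priorities: a new flower rule is rejected on an ingress-bound block unless its priority is numerically higher (weaker) than every installed matchall rule, and on an egress-bound block unless it is numerically lower, reflecting the fixed order in which the device applies mirror/sample triggers relative to the ACL lookup. A small standalone model of the accept/reject decision with concrete numbers — illustrative only, not part of the patch:

/* Illustrative only -- not driver code. Models the decision made by
 * mlxsw_sp_flower_mall_prio_check() above.
 */
#include <stdbool.h>
#include <stdio.h>

/* Accept a new flower rule at 'prio' on a block that already holds
 * matchall rules spanning priorities [mall_min, mall_max].
 */
static bool flower_prio_ok(bool ingress, unsigned int prio,
			   unsigned int mall_min, unsigned int mall_max)
{
	if (ingress)
		return prio > mall_min;	/* flower must sit behind matchall */
	return prio < mall_max;		/* egress: flower must sit in front */
}

int main(void)
{
	/* Ingress block with a matchall mirror at priority 10:
	 * a flower rule at priority 20 is accepted (prints 1),
	 * one at priority 5 is rejected (prints 0).
	 */
	printf("%d %d\n", flower_prio_ok(true, 20, 10, 10),
	       flower_prio_ok(true, 5, 10, 10));
	return 0;
}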
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
new file mode 100644
index 000000000000..f1a44a8eda55
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/flow_offload.h>
+
+#include "spectrum.h"
+#include "spectrum_span.h"
+#include "reg.h"
+
+enum mlxsw_sp_mall_action_type {
+ MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
+ MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
+};
+
+struct mlxsw_sp_mall_mirror_entry {
+ const struct net_device *to_dev;
+ int span_id;
+};
+
+struct mlxsw_sp_mall_entry {
+ struct list_head list;
+ unsigned long cookie;
+ unsigned int priority;
+ enum mlxsw_sp_mall_action_type type;
+ bool ingress;
+ union {
+ struct mlxsw_sp_mall_mirror_entry mirror;
+ struct mlxsw_sp_port_sample sample;
+ };
+ struct rcu_head rcu;
+};
+
+static struct mlxsw_sp_mall_entry *
+mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list)
+ if (mall_entry->cookie == cookie)
+ return mall_entry;
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_trigger_parms parms;
+ enum mlxsw_sp_span_trigger trigger;
+ int err;
+
+ if (!mall_entry->mirror.to_dev) {
+ netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev,
+ &mall_entry->mirror.span_id);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
+ mall_entry->ingress);
+ if (err)
+ goto err_analyzed_port_get;
+
+ trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ parms.span_id = mall_entry->mirror.span_id;
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
+ &parms);
+ if (err)
+ goto err_agent_bind;
+
+ return 0;
+
+err_agent_bind:
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
+ return err;
+}
+
+static void
+mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_trigger_parms parms;
+ enum mlxsw_sp_span_trigger trigger;
+
+ trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ parms.span_id = mall_entry->mirror.span_id;
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
+}
+
+static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable, u32 rate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char mpsc_pl[MLXSW_REG_MPSC_LEN];
+
+ mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
+}
+
+static int
+mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ int err;
+
+ if (rtnl_dereference(mlxsw_sp_port->sample)) {
+ netdev_err(mlxsw_sp_port->dev, "sample already active\n");
+ return -EEXIST;
+ }
+ rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);
+
+ err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
+ mall_entry->sample.rate);
+ if (err)
+ goto err_port_sample_set;
+ return 0;
+
+err_port_sample_set:
+ RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
+ return err;
+}
+
+static void
+mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ if (!mlxsw_sp_port->sample)
+ return;
+
+ mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
+ RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
+}
+
+static int
+mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ switch (mall_entry->type) {
+ case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+ return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
+ case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
+ return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+static void
+mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ switch (mall_entry->type) {
+ case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+ mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
+ break;
+ case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
+ mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ if (list_empty(&block->mall.list))
+ return;
+ block->mall.min_prio = UINT_MAX;
+ block->mall.max_prio = 0;
+ list_for_each_entry(mall_entry, &block->mall.list, list) {
+ if (mall_entry->priority < block->mall.min_prio)
+ block->mall.min_prio = mall_entry->priority;
+ if (mall_entry->priority > block->mall.max_prio)
+ block->mall.max_prio = mall_entry->priority;
+ }
+}
+
+int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_mall_entry *mall_entry;
+ __be16 protocol = f->common.protocol;
+ struct flow_action_entry *act;
+ unsigned int flower_min_prio;
+ unsigned int flower_max_prio;
+ bool flower_prio_valid;
+ int err;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (f->common.chain_index) {
+ NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
+ NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
+ &flower_min_prio, &flower_max_prio);
+ if (err) {
+ if (err != -ENOENT) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
+ return err;
+ }
+ flower_prio_valid = false;
+ /* No flower filters are installed in specified chain. */
+ } else {
+ flower_prio_valid = true;
+ }
+
+ mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
+ if (!mall_entry)
+ return -ENOMEM;
+ mall_entry->cookie = f->cookie;
+ mall_entry->priority = f->common.prio;
+ mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+
+ act = &f->rule->action.entries[0];
+
+ if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
+ if (flower_prio_valid && mall_entry->ingress &&
+ mall_entry->priority >= flower_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (flower_prio_valid && !mall_entry->ingress &&
+ mall_entry->priority <= flower_max_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
+ mall_entry->mirror.to_dev = act->dev;
+ } else if (act->id == FLOW_ACTION_SAMPLE &&
+ protocol == htons(ETH_P_ALL)) {
+ if (!mall_entry->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (flower_prio_valid &&
+ mall_entry->priority >= flower_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
+ NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
+ mall_entry->sample.psample_group = act->sample.psample_group;
+ mall_entry->sample.truncate = act->sample.truncate;
+ mall_entry->sample.trunc_size = act->sample.trunc_size;
+ mall_entry->sample.rate = act->sample.rate;
+ } else {
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
+ mall_entry);
+ if (err)
+ goto rollback;
+ }
+
+ block->rule_count++;
+ if (mall_entry->ingress)
+ block->egress_blocker_rule_count++;
+ else
+ block->ingress_blocker_rule_count++;
+ list_add_tail(&mall_entry->list, &block->mall.list);
+ mlxsw_sp_mall_prio_update(block);
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
+errout:
+ kfree(mall_entry);
+ return err;
+}
+
+void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
+ if (!mall_entry) {
+ NL_SET_ERR_MSG(f->common.extack, "Entry not found");
+ return;
+ }
+
+ list_del(&mall_entry->list);
+ if (mall_entry->ingress)
+ block->egress_blocker_rule_count--;
+ else
+ block->ingress_blocker_rule_count--;
+ block->rule_count--;
+ list_for_each_entry(binding, &block->binding_list, list)
+ mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
+ kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
+ mlxsw_sp_mall_prio_update(block);
+}
+
+int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+ int err;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list) {
+ err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
+ list)
+ mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
+ return err;
+}
+
+void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list)
+ mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
+}
+
+int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
+ unsigned int *p_min_prio, unsigned int *p_max_prio)
+{
+ if (chain_index || list_empty(&block->mall.list))
+ /* In case there are no matchall rules, the caller
+ * receives -ENOENT to indicate there is no need
+ * to check the priorities.
+ */
+ return -ENOENT;
+ *p_min_prio = block->mall.min_prio;
+ *p_max_prio = block->mall.max_prio;
+ return 0;
+}
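
For context, a minimal sketch of how the matchall helpers added above are typically reached from the driver's flow block callback. The example_* dispatcher and the block->mlxsw_sp accessor are assumptions for illustration; only mlxsw_sp_mall_replace() and mlxsw_sp_mall_destroy() come from this patch.

	static int
	example_setup_tc_cls_matchall(struct mlxsw_sp_flow_block *block,
				      struct tc_cls_matchall_offload *f)
	{
		struct mlxsw_sp *mlxsw_sp = block->mlxsw_sp; /* assumed field */

		switch (f->command) {
		case TC_CLSMATCHALL_REPLACE:
			/* Validates the action, allocates a mall entry and
			 * offloads it to every port the block is bound to.
			 */
			return mlxsw_sp_mall_replace(mlxsw_sp, block, f);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_mall_destroy(block, f);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}
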
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index d5bca1be3ef5..71aee4914619 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2999,6 +2999,7 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
+ val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
}
return jhash(&val, sizeof(val), seed);
default:
@@ -3012,11 +3013,14 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
{
unsigned int val = fib6_entry->nrt6;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
- struct net_device *dev;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+ struct net_device *dev = fib6_nh->fib_nh_dev;
+ struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
+
val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
+ val ^= jhash(gw, sizeof(*gw), seed);
}
return jhash(&val, sizeof(val), seed);
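
The hunk above mixes each IPv6 nexthop's gateway into the group hash alongside the ifindex, so groups that differ only in their gateway addresses hash differently. A standalone sketch of the pattern, with hypothetical array parameters for illustration:

	#include <linux/types.h>
	#include <linux/jhash.h>
	#include <linux/in6.h>

	/* Hash one value per nexthop (ifindex XOR gateway), then hash the
	 * accumulated value once more, mirroring the code above.
	 */
	static u32 example_nh6_group_hash(const int *ifindex,
					  const struct in6_addr *gw,
					  unsigned int count, u32 seed)
	{
		unsigned int val = count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			val ^= jhash(&ifindex[i], sizeof(ifindex[i]), seed);
			val ^= jhash(&gw[i], sizeof(gw[i]), seed);
		}
		return jhash(&val, sizeof(val), seed);
	}
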
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 9fb2e9d93929..304eb8c3d8bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -3,6 +3,8 @@
#include <linux/if_bridge.h>
#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
@@ -19,9 +21,27 @@
struct mlxsw_sp_span {
struct work_struct work;
struct mlxsw_sp *mlxsw_sp;
+ struct list_head analyzed_ports_list;
+ struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
+ struct list_head trigger_entries_list;
atomic_t active_entries_count;
int entries_count;
- struct mlxsw_sp_span_entry entries[0];
+ struct mlxsw_sp_span_entry entries[];
+};
+
+struct mlxsw_sp_span_analyzed_port {
+ struct list_head list; /* Member of analyzed_ports_list */
+ refcount_t ref_count;
+ u8 local_port;
+ bool ingress;
+};
+
+struct mlxsw_sp_span_trigger_entry {
+ struct list_head list; /* Member of trigger_entries_list */
+ refcount_t ref_count;
+ u8 local_port;
+ enum mlxsw_sp_span_trigger trigger;
+ struct mlxsw_sp_span_trigger_parms parms;
};
static void mlxsw_sp_span_respin_work(struct work_struct *work);
@@ -48,15 +68,14 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
span->entries_count = entries_count;
atomic_set(&span->active_entries_count, 0);
+ mutex_init(&span->analyzed_ports_lock);
+ INIT_LIST_HEAD(&span->analyzed_ports_list);
+ INIT_LIST_HEAD(&span->trigger_entries_list);
span->mlxsw_sp = mlxsw_sp;
mlxsw_sp->span = span;
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
-
- INIT_LIST_HEAD(&curr->bound_ports_list);
- curr->id = i;
- }
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++)
+ mlxsw_sp->span->entries[i].id = i;
devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
mlxsw_sp_span_occ_get, mlxsw_sp);
@@ -68,16 +87,13 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- int i;
cancel_work_sync(&mlxsw_sp->span->work);
devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
-
- WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
- }
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
+ mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
kfree(mlxsw_sp->span);
}
@@ -130,7 +146,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
.can_handle = mlxsw_sp_port_dev_check,
- .parms = mlxsw_sp_span_entry_phys_parms,
+ .parms_set = mlxsw_sp_span_entry_phys_parms,
.configure = mlxsw_sp_span_entry_phys_configure,
.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};
@@ -418,7 +434,7 @@ mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
.can_handle = netif_is_gretap,
- .parms = mlxsw_sp_span_entry_gretap4_parms,
+ .parms_set = mlxsw_sp_span_entry_gretap4_parms,
.configure = mlxsw_sp_span_entry_gretap4_configure,
.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
@@ -519,7 +535,7 @@ mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
.can_handle = netif_is_ip6gretap,
- .parms = mlxsw_sp_span_entry_gretap6_parms,
+ .parms_set = mlxsw_sp_span_entry_gretap6_parms,
.configure = mlxsw_sp_span_entry_gretap6_configure,
.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
@@ -575,7 +591,7 @@ mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
.can_handle = mlxsw_sp_span_vlan_can_handle,
- .parms = mlxsw_sp_span_entry_vlan_parms,
+ .parms_set = mlxsw_sp_span_entry_vlan_parms,
.configure = mlxsw_sp_span_entry_vlan_configure,
.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};
@@ -612,7 +628,7 @@ mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
}
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
- .parms = mlxsw_sp_span_entry_nop_parms,
+ .parms_set = mlxsw_sp_span_entry_nop_parms,
.configure = mlxsw_sp_span_entry_nop_configure,
.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};
@@ -622,18 +638,27 @@ mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry,
struct mlxsw_sp_span_parms sparms)
{
- if (sparms.dest_port) {
- if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
- netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
- sparms.dest_port->dev->name);
- sparms.dest_port = NULL;
- } else if (span_entry->ops->configure(span_entry, sparms)) {
- netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
- sparms.dest_port->dev->name);
- sparms.dest_port = NULL;
- }
+ int err;
+
+ if (!sparms.dest_port)
+ goto set_parms;
+
+ if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
+ netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
+ sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ goto set_parms;
+ }
+
+ err = span_entry->ops->configure(span_entry, sparms);
+ if (err) {
+ netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
+ sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ goto set_parms;
}
+set_parms:
span_entry->parms = sparms;
}
@@ -655,7 +680,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
/* find a free entry to use */
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- if (!mlxsw_sp->span->entries[i].ref_count) {
+ if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
span_entry = &mlxsw_sp->span->entries[i];
break;
}
@@ -665,7 +690,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
atomic_inc(&mlxsw_sp->span->active_entries_count);
span_entry->ops = ops;
- span_entry->ref_count = 1;
+ refcount_set(&span_entry->ref_count, 1);
span_entry->to_dev = to_dev;
mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);
@@ -688,7 +713,7 @@ mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
- if (curr->ref_count && curr->to_dev == to_dev)
+ if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
return curr;
}
return NULL;
@@ -709,7 +734,7 @@ mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
- if (curr->ref_count && curr->id == span_id)
+ if (refcount_read(&curr->ref_count) && curr->id == span_id)
return curr;
}
return NULL;
@@ -726,7 +751,7 @@ mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
if (span_entry) {
/* Already exists, just take a reference */
- span_entry->ref_count++;
+ refcount_inc(&span_entry->ref_count);
return span_entry;
}
@@ -736,32 +761,13 @@ mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry)
{
- WARN_ON(!span_entry->ref_count);
- if (--span_entry->ref_count == 0)
+ if (refcount_dec_and_test(&span_entry->ref_count))
mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
return 0;
}
-static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- struct mlxsw_sp_span_inspected_port *p;
- int i;
-
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
-
- list_for_each_entry(p, &curr->bound_ports_list, list)
- if (p->local_port == port->local_port &&
- p->type == MLXSW_SP_SPAN_EGRESS)
- return true;
- }
-
- return false;
-}
-
static int
-mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char sbib_pl[MLXSW_REG_SBIB_LEN];
@@ -780,20 +786,54 @@ mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
+static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port)
+{
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+
+ mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+}
+
+static struct mlxsw_sp_span_analyzed_port *
+mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
+ bool ingress)
+{
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+
+ list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
+ if (analyzed_port->local_port == local_port &&
+ analyzed_port->ingress == ingress)
+ return analyzed_port;
+ }
+
+ return NULL;
+}
+
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ int err = 0;
+
/* If port is egress mirrored, the shared buffer size should be
* updated according to the mtu value
*/
- if (mlxsw_sp_span_is_egress_mirror(port))
- return mlxsw_sp_span_port_buffsize_update(port, mtu);
- return 0;
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
+
+ if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span, port->local_port,
+ false))
+ err = mlxsw_sp_span_port_buffer_update(port, mtu);
+
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
+
+ return err;
}
void mlxsw_sp_span_speed_update_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
span.speed_update_dw);
@@ -801,237 +841,368 @@ void mlxsw_sp_span_speed_update_work(struct work_struct *work)
/* If port is egress mirrored, the shared buffer size should be
* updated according to the speed value.
*/
- if (mlxsw_sp_span_is_egress_mirror(mlxsw_sp_port))
- mlxsw_sp_span_port_buffsize_update(mlxsw_sp_port,
- mlxsw_sp_port->dev->mtu);
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
+
+ if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
+ mlxsw_sp_port->local_port, false))
+ mlxsw_sp_span_port_buffer_update(mlxsw_sp_port,
+ mlxsw_sp_port->dev->mtu);
+
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
-static struct mlxsw_sp_span_inspected_port *
-mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- struct mlxsw_sp_port *port,
- bool bind)
+static const struct mlxsw_sp_span_entry_ops *
+mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev)
{
- struct mlxsw_sp_span_inspected_port *p;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
+ if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
+ return mlxsw_sp_span_entry_types[i];
- list_for_each_entry(p, &span_entry->bound_ports_list, list)
- if (type == p->type &&
- port->local_port == p->local_port &&
- bind == p->bound)
- return p;
return NULL;
}
-static int
-mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
+static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char mpar_pl[MLXSW_REG_MPAR_LEN];
- int pa_id = span_entry->id;
+ struct mlxsw_sp_span *span;
+ struct mlxsw_sp *mlxsw_sp;
+ int i, err;
+
+ span = container_of(work, struct mlxsw_sp_span, work);
+ mlxsw_sp = span->mlxsw_sp;
+
+ rtnl_lock();
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
+ struct mlxsw_sp_span_parms sparms = {NULL};
+
+ if (!refcount_read(&curr->ref_count))
+ continue;
- /* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+ err = curr->ops->parms_set(curr->to_dev, &sparms);
+ if (err)
+ continue;
+
+ if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
+ mlxsw_sp_span_entry_deconfigure(curr);
+ mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
+ }
+ }
+ rtnl_unlock();
}
-static int
-mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int i;
+ if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+ return;
+ mlxsw_core_schedule_work(&mlxsw_sp->span->work);
+}
+
+int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev, int *p_span_id)
+{
+ const struct mlxsw_sp_span_entry_ops *ops;
+ struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp_span_parms sparms;
int err;
- /* A given (source port, direction) can only be bound to one analyzer,
- * so if a binding is requested, check for conflicts.
- */
- if (bind)
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr =
- &mlxsw_sp->span->entries[i];
-
- if (mlxsw_sp_span_entry_bound_port_find(curr, type,
- port, bind))
- return -EEXIST;
- }
+ ASSERT_RTNL();
- /* if it is an egress SPAN, bind a shared buffer to it */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- err = mlxsw_sp_span_port_buffsize_update(port, port->dev->mtu);
- if (err)
- return err;
+ ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
+ if (!ops) {
+ dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
+ return -EOPNOTSUPP;
}
- if (bind) {
- err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- true);
- if (err)
- goto err_port_bind;
- }
+ memset(&sparms, 0, sizeof(sparms));
+ err = ops->parms_set(to_dev, &sparms);
+ if (err)
+ return err;
- inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
- if (!inspected_port) {
- err = -ENOMEM;
- goto err_inspected_port_alloc;
- }
- inspected_port->local_port = port->local_port;
- inspected_port->type = type;
- inspected_port->bound = bind;
- list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
+ span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
+ if (!span_entry)
+ return -ENOBUFS;
+
+ *p_span_id = span_entry->id;
return 0;
+}
-err_inspected_port_alloc:
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
-err_port_bind:
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ ASSERT_RTNL();
+
+ span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
+ if (WARN_ON_ONCE(!span_entry))
+ return;
+
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+}
+
+static struct mlxsw_sp_span_analyzed_port *
+mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ int err;
+
+ analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
+ if (!analyzed_port)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&analyzed_port->ref_count, 1);
+ analyzed_port->local_port = mlxsw_sp_port->local_port;
+ analyzed_port->ingress = ingress;
+ list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);
+
+ /* An egress mirror buffer should be allocated on the egress port which
+ * does the mirroring.
+ */
+ if (!ingress) {
+ u16 mtu = mlxsw_sp_port->dev->mtu;
+
+ err = mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, mtu);
+ if (err)
+ goto err_buffer_update;
}
- return err;
+
+ return analyzed_port;
+
+err_buffer_update:
+ list_del(&analyzed_port->list);
+ kfree(analyzed_port);
+ return ERR_PTR(err);
}
static void
-mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
+mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_analyzed_port *
+ analyzed_port)
{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
+ struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
- inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
- port, bind);
- if (!inspected_port)
- return;
+ /* Remove egress mirror buffer now that port is no longer analyzed
+ * at egress.
+ */
+ if (!analyzed_port->ingress)
+ mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
+ analyzed_port->local_port);
+
+ list_del(&analyzed_port->list);
+ kfree(analyzed_port);
+}
+
+int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ u8 local_port = mlxsw_sp_port->local_port;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
- /* remove the SBIB buffer if it was egress SPAN */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
+ local_port, ingress);
+ if (analyzed_port) {
+ refcount_inc(&analyzed_port->ref_count);
+ goto out_unlock;
}
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+ analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
+ mlxsw_sp_port,
+ ingress);
+ if (IS_ERR(analyzed_port))
+ err = PTR_ERR(analyzed_port);
- list_del(&inspected_port->list);
- kfree(inspected_port);
+out_unlock:
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
+ return err;
}
-static const struct mlxsw_sp_span_entry_ops *
-mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *to_dev)
+void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
- size_t i;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ u8 local_port = mlxsw_sp_port->local_port;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
- if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
- return mlxsw_sp_span_entry_types[i];
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
- return NULL;
+ analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
+ local_port, ingress);
+ if (WARN_ON_ONCE(!analyzed_port))
+ goto out_unlock;
+
+ if (!refcount_dec_and_test(&analyzed_port->ref_count))
+ goto out_unlock;
+
+ mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- const struct net_device *to_dev,
- enum mlxsw_sp_span_type type, bool bind,
- int *p_span_id)
+static int
+__mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry, bool enable)
{
- struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
- const struct mlxsw_sp_span_entry_ops *ops;
- struct mlxsw_sp_span_parms sparms = {NULL};
- struct mlxsw_sp_span_entry *span_entry;
- int err;
-
- ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
- if (!ops) {
- netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
- return -EOPNOTSUPP;
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+ enum mlxsw_reg_mpar_i_e i_e;
+
+ switch (trigger_entry->trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
+ i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
+ break;
+ case MLXSW_SP_SPAN_TRIGGER_EGRESS:
+ i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
}
- err = ops->parms(to_dev, &sparms);
- if (err)
- return err;
+ mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
+ trigger_entry->parms.span_id);
+ return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
- span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
- if (!span_entry)
- return -ENOBUFS;
+static int
+mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ return __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, true);
+}
- netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
- span_entry->id);
+static void
+mlxsw_sp_span_trigger_entry_unbind(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, false);
+}
- err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
+static struct mlxsw_sp_span_trigger_entry *
+mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms
+ *parms)
+{
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+ int err;
+
+ trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
+ if (!trigger_entry)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&trigger_entry->ref_count, 1);
+ trigger_entry->local_port = mlxsw_sp_port->local_port;
+ trigger_entry->trigger = trigger;
+ memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
+ list_add_tail(&trigger_entry->list, &span->trigger_entries_list);
+
+ err = mlxsw_sp_span_trigger_entry_bind(span, trigger_entry);
if (err)
- goto err_port_bind;
+ goto err_trigger_entry_bind;
- *p_span_id = span_entry->id;
- return 0;
+ return trigger_entry;
-err_port_bind:
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
- return err;
+err_trigger_entry_bind:
+ list_del(&trigger_entry->list);
+ kfree(trigger_entry);
+ return ERR_PTR(err);
}
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
- enum mlxsw_sp_span_type type, bool bind)
+static void
+mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
{
- struct mlxsw_sp_span_entry *span_entry;
+ mlxsw_sp_span_trigger_entry_unbind(span, trigger_entry);
+ list_del(&trigger_entry->list);
+ kfree(trigger_entry);
+}
- span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
- if (!span_entry) {
- netdev_err(from->dev, "no span entry found\n");
- return;
+static struct mlxsw_sp_span_trigger_entry *
+mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+ list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
+ if (trigger_entry->trigger == trigger &&
+ trigger_entry->local_port == mlxsw_sp_port->local_port)
+ return trigger_entry;
}
- netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
- span_entry->id);
- mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
+ return NULL;
}
-static void mlxsw_sp_span_respin_work(struct work_struct *work)
+int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms)
{
- struct mlxsw_sp_span *span;
- struct mlxsw_sp *mlxsw_sp;
- int i, err;
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+ int err = 0;
- span = container_of(work, struct mlxsw_sp_span, work);
- mlxsw_sp = span->mlxsw_sp;
+ ASSERT_RTNL();
- rtnl_lock();
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
- struct mlxsw_sp_span_parms sparms = {NULL};
+ if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
+ return -EINVAL;
- if (!curr->ref_count)
- continue;
+ trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port);
+ if (trigger_entry) {
+ if (trigger_entry->parms.span_id != parms->span_id)
+ return -EINVAL;
+ refcount_inc(&trigger_entry->ref_count);
+ goto out;
+ }
- err = curr->ops->parms(curr->to_dev, &sparms);
- if (err)
- continue;
+ trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port,
+ parms);
+ if (IS_ERR(trigger_entry))
+ err = PTR_ERR(trigger_entry);
- if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
- mlxsw_sp_span_entry_deconfigure(curr);
- mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
- }
- }
- rtnl_unlock();
+out:
+ return err;
}
-void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms)
{
- if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+ ASSERT_RTNL();
+
+ if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
+ parms->span_id)))
return;
- mlxsw_core_schedule_work(&mlxsw_sp->span->work);
+
+ trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port);
+ if (WARN_ON_ONCE(!trigger_entry))
+ return;
+
+ if (!refcount_dec_and_test(&trigger_entry->ref_count))
+ return;
+
+ mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 59724335525f..9f6dd2d0f4e6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -6,26 +6,13 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/refcount.h>
#include "spectrum_router.h"
struct mlxsw_sp;
struct mlxsw_sp_port;
-enum mlxsw_sp_span_type {
- MLXSW_SP_SPAN_EGRESS,
- MLXSW_SP_SPAN_INGRESS
-};
-
-struct mlxsw_sp_span_inspected_port {
- struct list_head list;
- enum mlxsw_sp_span_type type;
- u8 local_port;
-
- /* Whether this is a directly bound mirror (port-to-port) or an ACL. */
- bool bound;
-};
-
struct mlxsw_sp_span_parms {
struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */
unsigned int ttl;
@@ -36,21 +23,29 @@ struct mlxsw_sp_span_parms {
u16 vid;
};
+enum mlxsw_sp_span_trigger {
+ MLXSW_SP_SPAN_TRIGGER_INGRESS,
+ MLXSW_SP_SPAN_TRIGGER_EGRESS,
+};
+
+struct mlxsw_sp_span_trigger_parms {
+ int span_id;
+};
+
struct mlxsw_sp_span_entry_ops;
struct mlxsw_sp_span_entry {
const struct net_device *to_dev;
const struct mlxsw_sp_span_entry_ops *ops;
struct mlxsw_sp_span_parms parms;
- struct list_head bound_ports_list;
- int ref_count;
+ refcount_t ref_count;
int id;
};
struct mlxsw_sp_span_entry_ops {
bool (*can_handle)(const struct net_device *to_dev);
- int (*parms)(const struct net_device *to_dev,
- struct mlxsw_sp_span_parms *sparmsp);
+ int (*parms_set)(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp);
int (*configure)(struct mlxsw_sp_span_entry *span_entry,
struct mlxsw_sp_span_parms sparms);
void (*deconfigure)(struct mlxsw_sp_span_entry *span_entry);
@@ -60,12 +55,6 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- const struct net_device *to_dev,
- enum mlxsw_sp_span_type type,
- bool bind, int *p_span_id);
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
- enum mlxsw_sp_span_type type, bool bind);
struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev);
@@ -76,4 +65,21 @@ void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
void mlxsw_sp_span_speed_update_work(struct work_struct *work);
+int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev, int *p_span_id);
+void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id);
+int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms);
+void
+mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms);
+
#endif
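
Taken together, the spectrum_span.c rework and the prototypes above replace the old mirror_add/mirror_del pair with three building blocks: a SPAN agent (where to mirror), an analyzed port (which port is mirrored, including its egress buffer) and a trigger binding (when to mirror). Below is a minimal sketch of how a caller might combine them for egress mirroring; the example_* wrapper is hypothetical, and the caller must hold RTNL since the agent helpers assert it.

	static int example_egress_mirror_add(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_port *mlxsw_sp_port,
					     const struct net_device *to_dev,
					     int *p_span_id)
	{
		struct mlxsw_sp_span_trigger_parms parms;
		int err;

		/* 1. Resolve where to mirror to (allocates/refs a SPAN entry). */
		err = mlxsw_sp_span_agent_get(mlxsw_sp, to_dev, p_span_id);
		if (err)
			return err;

		/* 2. Mark the port as analyzed at egress; this also sizes the
		 *    egress mirror buffer according to the port's MTU.
		 */
		err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, false);
		if (err)
			goto err_analyzed_port_get;

		/* 3. Bind the egress trigger on the port to the SPAN agent. */
		parms.span_id = *p_span_id;
		err = mlxsw_sp_span_agent_bind(mlxsw_sp,
					       MLXSW_SP_SPAN_TRIGGER_EGRESS,
					       mlxsw_sp_port, &parms);
		if (err)
			goto err_agent_bind;

		return 0;

	err_agent_bind:
		mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, false);
	err_analyzed_port_get:
		mlxsw_sp_span_agent_put(mlxsw_sp, *p_span_id);
		return err;
	}

Teardown is simply the reverse order: agent_unbind, analyzed_port_put, agent_put.
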
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 1f496fac7033..5bd7fb917b7a 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -17,11 +17,6 @@
#include "encx24j600_hw.h"
-static inline bool is_bits_set(int value, int mask)
-{
- return (value & mask) == mask;
-}
-
static int encx24j600_switch_bank(struct encx24j600_context *ctx,
int bank)
{
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index b25a13da900a..2c0dcd7acf3f 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -604,9 +604,8 @@ static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
}
}
-static int encx24j600_hw_init(struct encx24j600_priv *priv)
+static void encx24j600_hw_init(struct encx24j600_priv *priv)
{
- int ret = 0;
u16 macon2;
priv->hw_enabled = false;
@@ -649,8 +648,6 @@ static int encx24j600_hw_init(struct encx24j600_priv *priv)
if (netif_msg_hw(priv))
encx24j600_dump_config(priv, "Hw is initialized");
-
- return ret;
}
static void encx24j600_hw_enable(struct encx24j600_priv *priv)
@@ -1042,12 +1039,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
}
/* Initialize the device HW to the consistent state */
- if (encx24j600_hw_init(priv)) {
- netif_err(priv, probe, ndev,
- DRV_NAME ": HW initialization error\n");
- ret = -EIO;
- goto out_free;
- }
+ encx24j600_hw_init(priv);
kthread_init_worker(&priv->kworker);
kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index f70bb81e1ed6..49fd843c4c8a 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -331,14 +331,15 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
void *desc;
unsigned int len;
unsigned int tx_head;
u32 txdes1;
- int ret = NETDEV_TX_BUSY;
+ netdev_tx_t ret = NETDEV_TX_BUSY;
spin_lock_irq(&priv->txlock);
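
The moxart change above is a type-correctness fix: .ndo_start_xmit implementations are expected to return netdev_tx_t rather than int. A generic sketch of the expected shape (not moxart-specific; the ring-state check is hypothetical):

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *ndev)
	{
		bool ring_full = false;	/* hypothetical ring-state check */

		if (ring_full)
			return NETDEV_TX_BUSY;	/* core will retry the skb */

		/* DMA-map and queue the skb here; it is freed on TX completion. */
		return NETDEV_TX_OK;
	}
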
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 9a36c26095c8..91b33b55054e 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: (GPL-2.0 OR MIT)
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o
mscc_ocelot_common-y := ocelot.o ocelot_io.o
-mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o
+mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o ocelot_ptp.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_board.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 02350c3d9d01..c798431ce2a1 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
#include <linux/iopoll.h>
#include <net/arp.h>
@@ -1348,6 +1347,12 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
{
info->phc_index = ocelot->ptp_clock ?
ptp_clock_index(ocelot->ptp_clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
@@ -1996,200 +2001,6 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
};
EXPORT_SYMBOL(ocelot_switchdev_blocking_nb);
-int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- unsigned long flags;
- time64_t s;
- u32 val;
- s64 ns;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
- s <<= 32;
- s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
- ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
-
- /* Deal with negative values */
- if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
- s--;
- ns &= 0xf;
- ns += 999999984;
- }
-
- set_normalized_timespec64(ts, s, ns);
- return 0;
-}
-EXPORT_SYMBOL(ocelot_ptp_gettime64);
-
-static int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
- TOD_ACC_PIN);
- ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
- TOD_ACC_PIN);
- ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- return 0;
-}
-
-static int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
- ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
- ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- } else {
- /* Fall back using ocelot_ptp_settime64 which is not exact. */
- struct timespec64 ts;
- u64 now;
-
- ocelot_ptp_gettime64(ptp, &ts);
-
- now = ktime_to_ns(timespec64_to_ktime(ts));
- ts = ns_to_timespec64(now + delta);
-
- ocelot_ptp_settime64(ptp, &ts);
- }
- return 0;
-}
-
-static int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
-{
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- u32 unit = 0, direction = 0;
- unsigned long flags;
- u64 adj = 0;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- if (!scaled_ppm)
- goto disable_adj;
-
- if (scaled_ppm < 0) {
- direction = PTP_CFG_CLK_ADJ_CFG_DIR;
- scaled_ppm = -scaled_ppm;
- }
-
- adj = PSEC_PER_SEC << 16;
- do_div(adj, scaled_ppm);
- do_div(adj, 1000);
-
- /* If the adjustment value is too large, use ns instead */
- if (adj >= (1L << 30)) {
- unit = PTP_CFG_CLK_ADJ_FREQ_NS;
- do_div(adj, 1000);
- }
-
- /* Still too big */
- if (adj >= (1L << 30))
- goto disable_adj;
-
- ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
- ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
- PTP_CLK_CFG_ADJ_CFG);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- return 0;
-
-disable_adj:
- ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- return 0;
-}
-
-static struct ptp_clock_info ocelot_ptp_clock_info = {
- .owner = THIS_MODULE,
- .name = "ocelot ptp",
- .max_adj = 0x7fffffff,
- .n_alarm = 0,
- .n_ext_ts = 0,
- .n_per_out = 0,
- .n_pins = 0,
- .pps = 0,
- .gettime64 = ocelot_ptp_gettime64,
- .settime64 = ocelot_ptp_settime64,
- .adjtime = ocelot_ptp_adjtime,
- .adjfine = ocelot_ptp_adjfine,
-};
-
-static int ocelot_init_timestamp(struct ocelot *ocelot)
-{
- struct ptp_clock *ptp_clock;
-
- ocelot->ptp_info = ocelot_ptp_clock_info;
- ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
- if (IS_ERR(ptp_clock))
- return PTR_ERR(ptp_clock);
- /* Check if PHC support is missing at the configuration level */
- if (!ptp_clock)
- return 0;
-
- ocelot->ptp_clock = ptp_clock;
-
- ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
- ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
- ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
-
- ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
-
- /* There is no device reconfiguration, PTP Rx stamping is always
- * enabled.
- */
- ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-
- return 0;
-}
-
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
* In the special case that it's the NPI port that we're configuring, the
@@ -2535,15 +2346,6 @@ int ocelot_init(struct ocelot *ocelot)
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
- if (ocelot->ptp) {
- ret = ocelot_init_timestamp(ocelot);
- if (ret) {
- dev_err(ocelot->dev,
- "Timestamp initialization failed\n");
- return ret;
- }
- }
-
return 0;
}
EXPORT_SYMBOL(ocelot_init);
@@ -2556,8 +2358,6 @@ void ocelot_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
- if (ocelot->ptp_clock)
- ptp_clock_unregister(ocelot->ptp_clock);
for (i = 0; i < ocelot->num_phys_ports; i++) {
port = ocelot->ports[i];
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 641af929497f..f0a15aa187f2 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -15,18 +15,17 @@
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include "ocelot_rew.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
-#include "ocelot_ptp.h"
#define OCELOT_BUFFER_CELL_SZ 60
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 3bd286044480..dfd82a3baab2 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -706,13 +706,124 @@ ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index)
return NULL;
}
+/* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based
+ * on destination and source MAC addresses, but only on higher-level protocol
+ * information. The only frame types to match on keys containing MAC addresses
+ * in this case are non-SNAP, non-ARP, non-IP and non-OAM frames.
+ *
+ * If @on=true, then the above frame types (SNAP, ARP, IP and OAM) will match
+ * on MAC_ETYPE keys such as destination and source MAC on this ingress port.
+ * However the setting has the side effect of making these frames not matching
+ * on any _other_ keys than MAC_ETYPE ones.
+ */
+static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port,
+ bool on)
+{
+ u32 val = 0;
+
+ if (on)
+ val = ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(3);
+
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M,
+ ANA_PORT_VCAP_S2_CFG, port);
+}
+
+static bool ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace)
+{
+ u16 proto, mask;
+
+ if (ace->type != OCELOT_ACE_TYPE_ETYPE)
+ return false;
+
+ proto = ntohs(*(u16 *)ace->frame.etype.etype.value);
+ mask = ntohs(*(u16 *)ace->frame.etype.etype.mask);
+
+ /* ETH_P_ALL match, so all protocols below are included */
+ if (mask == 0)
+ return true;
+ if (proto == ETH_P_ARP)
+ return true;
+ if (proto == ETH_P_IP)
+ return true;
+ if (proto == ETH_P_IPV6)
+ return true;
+
+ return false;
+}
+
+static bool ocelot_ace_is_problematic_non_mac_etype(struct ocelot_ace_rule *ace)
+{
+ if (ace->type == OCELOT_ACE_TYPE_SNAP)
+ return true;
+ if (ace->type == OCELOT_ACE_TYPE_ARP)
+ return true;
+ if (ace->type == OCELOT_ACE_TYPE_IPV4)
+ return true;
+ if (ace->type == OCELOT_ACE_TYPE_IPV6)
+ return true;
+ return false;
+}
+
+static bool ocelot_exclusive_mac_etype_ace_rules(struct ocelot *ocelot,
+ struct ocelot_ace_rule *ace)
+{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
+ struct ocelot_ace_rule *tmp;
+ unsigned long port;
+ int i;
+
+ if (ocelot_ace_is_problematic_mac_etype(ace)) {
+ /* Search for any non-MAC_ETYPE rules on the port */
+ for (i = 0; i < block->count; i++) {
+ tmp = ocelot_ace_rule_get_rule_index(block, i);
+ if (tmp->ingress_port_mask & ace->ingress_port_mask &&
+ ocelot_ace_is_problematic_non_mac_etype(tmp))
+ return false;
+ }
+
+ for_each_set_bit(port, &ace->ingress_port_mask,
+ ocelot->num_phys_ports)
+ ocelot_match_all_as_mac_etype(ocelot, port, true);
+ } else if (ocelot_ace_is_problematic_non_mac_etype(ace)) {
+ /* Search for any MAC_ETYPE rules on the port */
+ for (i = 0; i < block->count; i++) {
+ tmp = ocelot_ace_rule_get_rule_index(block, i);
+ if (tmp->ingress_port_mask & ace->ingress_port_mask &&
+ ocelot_ace_is_problematic_mac_etype(tmp))
+ return false;
+ }
+
+ for_each_set_bit(port, &ace->ingress_port_mask,
+ ocelot->num_phys_ports)
+ ocelot_match_all_as_mac_etype(ocelot, port, false);
+ }
+
+ return true;
+}
+
int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
- struct ocelot_ace_rule *rule)
+ struct ocelot_ace_rule *rule,
+ struct netlink_ext_ack *extack)
{
struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule *ace;
int i, index;
+ if (!ocelot_exclusive_mac_etype_ace_rules(ocelot, rule)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules");
+ return -EBUSY;
+ }
+
/* Add rule to the linked list */
ocelot_ace_rule_add(ocelot, block, rule);
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
index 29d22c566786..099e177f2617 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.h
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -194,7 +194,7 @@ struct ocelot_ace_rule {
enum ocelot_ace_action action;
struct ocelot_ace_stats stats;
- u16 ingress_port_mask;
+ unsigned long ingress_port_mask;
enum ocelot_vcap_bit dmac_mc;
enum ocelot_vcap_bit dmac_bc;
@@ -215,7 +215,8 @@ struct ocelot_ace_rule {
};
int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
- struct ocelot_ace_rule *rule);
+ struct ocelot_ace_rule *rule,
+ struct netlink_ext_ack *extack);
int ocelot_ace_rule_offload_del(struct ocelot *ocelot,
struct ocelot_ace_rule *rule);
int ocelot_ace_rule_stats_update(struct ocelot *ocelot,
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 0ac9fbf77a01..67a8d61c926a 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -366,6 +366,23 @@ static const struct vcap_props vsc7514_vcap_props[] = {
},
};
+static struct ptp_clock_info ocelot_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "ocelot ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = OCELOT_PTP_PINS_NUM,
+ .n_pins = OCELOT_PTP_PINS_NUM,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+ .verify = ocelot_ptp_verify,
+ .enable = ocelot_ptp_enable,
+};
+
static int mscc_ocelot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -469,6 +486,15 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->vcap = vsc7514_vcap_props;
ocelot_init(ocelot);
+ if (ocelot->ptp) {
+ err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
+ if (err) {
+ dev_err(ocelot->dev,
+ "Timestamp initialization failed\n");
+ ocelot->ptp = 0;
+ }
+ }
+
/* No NPI port */
ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
OCELOT_TAG_PREFIX_NONE);
@@ -574,6 +600,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 341923311fec..5ce172e22b43 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -51,6 +51,8 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
+ u16 proto = ntohs(f->common.protocol);
+ bool match_protocol = true;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -71,7 +73,6 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
- u16 proto = ntohs(f->common.protocol);
/* The hw support mac matches only for MAC_ETYPE key,
* therefore if other matches(port, tcp flags, etc) are added
@@ -86,11 +87,6 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
BIT(FLOW_DISSECTOR_KEY_CONTROL)))
return -EOPNOTSUPP;
- if (proto == ETH_P_IP ||
- proto == ETH_P_IPV6 ||
- proto == ETH_P_ARP)
- return -EOPNOTSUPP;
-
flow_rule_match_eth_addrs(rule, &match);
ace->type = OCELOT_ACE_TYPE_ETYPE;
ether_addr_copy(ace->frame.etype.dmac.value,
@@ -114,6 +110,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
match.key->ip_proto;
ace->frame.ipv4.proto.mask[0] =
match.mask->ip_proto;
+ match_protocol = false;
}
if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
ace->type = OCELOT_ACE_TYPE_IPV6;
@@ -121,11 +118,12 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
match.key->ip_proto;
ace->frame.ipv6.proto.mask[0] =
match.mask->ip_proto;
+ match_protocol = false;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) &&
- ntohs(f->common.protocol) == ETH_P_IP) {
+ proto == ETH_P_IP) {
struct flow_match_ipv4_addrs match;
u8 *tmp;
@@ -141,10 +139,11 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
tmp = &ace->frame.ipv4.dip.mask.addr[0];
memcpy(tmp, &match.mask->dst, 4);
+ match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) &&
- ntohs(f->common.protocol) == ETH_P_IPV6) {
+ proto == ETH_P_IPV6) {
return -EOPNOTSUPP;
}
@@ -156,6 +155,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
ace->frame.ipv4.sport.mask = ntohs(match.mask->src);
ace->frame.ipv4.dport.value = ntohs(match.key->dst);
ace->frame.ipv4.dport.mask = ntohs(match.mask->dst);
+ match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -167,9 +167,20 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
ace->vlan.vid.mask = match.mask->vlan_id;
ace->vlan.pcp.value[0] = match.key->vlan_priority;
ace->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ match_protocol = false;
}
finished_key_parsing:
+ if (match_protocol && proto != ETH_P_ALL) {
+ /* TODO: support SNAP, LLC etc */
+ if (proto < ETH_P_802_3_MIN)
+ return -EOPNOTSUPP;
+ ace->type = OCELOT_ACE_TYPE_ETYPE;
+ *(u16 *)ace->frame.etype.etype.value = htons(proto);
+ *(u16 *)ace->frame.etype.etype.mask = 0xffff;
+ }
+ /* else, a rule of type OCELOT_ACE_TYPE_ANY is implicitly added */
+
ace->prio = f->common.prio;
ace->id = f->cookie;
return ocelot_flower_parse_action(f, ace);
@@ -205,7 +216,7 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
return ret;
}
- return ocelot_ace_rule_offload_add(ocelot, ace);
+ return ocelot_ace_rule_offload_add(ocelot, ace, f->common.extack);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
new file mode 100644
index 000000000000..a3088a1676ed
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot PTP clock driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright 2020 NXP
+ */
+#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
+
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ time64_t s;
+ u32 val;
+ s64 ns;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
+ s <<= 32;
+ s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
+ if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_gettime64);
+
+int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_settime64);
+
+int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ struct ocelot *ocelot = container_of(ptp, struct ocelot,
+ ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
+ PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
+ PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ } else {
+ /* Fall back to using ocelot_ptp_settime64, which is not exact. */
+ struct timespec64 ts;
+ u64 now;
+
+ ocelot_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ ocelot_ptp_settime64(ptp, &ts);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_adjtime);
+
+int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ u32 unit = 0, direction = 0;
+ unsigned long flags;
+ u64 adj = 0;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ if (!scaled_ppm)
+ goto disable_adj;
+
+ if (scaled_ppm < 0) {
+ direction = PTP_CFG_CLK_ADJ_CFG_DIR;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ adj = PSEC_PER_SEC << 16;
+ do_div(adj, scaled_ppm);
+ do_div(adj, 1000);
+
+ /* If the adjustment value is too large, use ns instead */
+ if (adj >= (1L << 30)) {
+ unit = PTP_CFG_CLK_ADJ_FREQ_NS;
+ do_div(adj, 1000);
+ }
+
+ /* Still too big */
+ if (adj >= (1L << 30))
+ goto disable_adj;
+
+ ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
+ ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
+ PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+
+disable_adj:
+ ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_adjfine);
+
+int ocelot_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_verify);
+
+int ocelot_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ struct timespec64 ts_start, ts_period;
+ enum ocelot_ptp_pins ptp_pin;
+ unsigned long flags;
+ bool pps = false;
+ int pin = -1;
+ u32 val;
+ s64 ns;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin == 0)
+ ptp_pin = PTP_PIN_0;
+ else if (pin == 1)
+ ptp_pin = PTP_PIN_1;
+ else if (pin == 2)
+ ptp_pin = PTP_PIN_2;
+ else if (pin == 3)
+ ptp_pin = PTP_PIN_3;
+ else
+ return -EBUSY;
+
+ ts_start.tv_sec = rq->perout.start.sec;
+ ts_start.tv_nsec = rq->perout.start.nsec;
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+
+ if (ts_period.tv_sec == 1 && ts_period.tv_nsec == 0)
+ pps = true;
+
+ if (ts_start.tv_sec || (ts_start.tv_nsec && !pps)) {
+ dev_warn(ocelot->dev,
+ "Absolute start time not supported!\n");
+ dev_warn(ocelot->dev,
+ "Accept nsec for PPS phase adjustment, otherwise start time should be 0 0.\n");
+ return -EINVAL;
+ }
+
+ /* Handle turning off */
+ if (!on) {
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ }
+
+ /* Handle PPS request */
+ if (pps) {
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+ /* Pulse generated perout.start.nsec after TOD has
+ * increased seconds.
+ * Pulse width is set to 1us.
+ */
+ ocelot_write_rix(ocelot, ts_start.tv_nsec,
+ PTP_PIN_WF_LOW_PERIOD, ptp_pin);
+ ocelot_write_rix(ocelot, 1000,
+ PTP_PIN_WF_HIGH_PERIOD, ptp_pin);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
+ val |= PTP_PIN_CFG_SYNC;
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ }
+
+ /* Handle periodic clock */
+ ns = timespec64_to_ns(&ts_period);
+ ns = ns >> 1;
+ if (ns > 0x3fffffff || ns <= 0x6)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+ ocelot_write_rix(ocelot, ns, PTP_PIN_WF_LOW_PERIOD, ptp_pin);
+ ocelot_write_rix(ocelot, ns, PTP_PIN_WF_HIGH_PERIOD, ptp_pin);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_enable);
+
+int ocelot_init_timestamp(struct ocelot *ocelot, struct ptp_clock_info *info)
+{
+ struct ptp_clock *ptp_clock;
+ int i;
+
+ ocelot->ptp_info = *info;
+
+ for (i = 0; i < OCELOT_PTP_PINS_NUM; i++) {
+ struct ptp_pin_desc *p = &ocelot->ptp_pins[i];
+
+ snprintf(p->name, sizeof(p->name), "switch_1588_dat%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
+
+ ocelot->ptp_info.pin_config = &ocelot->ptp_pins[0];
+
+ ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
+ if (IS_ERR(ptp_clock))
+ return PTR_ERR(ptp_clock);
+ /* Check if PHC support is missing at the configuration level */
+ if (!ptp_clock)
+ return 0;
+
+ ocelot->ptp_clock = ptp_clock;
+
+ ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
+
+ ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
+
+ /* There is no device reconfiguration; PTP Rx stamping is always
+ * enabled.
+ */
+ ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_init_timestamp);
+
+int ocelot_deinit_timestamp(struct ocelot *ocelot)
+{
+ if (ocelot->ptp_clock)
+ ptp_clock_unregister(ocelot->ptp_clock);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_deinit_timestamp);
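For context, a minimal sketch (not part of this patch; the caller, field values and clock name are assumptions) of how a switch driver might wire the helpers exported above into a ptp_clock_info before calling ocelot_init_timestamp():

	/* assumes <linux/ptp_clock_kernel.h> and <soc/mscc/ocelot_ptp.h> */
	static struct ptp_clock_info example_ocelot_ptp_caps = {
		.owner		= THIS_MODULE,
		.name		= "ocelot ptp",		/* assumed clock name */
		.max_adj	= 0x7fffffff,		/* assumed adjustment limit */
		.n_pins		= OCELOT_PTP_PINS_NUM,
		.n_per_out	= OCELOT_PTP_PINS_NUM,
		.gettime64	= ocelot_ptp_gettime64,
		.settime64	= ocelot_ptp_settime64,
		.adjtime	= ocelot_ptp_adjtime,
		.adjfine	= ocelot_ptp_adjfine,
		.verify		= ocelot_ptp_verify,
		.enable		= ocelot_ptp_enable,
	};

	/* err = ocelot_init_timestamp(ocelot, &example_ocelot_ptp_caps); */

On the frequency path, ocelot_ptp_adjfine() converts scaled_ppm (ppm with a 16-bit fractional part) into the interval between hardware corrections: for scaled_ppm = 65536 (+1 ppm), adj = (PSEC_PER_SEC << 16) / 65536 / 1000 = 10^9, which still fits in 30 bits and is programmed directly; larger results are divided by 1000 once more and programmed with PTP_CFG_CLK_ADJ_FREQ_NS set.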
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.h b/drivers/net/ethernet/mscc/ocelot_ptp.h
deleted file mode 100644
index 9ede14a12573..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_ptp.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * License: Dual MIT/GPL
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_PTP_H_
-#define _MSCC_OCELOT_PTP_H_
-
-#define PTP_PIN_CFG_RSZ 0x20
-#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ
-#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
-#define PTP_PIN_TOD_NSEC_RSZ PTP_PIN_CFG_RSZ
-
-#define PTP_PIN_CFG_DOM BIT(0)
-#define PTP_PIN_CFG_SYNC BIT(2)
-#define PTP_PIN_CFG_ACTION(x) ((x) << 3)
-#define PTP_PIN_CFG_ACTION_MASK PTP_PIN_CFG_ACTION(0x7)
-
-enum {
- PTP_PIN_ACTION_IDLE = 0,
- PTP_PIN_ACTION_LOAD,
- PTP_PIN_ACTION_SAVE,
- PTP_PIN_ACTION_CLOCK,
- PTP_PIN_ACTION_DELTA,
- PTP_PIN_ACTION_NOSYNC,
- PTP_PIN_ACTION_SYNC,
-};
-
-#define PTP_CFG_MISC_PTP_EN BIT(2)
-
-#define PSEC_PER_SEC 1000000000000LL
-
-#define PTP_CFG_CLK_ADJ_CFG_ENA BIT(0)
-#define PTP_CFG_CLK_ADJ_CFG_DIR BIT(1)
-
-#define PTP_CFG_CLK_ADJ_FREQ_NS BIT(30)
-
-#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index 7d4fd1b6adda..81d81ff75646 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -239,6 +239,8 @@ static const u32 ocelot_ptp_regmap[] = {
REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
REG(PTP_CFG_MISC, 0x0000a0),
REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index d326e231f0ad..b7baf7624e18 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -48,7 +48,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
NL_SET_ERR_MSG_MOD(extack,
- "Only one policer per port is supported\n");
+ "Only one policer per port is supported");
return -EEXIST;
}
@@ -59,7 +59,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
err = ocelot_port_policer_add(ocelot, port, &pol);
if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Could not add policer\n");
+ NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
return err;
}
@@ -73,7 +73,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
err = ocelot_port_policer_del(ocelot, port);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
- "Could not delete policer\n");
+ "Could not delete policer");
return err;
}
priv->tc.police_id = 0;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 2616fd735aab..e1e1f4e3639e 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1174,18 +1174,6 @@ myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
mb();
}
-static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
-{
- struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
-
- if ((skb->protocol == htons(ETH_P_8021Q)) &&
- (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
- vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
- skb->csum = hw_csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
-}
-
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
int bytes, int watchdog)
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 5e630f3a0189..a82a37094579 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -27,7 +27,7 @@ config S2IO
on its age.
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/neterion/s2io.txt>.
+ <file:Documentation/networking/device_drivers/neterion/s2io.rst>.
To compile this driver as a module, choose M here. The module
will be called s2io.
@@ -42,7 +42,7 @@ config VXGE
labeled as either one, depending on its age.
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/neterion/vxge.txt>.
+ <file:Documentation/networking/device_drivers/neterion/vxge.rst>.
To compile this driver as a module, choose M here. The module
will be called vxge.
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 4d282fc56009..7ff2ccbd43b0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -14,7 +14,6 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/firmware.h>
-#include <linux/vermagic.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
@@ -31,7 +30,6 @@
#include "nfp_net.h"
static const char nfp_driver_name[] = "nfp";
-const char nfp_driver_version[] = VERMAGIC_STRING;
static const struct pci_device_id nfp_pci_device_ids[] = {
{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
@@ -920,4 +918,3 @@ MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw");
MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver.");
-MODULE_VERSION(UTS_RELEASE);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9bfb3b077bc1..0e0cc3d58bdc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1741,10 +1741,15 @@ nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
unsigned int pkt_len, bool *completed)
{
+ unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_desc *txd;
int wr_idx;
+ /* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
+ if (pkt_len + dma_off > dma_map_sz)
+ return false;
+
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
if (!*completed) {
nfp_net_xdp_complete(tx_ring);
@@ -1817,6 +1822,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
rcu_read_lock();
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
xdp.rxq = &rx_ring->xdp_rxq;
tx_ring = r_vec->xdp_ring;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2779f1526d1e..a5aa3219d112 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -203,8 +203,6 @@ nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
char nsp_version[ETHTOOL_FWVERS_LEN] = {};
strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));
-
nfp_net_get_nspinfo(app, nsp_version);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%s %s %s %s", vnic_version, nsp_version,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 79d72c88bbef..b3cabc274121 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -299,6 +299,20 @@ static void nfp_repr_clean(struct nfp_repr *repr)
nfp_port_free(repr->port);
}
+static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
+
+static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
+}
+
+static void nfp_repr_set_lockdep_class(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
+}
+
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev)
@@ -308,6 +322,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 repr_cap = nn->tlv_caps.repr_cap;
int err;
+ nfp_repr_set_lockdep_class(netdev);
+
repr->port = port;
repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
if (!repr->dst)
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 2fdd0753b3af..d2708a57f2ff 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -502,7 +502,8 @@ static int nixge_check_tx_bd_space(struct nixge_priv *priv,
return 0;
}
-static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
struct nixge_hw_dma_bd *cur_p;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index d20cf03a3ea0..d3cbb4215f5c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -823,7 +823,8 @@ static int lpc_mii_init(struct netdata_local *pldat)
if (err)
goto err_out_unregister_bus;
- if (lpc_mii_probe(pldat->ndev) != 0)
+ err = lpc_mii_probe(pldat->ndev);
+ if (err)
goto err_out_unregister_bus;
return 0;
@@ -1029,7 +1030,8 @@ static int lpc_eth_close(struct net_device *ndev)
return 0;
}
-static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
u32 len, txidx;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index f4ae40ae1e53..d83eff0ae0ac 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -388,6 +388,19 @@ int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
}
/* LIF commands */
+void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
+ u16 lif_type, u8 qtype, u8 qver)
+{
+ union ionic_dev_cmd cmd = {
+ .q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
+ .q_identify.lif_type = lif_type,
+ .q_identify.type = qtype,
+ .q_identify.ver = qver,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
{
union ionic_dev_cmd cmd = {
@@ -431,6 +444,7 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
.q_init.opcode = IONIC_CMD_Q_INIT,
.q_init.lif_index = cpu_to_le16(lif_index),
.q_init.type = q->type,
+ .q_init.ver = qcq->q.lif->qtype_info[q->type].version,
.q_init.index = cpu_to_le32(q->index),
.q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_ENA),
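A minimal caller-side sketch (assumed flow, not part of this hunk) of using the new IONIC_CMD_Q_IDENTIFY devcmd, given a struct ionic *ionic and its struct ionic_dev *idev; ionic_dev_cmd_wait(), DEVCMD_TIMEOUT and the dev_cmd_regs data area are taken from the existing driver, while the queue-type version requested here is an arbitrary example value:

	union ionic_q_identity q_ident;
	int err;

	/* Ask the device which Tx queue-type version and features it
	 * supports, offering the highest version this driver knows
	 * about (assumed to be 1 for this example).
	 */
	ionic_dev_cmd_queue_identify(idev, IONIC_LIF_TYPE_CLASSIC,
				     IONIC_QTYPE_TXQ, 1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	if (!err)
		memcpy_fromio(&q_ident, &idev->dev_cmd_regs->data,
			      sizeof(q_ident));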
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 587398b01997..525434f10025 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -12,7 +12,8 @@
#define IONIC_MIN_MTU ETH_MIN_MTU
#define IONIC_MAX_MTU 9194
-#define IONIC_MAX_TXRX_DESC 16384
+#define IONIC_MAX_TX_DESC 8192
+#define IONIC_MAX_RX_DESC 16384
#define IONIC_MIN_TXRX_DESC 16
#define IONIC_DEF_TXRX_DESC 4096
#define IONIC_LIFS_MAX 1024
@@ -83,6 +84,8 @@ static_assert(sizeof(struct ionic_q_init_cmd) == 64);
static_assert(sizeof(struct ionic_q_init_comp) == 16);
static_assert(sizeof(struct ionic_q_control_cmd) == 64);
static_assert(sizeof(ionic_q_control_comp) == 16);
+static_assert(sizeof(struct ionic_q_identify_cmd) == 64);
+static_assert(sizeof(struct ionic_q_identify_comp) == 16);
static_assert(sizeof(struct ionic_rx_mode_set_cmd) == 64);
static_assert(sizeof(ionic_rx_mode_set_comp) == 16);
@@ -179,7 +182,7 @@ struct ionic_desc_info {
void *cb_arg;
};
-#define QUEUE_NAME_MAX_SZ 32
+#define IONIC_QUEUE_NAME_MAX_SZ 32
struct ionic_queue {
u64 dbell_count;
@@ -204,14 +207,14 @@ struct ionic_queue {
unsigned int desc_size;
unsigned int sg_desc_size;
unsigned int pid;
- char name[QUEUE_NAME_MAX_SZ];
+ char name[IONIC_QUEUE_NAME_MAX_SZ];
};
-#define INTR_INDEX_NOT_ASSIGNED -1
-#define INTR_NAME_MAX_SZ 32
+#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
+#define IONIC_INTR_NAME_MAX_SZ 32
struct ionic_intr_info {
- char name[INTR_NAME_MAX_SZ];
+ char name[IONIC_INTR_NAME_MAX_SZ];
unsigned int index;
unsigned int vector;
u64 rearm_count;
@@ -283,6 +286,8 @@ void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data);
+void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
+ u16 lif_type, u8 qtype, u8 qver);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
dma_addr_t addr);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 6996229facfd..f7e3ce3de04d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -12,10 +12,11 @@
#include "ionic_stats.h"
static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define PRIV_F_SW_DBG_STATS BIT(0)
+#define IONIC_PRIV_F_SW_DBG_STATS BIT(0)
"sw-dbg-stats",
};
-#define PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
+
+#define IONIC_PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
@@ -58,7 +59,7 @@ static int ionic_get_sset_count(struct net_device *netdev, int sset)
count = ionic_get_stats_count(lif);
break;
case ETH_SS_PRIV_FLAGS:
- count = PRIV_FLAGS_COUNT;
+ count = IONIC_PRIV_FLAGS_COUNT;
break;
}
return count;
@@ -75,7 +76,7 @@ static void ionic_get_strings(struct net_device *netdev,
break;
case ETH_SS_PRIV_FLAGS:
memcpy(buf, ionic_priv_flags_strings,
- PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
+ IONIC_PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
break;
}
}
@@ -159,6 +160,8 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
break;
+ case IONIC_XCVR_PID_QSFP_100G_CWDM4:
+ case IONIC_XCVR_PID_QSFP_100G_PSM4:
case IONIC_XCVR_PID_QSFP_100G_LR4:
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseLR4_ER4_Full);
@@ -178,6 +181,7 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
break;
case IONIC_XCVR_PID_SFP_25GBASE_SR:
case IONIC_XCVR_PID_SFP_25GBASE_AOC:
+ case IONIC_XCVR_PID_SFP_25GBASE_ACC:
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
break;
@@ -458,9 +462,9 @@ static void ionic_get_ringparam(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
- ring->tx_max_pending = IONIC_MAX_TXRX_DESC;
+ ring->tx_max_pending = IONIC_MAX_TX_DESC;
ring->tx_pending = lif->ntxq_descs;
- ring->rx_max_pending = IONIC_MAX_TXRX_DESC;
+ ring->rx_max_pending = IONIC_MAX_RX_DESC;
ring->rx_pending = lif->nrxq_descs;
}
@@ -554,7 +558,7 @@ static u32 ionic_get_priv_flags(struct net_device *netdev)
u32 priv_flags = 0;
if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- priv_flags |= PRIV_F_SW_DBG_STATS;
+ priv_flags |= IONIC_PRIV_F_SW_DBG_STATS;
return priv_flags;
}
@@ -564,7 +568,7 @@ static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
struct ionic_lif *lif = netdev_priv(netdev);
clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
- if (priv_flags & PRIV_F_SW_DBG_STATS)
+ if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS)
set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
return 0;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index ceeb7629e7a0..7e22ba4ed915 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
-/* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. */
+/* Copyright (c) 2017-2020 Pensando Systems, Inc. All rights reserved. */
#ifndef _IONIC_IF_H_
#define _IONIC_IF_H_
@@ -9,7 +9,7 @@
#define IONIC_IFNAMSIZ 16
/**
- * Commands
+ * enum ionic_cmd_opcode - Device commands
*/
enum ionic_cmd_opcode {
IONIC_CMD_NOP = 0,
@@ -40,6 +40,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_RX_FILTER_DEL = 32,
/* Queue commands */
+ IONIC_CMD_Q_IDENTIFY = 39,
IONIC_CMD_Q_INIT = 40,
IONIC_CMD_Q_CONTROL = 41,
@@ -57,6 +58,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
IONIC_CMD_QOS_CLASS_INIT = 241,
IONIC_CMD_QOS_CLASS_RESET = 242,
+ IONIC_CMD_QOS_CLASS_UPDATE = 243,
/* Firmware commands */
IONIC_CMD_FW_DOWNLOAD = 254,
@@ -64,7 +66,7 @@ enum ionic_cmd_opcode {
};
/**
- * Command Return codes
+ * enum ionic_status_code - Device command return codes
*/
enum ionic_status_code {
IONIC_RC_SUCCESS = 0, /* Success */
@@ -97,6 +99,7 @@ enum ionic_notifyq_opcode {
IONIC_EVENT_RESET = 2,
IONIC_EVENT_HEARTBEAT = 3,
IONIC_EVENT_LOG = 4,
+ IONIC_EVENT_XCVR = 5,
};
/**
@@ -114,12 +117,11 @@ struct ionic_admin_cmd {
/**
* struct ionic_admin_comp - General admin command completion format
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @cmd_data: Command-specific bytes.
- * @color: Color bit. (Always 0 for commands issued to the
- * Device Cmd Registers.)
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @cmd_data: Command-specific bytes
+ * @color: Color bit (Always 0 for commands issued to the
+ * Device Cmd Registers)
*/
struct ionic_admin_comp {
u8 status;
@@ -146,7 +148,7 @@ struct ionic_nop_cmd {
/**
* struct ionic_nop_comp - NOP command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_nop_comp {
u8 status;
@@ -156,7 +158,7 @@ struct ionic_nop_comp {
/**
* struct ionic_dev_init_cmd - Device init command
* @opcode: opcode
- * @type: device type
+ * @type: Device type
*/
struct ionic_dev_init_cmd {
u8 opcode;
@@ -166,7 +168,7 @@ struct ionic_dev_init_cmd {
/**
* struct init_comp - Device init command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_dev_init_comp {
u8 status;
@@ -184,7 +186,7 @@ struct ionic_dev_reset_cmd {
/**
* struct reset_comp - Reset command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_dev_reset_comp {
u8 status;
@@ -205,8 +207,8 @@ struct ionic_dev_identify_cmd {
};
/**
- * struct dev_identify_comp - Driver/device identify command completion
- * @status: The status of the command (enum status_code)
+ * struct ionic_dev_identify_comp - Driver/device identify command completion
+ * @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
*/
struct ionic_dev_identify_comp {
@@ -225,8 +227,8 @@ enum ionic_os_type {
};
/**
- * union drv_identity - driver identity information
- * @os_type: OS type (see enum os_type)
+ * union ionic_drv_identity - driver identity information
+ * @os_type: OS type (see enum ionic_os_type)
* @os_dist: OS distribution, numeric format
* @os_dist_str: OS distribution, string format
* @kernel_ver: Kernel version, numeric format
@@ -242,26 +244,26 @@ union ionic_drv_identity {
char kernel_ver_str[32];
char driver_ver_str[32];
};
- __le32 words[512];
+ __le32 words[478];
};
/**
- * union dev_identity - device identity information
+ * union ionic_dev_identity - device identity information
* @version: Version of device identify
* @type: Identify type (0 for now)
* @nports: Number of ports provisioned
* @nlifs: Number of LIFs provisioned
* @nintrs: Number of interrupts provisioned
* @ndbpgs_per_lif: Number of doorbell pages per LIF
- * @intr_coal_mult: Interrupt coalescing multiplication factor.
+ * @intr_coal_mult: Interrupt coalescing multiplication factor
* Scale user-supplied interrupt coalescing
* value in usecs to device units using:
* device units = usecs * mult / div
- * @intr_coal_div: Interrupt coalescing division factor.
+ * @intr_coal_div: Interrupt coalescing division factor
* Scale user-supplied interrupt coalescing
* value in usecs to device units using:
* device units = usecs * mult / div
- *
+ * @eq_count: Number of shared event queues
*/
union ionic_dev_identity {
struct {
@@ -275,8 +277,9 @@ union ionic_dev_identity {
__le32 ndbpgs_per_lif;
__le32 intr_coal_mult;
__le32 intr_coal_div;
+ __le32 eq_count;
};
- __le32 words[512];
+ __le32 words[478];
};
enum ionic_lif_type {
@@ -286,10 +289,10 @@ enum ionic_lif_type {
};
/**
- * struct ionic_lif_identify_cmd - lif identify command
+ * struct ionic_lif_identify_cmd - LIF identify command
* @opcode: opcode
- * @type: lif type (enum lif_type)
- * @ver: version of identify returned by device
+ * @type: LIF type (enum ionic_lif_type)
+ * @ver: Version of identify returned by device
*/
struct ionic_lif_identify_cmd {
u8 opcode;
@@ -299,9 +302,9 @@ struct ionic_lif_identify_cmd {
};
/**
- * struct ionic_lif_identify_comp - lif identify command completion
- * @status: status of the command (enum status_code)
- * @ver: version of identify returned by device
+ * struct ionic_lif_identify_comp - LIF identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @ver: Version of identify returned by device
*/
struct ionic_lif_identify_comp {
u8 status;
@@ -309,13 +312,24 @@ struct ionic_lif_identify_comp {
u8 rsvd2[14];
};
+/**
+ * enum ionic_lif_capability - LIF capabilities
+ * @IONIC_LIF_CAP_ETH: LIF supports Ethernet
+ * @IONIC_LIF_CAP_RDMA: LIF supports RDMA
+ */
enum ionic_lif_capability {
IONIC_LIF_CAP_ETH = BIT(0),
IONIC_LIF_CAP_RDMA = BIT(1),
};
/**
- * Logical Queue Types
+ * enum ionic_logical_qtype - Logical Queue Types
+ * @IONIC_QTYPE_ADMINQ: Administrative Queue
+ * @IONIC_QTYPE_NOTIFYQ: Notify Queue
+ * @IONIC_QTYPE_RXQ: Receive Queue
+ * @IONIC_QTYPE_TXQ: Transmit Queue
+ * @IONIC_QTYPE_EQ: Event Queue
+ * @IONIC_QTYPE_MAX: Max queue type supported
*/
enum ionic_logical_qtype {
IONIC_QTYPE_ADMINQ = 0,
@@ -327,10 +341,10 @@ enum ionic_logical_qtype {
};
/**
- * struct ionic_lif_logical_qtype - Descriptor of logical to hardware queue type.
- * @qtype: Hardware Queue Type.
- * @qid_count: Number of Queue IDs of the logical type.
- * @qid_base: Minimum Queue ID of the logical type.
+ * struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type
+ * @qtype: Hardware Queue Type
+ * @qid_count: Number of Queue IDs of the logical type
+ * @qid_base: Minimum Queue ID of the logical type
*/
struct ionic_lif_logical_qtype {
u8 qtype;
@@ -339,6 +353,12 @@ struct ionic_lif_logical_qtype {
__le32 qid_base;
};
+/**
+ * enum ionic_lif_state - LIF state
+ * @IONIC_LIF_DISABLE: LIF disabled
+ * @IONIC_LIF_ENABLE: LIF enabled
+ * @IONIC_LIF_HANG_RESET: LIF hung, being reset
+ */
enum ionic_lif_state {
IONIC_LIF_DISABLE = 0,
IONIC_LIF_ENABLE = 1,
@@ -346,13 +366,13 @@ enum ionic_lif_state {
};
/**
- * LIF configuration
- * @state: lif state (enum lif_state)
- * @name: lif name
- * @mtu: mtu
- * @mac: station mac address
- * @features: features (enum ionic_eth_hw_features)
- * @queue_count: queue counts per queue-type
+ * union ionic_lif_config - LIF configuration
+ * @state: LIF state (enum ionic_lif_state)
+ * @name: LIF name
+ * @mtu: MTU
+ * @mac: Station MAC address
+ * @features: Features (enum ionic_eth_hw_features)
+ * @queue_count: Queue counts per queue-type
*/
union ionic_lif_config {
struct {
@@ -369,37 +389,36 @@ union ionic_lif_config {
};
/**
- * struct ionic_lif_identity - lif identity information (type-specific)
+ * struct ionic_lif_identity - LIF identity information (type-specific)
*
- * @capabilities LIF capabilities
+ * @capabilities: LIF capabilities
*
- * Ethernet:
- * @version: Ethernet identify structure version.
- * @features: Ethernet features supported on this lif type.
- * @max_ucast_filters: Number of perfect unicast addresses supported.
- * @max_mcast_filters: Number of perfect multicast addresses supported.
- * @min_frame_size: Minimum size of frames to be sent
- * @max_frame_size: Maximim size of frames to be sent
- * @config: LIF config struct with features, mtu, mac, q counts
+ * @eth: Ethernet identify structure
+ * @version: Ethernet identify structure version
+ * @max_ucast_filters: Number of perfect unicast addresses supported
+ * @max_mcast_filters: Number of perfect multicast addresses supported
+ * @min_frame_size: Minimum size of frames to be sent
+ * @max_frame_size: Maximum size of frames to be sent
+ * @config: LIF config struct with features, mtu, mac, q counts
*
- * RDMA:
- * @version: RDMA version of opcodes and queue descriptors.
- * @qp_opcodes: Number of rdma queue pair opcodes supported.
- * @admin_opcodes: Number of rdma admin opcodes supported.
- * @npts_per_lif: Page table size per lif
- * @nmrs_per_lif: Number of memory regions per lif
- * @nahs_per_lif: Number of address handles per lif
- * @max_stride: Max work request stride.
- * @cl_stride: Cache line stride.
- * @pte_stride: Page table entry stride.
- * @rrq_stride: Remote RQ work request stride.
- * @rsq_stride: Remote SQ work request stride.
+ * @rdma: RDMA identify structure
+ * @version: RDMA version of opcodes and queue descriptors
+ * @qp_opcodes: Number of RDMA queue pair opcodes supported
+ * @admin_opcodes: Number of RDMA admin opcodes supported
+ * @npts_per_lif: Page table size per LIF
+ * @nmrs_per_lif: Number of memory regions per LIF
+ * @nahs_per_lif: Number of address handles per LIF
+ * @max_stride: Max work request stride
+ * @cl_stride: Cache line stride
+ * @pte_stride: Page table entry stride
+ * @rrq_stride: Remote RQ work request stride
+ * @rsq_stride: Remote SQ work request stride
* @dcqcn_profiles: Number of DCQCN profiles
- * @aq_qtype: RDMA Admin Qtype.
- * @sq_qtype: RDMA Send Qtype.
- * @rq_qtype: RDMA Receive Qtype.
- * @cq_qtype: RDMA Completion Qtype.
- * @eq_qtype: RDMA Event Qtype.
+ * @aq_qtype: RDMA Admin Qtype
+ * @sq_qtype: RDMA Send Qtype
+ * @rq_qtype: RDMA Receive Qtype
+ * @cq_qtype: RDMA Completion Qtype
+ * @eq_qtype: RDMA Event Qtype
*/
union ionic_lif_identity {
struct {
@@ -439,15 +458,15 @@ union ionic_lif_identity {
struct ionic_lif_logical_qtype eq_qtype;
} __packed rdma;
} __packed;
- __le32 words[512];
+ __le32 words[478];
};
/**
* struct ionic_lif_init_cmd - LIF init command
- * @opcode: opcode
- * @type: LIF type (enum lif_type)
+ * @opcode: Opcode
+ * @type: LIF type (enum ionic_lif_type)
* @index: LIF index
- * @info_pa: destination address for lif info (struct ionic_lif_info)
+ * @info_pa: Destination address for LIF info (struct ionic_lif_info)
*/
struct ionic_lif_init_cmd {
u8 opcode;
@@ -460,7 +479,8 @@ struct ionic_lif_init_cmd {
/**
* struct ionic_lif_init_comp - LIF init command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
+ * @hw_index: Hardware index of the initialized LIF
*/
struct ionic_lif_init_comp {
u8 status;
@@ -469,14 +489,74 @@ struct ionic_lif_init_comp {
u8 rsvd2[12];
};
+/**
+ * struct ionic_q_identify_cmd - queue identify command
+ * @opcode: opcode
+ * @lif_type: LIF type (enum ionic_lif_type)
+ * @type: Logical queue type (enum ionic_logical_qtype)
+ * @ver: Highest queue type version that the driver supports
+ */
+struct ionic_q_identify_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_type;
+ u8 type;
+ u8 ver;
+ u8 rsvd2[58];
+};
+
+/**
+ * struct ionic_q_identify_comp - queue identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @ver: Queue type version that can be used with FW
+ */
+struct ionic_q_identify_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 ver;
+ u8 rsvd2[11];
+};
+
+/**
+ * union ionic_q_identity - queue identity information
+ * @version: Queue type version that can be used with FW
+ * @supported: Bitfield of queue versions, first bit = ver 0
+ * @features: Queue features
+ * @desc_sz: Descriptor size
+ * @comp_sz: Completion descriptor size
+ * @sg_desc_sz: Scatter/Gather descriptor size
+ * @max_sg_elems: Maximum number of Scatter/Gather elements
+ * @sg_desc_stride: Number of Scatter/Gather elements per descriptor
+ */
+union ionic_q_identity {
+ struct {
+ u8 version;
+ u8 supported;
+ u8 rsvd[6];
+#define IONIC_QIDENT_F_CQ 0x01 /* queue has completion ring */
+#define IONIC_QIDENT_F_SG 0x02 /* queue has scatter/gather ring */
+#define IONIC_QIDENT_F_EQ 0x04 /* queue can use event queue */
+#define IONIC_QIDENT_F_CMB 0x08 /* queue is in cmb bar */
+ __le64 features;
+ __le16 desc_sz;
+ __le16 comp_sz;
+ __le16 sg_desc_sz;
+ __le16 max_sg_elems;
+ __le16 sg_desc_stride;
+ };
+ __le32 words[478];
+};
+
/**
* struct ionic_q_init_cmd - Queue init command
* @opcode: opcode
* @type: Logical queue type
- * @ver: Queue version (defines opcode/descriptor scope)
+ * @ver: Queue type version
* @lif_index: LIF index
- * @index: (lif, qtype) relative admin queue index
- * @intr_index: Interrupt control register index
+ * @index: (LIF, qtype) relative admin queue index
+ * @intr_index: Interrupt control register index, or Event queue index
* @pid: Process ID
* @flags:
* IRQ: Interrupt requested on completion
@@ -494,12 +574,11 @@ struct ionic_lif_init_comp {
* descriptors. Values of ring_size <2 and >16 are
* reserved.
* EQ: Enable the Event Queue
- * @cos: Class of service for this queue.
+ * @cos: Class of service for this queue
* @ring_size: Queue ring size, encoded as a log2(size)
* @ring_base: Queue ring base address
* @cq_ring_base: Completion queue ring base address
* @sg_ring_base: Scatter/Gather ring base address
- * @eq_index: Event queue index
*/
struct ionic_q_init_cmd {
u8 opcode;
@@ -516,29 +595,27 @@ struct ionic_q_init_cmd {
#define IONIC_QINIT_F_ENA 0x02 /* Enable the queue */
#define IONIC_QINIT_F_SG 0x04 /* Enable scatter/gather on the queue */
#define IONIC_QINIT_F_EQ 0x08 /* Enable event queue */
-#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */
+#define IONIC_QINIT_F_CMB 0x10 /* Enable cmb-based queue */
+#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */
u8 cos;
u8 ring_size;
__le64 ring_base;
__le64 cq_ring_base;
__le64 sg_ring_base;
- __le32 eq_index;
- u8 rsvd2[16];
+ u8 rsvd2[20];
} __packed;
/**
* struct ionic_q_init_comp - Queue init command completion
- * @status: The status of the command (enum status_code)
- * @ver: Queue version (defines opcode/descriptor scope)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @hw_index: Hardware Queue ID
* @hw_type: Hardware Queue type
* @color: Color
*/
struct ionic_q_init_comp {
u8 status;
- u8 ver;
+ u8 rsvd;
__le16 comp_index;
__le32 hw_index;
u8 hw_type;
@@ -559,10 +636,9 @@ enum ionic_txq_desc_opcode {
/**
* struct ionic_txq_desc - Ethernet Tx queue descriptor format
- * @opcode: Tx operation, see TXQ_DESC_OPCODE_*:
+ * @cmd: Tx operation, see IONIC_TXQ_DESC_OPCODE_*:
*
* IONIC_TXQ_DESC_OPCODE_CSUM_NONE:
- *
* Non-offload send. No segmentation,
* fragmentation or checksum calc/insertion is
* performed by device; packet is prepared
@@ -570,7 +646,6 @@ enum ionic_txq_desc_opcode {
* no further manipulation from device.
*
* IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL:
- *
* Offload 16-bit L4 checksum
* calculation/insertion. The device will
* calculate the L4 checksum value and
@@ -579,14 +654,16 @@ enum ionic_txq_desc_opcode {
* is calculated starting at @csum_start bytes
* into the packet to the end of the packet.
* The checksum insertion position is given
- * in @csum_offset. This feature is only
- * applicable to protocols such as TCP, UDP
- * and ICMP where a standard (i.e. the
- * 'IP-style' checksum) one's complement
- * 16-bit checksum is used, using an IP
- * pseudo-header to seed the calculation.
- * Software will preload the L4 checksum
- * field with the IP pseudo-header checksum.
+ * in @csum_offset, which is the offset from
+ * @csum_start to the checksum field in the L4
+ * header. This feature is only applicable to
+ * protocols such as TCP, UDP and ICMP where a
+ * standard (i.e. the 'IP-style' checksum)
+ * one's complement 16-bit checksum is used,
+ * using an IP pseudo-header to seed the
+ * calculation. Software will preload the L4
+ * checksum field with the IP pseudo-header
+ * checksum.
*
* For tunnel encapsulation, @csum_start and
* @csum_offset refer to the inner L4
@@ -602,7 +679,6 @@ enum ionic_txq_desc_opcode {
* for more info).
*
* IONIC_TXQ_DESC_OPCODE_CSUM_HW:
- *
* Offload 16-bit checksum computation to hardware.
* If @csum_l3 is set then the packet's L3 checksum is
* updated. Similarly, if @csum_l4 is set the the L4
@@ -610,7 +686,6 @@ enum ionic_txq_desc_opcode {
* checksums are also updated.
*
* IONIC_TXQ_DESC_OPCODE_TSO:
- *
* Device preforms TCP segmentation offload
* (TSO). @hdr_len is the number of bytes
* to the end of TCP header (the offset to
@@ -637,40 +712,41 @@ enum ionic_txq_desc_opcode {
* clear CWR in remaining segments.
* @flags:
* vlan:
- * Insert an L2 VLAN header using @vlan_tci.
+ * Insert an L2 VLAN header using @vlan_tci
* encap:
- * Calculate encap header checksum.
+ * Calculate encap header checksum
* csum_l3:
- * Compute L3 header checksum.
+ * Compute L3 header checksum
* csum_l4:
- * Compute L4 header checksum.
+ * Compute L4 header checksum
* tso_sot:
* TSO start
* tso_eot:
* TSO end
* @num_sg_elems: Number of scatter-gather elements in SG
* descriptor
- * @addr: First data buffer's DMA address.
- * (Subsequent data buffers are on txq_sg_desc).
+ * @addr: First data buffer's DMA address
+ * (Subsequent data buffers are on txq_sg_desc)
* @len: First data buffer's length, in bytes
* @vlan_tci: VLAN tag to insert in the packet (if requested
* by @V-bit). Includes .1p and .1q tags
* @hdr_len: Length of packet headers, including
- * encapsulating outer header, if applicable.
- * Valid for opcodes TXQ_DESC_OPCODE_CALC_CSUM and
- * TXQ_DESC_OPCODE_TSO. Should be set to zero for
+ * encapsulating outer header, if applicable
+ * Valid for opcodes IONIC_TXQ_DESC_OPCODE_CALC_CSUM and
+ * IONIC_TXQ_DESC_OPCODE_TSO. Should be set to zero for
* all other modes. For
- * TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length
+ * IONIC_TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length
* of headers up to inner-most L4 header. For
- * TXQ_DESC_OPCODE_TSO, @hdr_len is up to
+ * IONIC_TXQ_DESC_OPCODE_TSO, @hdr_len is up to
* inner-most L4 payload, so inclusive of
* inner-most L4 header.
- * @mss: Desired MSS value for TSO. Only applicable for
- * TXQ_DESC_OPCODE_TSO.
- * @csum_start: Offset into inner-most L3 header of checksum
- * @csum_offset: Offset into inner-most L4 header of checksum
+ * @mss: Desired MSS value for TSO; only applicable for
+ * IONIC_TXQ_DESC_OPCODE_TSO
+ * @csum_start: Offset from packet to first byte checked in L4 checksum
+ * @csum_offset: Offset from csum_start to L4 checksum field
*/
-
+struct ionic_txq_desc {
+ __le64 cmd;
#define IONIC_TXQ_DESC_OPCODE_MASK 0xf
#define IONIC_TXQ_DESC_OPCODE_SHIFT 4
#define IONIC_TXQ_DESC_FLAGS_MASK 0xf
@@ -692,8 +768,6 @@ enum ionic_txq_desc_opcode {
#define IONIC_TXQ_DESC_FLAG_TSO_SOT 0x4
#define IONIC_TXQ_DESC_FLAG_TSO_EOT 0x8
-struct ionic_txq_desc {
- __le64 cmd;
__le16 len;
union {
__le16 vlan_tci;
@@ -733,28 +807,38 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
*addr = (cmd >> IONIC_TXQ_DESC_ADDR_SHIFT) & IONIC_TXQ_DESC_ADDR_MASK;
};
-#define IONIC_TX_MAX_SG_ELEMS 8
-#define IONIC_RX_MAX_SG_ELEMS 8
-
/**
- * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
+ * struct ionic_txq_sg_elem - Transmit scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
+struct ionic_txq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+};
+
+/**
+ * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
+ * @elems: Scatter-gather elements
+ */
struct ionic_txq_sg_desc {
- struct ionic_txq_sg_elem {
- __le64 addr;
- __le16 len;
- __le16 rsvd[3];
- } elems[IONIC_TX_MAX_SG_ELEMS];
+#define IONIC_TX_MAX_SG_ELEMS 8
+#define IONIC_TX_SG_DESC_STRIDE 8
+ struct ionic_txq_sg_elem elems[IONIC_TX_MAX_SG_ELEMS];
+};
+
+struct ionic_txq_sg_desc_v1 {
+#define IONIC_TX_MAX_SG_ELEMS_V1 15
+#define IONIC_TX_SG_DESC_STRIDE_V1 16
+ struct ionic_txq_sg_elem elems[IONIC_TX_SG_DESC_STRIDE_V1];
};
/**
* struct ionic_txq_comp - Ethernet transmit queue completion descriptor
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @color: Color bit.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @color: Color bit
*/
struct ionic_txq_comp {
u8 status;
@@ -771,16 +855,15 @@ enum ionic_rxq_desc_opcode {
/**
* struct ionic_rxq_desc - Ethernet Rx queue descriptor format
- * @opcode: Rx operation, see RXQ_DESC_OPCODE_*:
- *
- * RXQ_DESC_OPCODE_SIMPLE:
+ * @opcode: Rx operation, see IONIC_RXQ_DESC_OPCODE_*:
*
+ * IONIC_RXQ_DESC_OPCODE_SIMPLE:
* Receive full packet into data buffer
* starting at @addr. Results of
* receive, including actual bytes received,
* are recorded in Rx completion descriptor.
*
- * @len: Data buffer's length, in bytes.
+ * @len: Data buffer's length, in bytes
* @addr: Data buffer's DMA address
*/
struct ionic_rxq_desc {
@@ -791,26 +874,33 @@ struct ionic_rxq_desc {
};
/**
- * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
+ * struct ionic_rxq_sg_elem - Receive scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
+struct ionic_rxq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+};
+
+/**
+ * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
+ * @elems: Scatter-gather elements
+ */
struct ionic_rxq_sg_desc {
- struct ionic_rxq_sg_elem {
- __le64 addr;
- __le16 len;
- __le16 rsvd[3];
- } elems[IONIC_RX_MAX_SG_ELEMS];
+#define IONIC_RX_MAX_SG_ELEMS 8
+#define IONIC_RX_SG_DESC_STRIDE 8
+ struct ionic_rxq_sg_elem elems[IONIC_RX_SG_DESC_STRIDE];
};
/**
* struct ionic_rxq_comp - Ethernet receive queue completion descriptor
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @num_sg_elems: Number of SG elements used by this descriptor
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @rss_hash: 32-bit RSS hash
- * @csum: 16-bit sum of the packet's L2 payload.
+ * @csum: 16-bit sum of the packet's L2 payload
* If the packet's L2 payload is odd length, an extra
* zero-value byte is included in the @csum calculation but
* not included in @len.
@@ -818,33 +908,51 @@ struct ionic_rxq_sg_desc {
* set. Includes .1p and .1q tags.
* @len: Received packet length, in bytes. Excludes FCS.
* @csum_calc L2 payload checksum is computed or not
- * @csum_tcp_ok: The TCP checksum calculated by the device
- * matched the checksum in the receive packet's
- * TCP header
- * @csum_tcp_bad: The TCP checksum calculated by the device did
- * not match the checksum in the receive packet's
- * TCP header.
- * @csum_udp_ok: The UDP checksum calculated by the device
- * matched the checksum in the receive packet's
- * UDP header
- * @csum_udp_bad: The UDP checksum calculated by the device did
- * not match the checksum in the receive packet's
- * UDP header.
- * @csum_ip_ok: The IPv4 checksum calculated by the device
- * matched the checksum in the receive packet's
- * first IPv4 header. If the receive packet
- * contains both a tunnel IPv4 header and a
- * transport IPv4 header, the device validates the
- * checksum for the both IPv4 headers.
- * @csum_ip_bad: The IPv4 checksum calculated by the device did
- * not match the checksum in the receive packet's
- * first IPv4 header. If the receive packet
- * contains both a tunnel IPv4 header and a
- * transport IPv4 header, the device validates the
- * checksum for both IP headers.
- * @VLAN: VLAN header was stripped and placed in @vlan_tci.
- * @pkt_type: Packet type
- * @color: Color bit.
+ * @csum_flags: See IONIC_RXQ_COMP_CSUM_F_*:
+ *
+ * IONIC_RXQ_COMP_CSUM_F_TCP_OK:
+ * The TCP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * TCP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_TCP_BAD:
+ * The TCP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * TCP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_UDP_OK:
+ * The UDP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * UDP header
+ *
+ * IONIC_RXQ_COMP_CSUM_F_UDP_BAD:
+ * The UDP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * UDP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_IP_OK:
+ * The IPv4 checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for the both IPv4 headers.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_IP_BAD:
+ * The IPv4 checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for both IP headers.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_VLAN:
+ * The VLAN header was stripped and placed in @vlan_tci.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_CALC:
+ * The checksum was calculated by the device.
+ *
+ * @pkt_type_color: Packet type and color bit; see IONIC_RXQ_COMP_PKT_TYPE_MASK
*/
struct ionic_rxq_comp {
u8 status;
@@ -891,8 +999,8 @@ enum ionic_eth_hw_features {
IONIC_ETH_HW_TSO_ECN = BIT(10),
IONIC_ETH_HW_TSO_GRE = BIT(11),
IONIC_ETH_HW_TSO_GRE_CSUM = BIT(12),
- IONIC_ETH_HW_TSO_IPXIP4 = BIT(13),
- IONIC_ETH_HW_TSO_IPXIP6 = BIT(14),
+ IONIC_ETH_HW_TSO_IPXIP4 = BIT(13),
+ IONIC_ETH_HW_TSO_IPXIP6 = BIT(14),
IONIC_ETH_HW_TSO_UDP = BIT(15),
IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16),
};
@@ -923,7 +1031,10 @@ enum q_control_oper {
};
/**
- * Physical connection type
+ * enum ionic_phy_type - Physical connection type
+ * @IONIC_PHY_TYPE_NONE: No PHY installed
+ * @IONIC_PHY_TYPE_COPPER: Copper PHY
+ * @IONIC_PHY_TYPE_FIBER: Fiber PHY
*/
enum ionic_phy_type {
IONIC_PHY_TYPE_NONE = 0,
@@ -932,18 +1043,23 @@ enum ionic_phy_type {
};
/**
- * Transceiver status
+ * enum ionic_xcvr_state - Transceiver status
+ * @IONIC_XCVR_STATE_REMOVED: Transceiver removed
+ * @IONIC_XCVR_STATE_INSERTED: Transceiver inserted
+ * @IONIC_XCVR_STATE_PENDING: Transceiver pending
+ * @IONIC_XCVR_STATE_SPROM_READ: Transceiver data read
+ * @IONIC_XCVR_STATE_SPROM_READ_ERR: Transceiver data read error
*/
enum ionic_xcvr_state {
IONIC_XCVR_STATE_REMOVED = 0,
IONIC_XCVR_STATE_INSERTED = 1,
IONIC_XCVR_STATE_PENDING = 2,
IONIC_XCVR_STATE_SPROM_READ = 3,
- IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
+ IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
};
/**
- * Supported link modes
+ * enum ionic_xcvr_pid - Supported link modes
*/
enum ionic_xcvr_pid {
IONIC_XCVR_PID_UNKNOWN = 0,
@@ -977,64 +1093,83 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_10GBASE_CU = 68,
IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69,
IONIC_XCVR_PID_QSFP_100G_PSM4 = 70,
+ IONIC_XCVR_PID_SFP_25GBASE_ACC = 71,
};
/**
- * Port types
+ * enum ionic_port_type - Port types
+ * @IONIC_PORT_TYPE_NONE: Port type not configured
+ * @IONIC_PORT_TYPE_ETH: Port carries ethernet traffic (inband)
+ * @IONIC_PORT_TYPE_MGMT: Port carries mgmt traffic (out-of-band)
*/
enum ionic_port_type {
- IONIC_PORT_TYPE_NONE = 0, /* port type not configured */
- IONIC_PORT_TYPE_ETH = 1, /* port carries ethernet traffic (inband) */
- IONIC_PORT_TYPE_MGMT = 2, /* port carries mgmt traffic (out-of-band) */
+ IONIC_PORT_TYPE_NONE = 0,
+ IONIC_PORT_TYPE_ETH = 1,
+ IONIC_PORT_TYPE_MGMT = 2,
};
/**
- * Port config state
+ * enum ionic_port_admin_state - Port config state
+ * @IONIC_PORT_ADMIN_STATE_NONE: Port admin state not configured
+ * @IONIC_PORT_ADMIN_STATE_DOWN: Port admin disabled
+ * @IONIC_PORT_ADMIN_STATE_UP: Port admin enabled
*/
enum ionic_port_admin_state {
- IONIC_PORT_ADMIN_STATE_NONE = 0, /* port admin state not configured */
- IONIC_PORT_ADMIN_STATE_DOWN = 1, /* port is admin disabled */
- IONIC_PORT_ADMIN_STATE_UP = 2, /* port is admin enabled */
+ IONIC_PORT_ADMIN_STATE_NONE = 0,
+ IONIC_PORT_ADMIN_STATE_DOWN = 1,
+ IONIC_PORT_ADMIN_STATE_UP = 2,
};
/**
- * Port operational status
+ * enum ionic_port_oper_status - Port operational status
+ * @IONIC_PORT_OPER_STATUS_NONE: Port disabled
+ * @IONIC_PORT_OPER_STATUS_UP: Port link status up
+ * @IONIC_PORT_OPER_STATUS_DOWN: Port link status down
*/
enum ionic_port_oper_status {
- IONIC_PORT_OPER_STATUS_NONE = 0, /* port is disabled */
- IONIC_PORT_OPER_STATUS_UP = 1, /* port is linked up */
- IONIC_PORT_OPER_STATUS_DOWN = 2, /* port link status is down */
+ IONIC_PORT_OPER_STATUS_NONE = 0,
+ IONIC_PORT_OPER_STATUS_UP = 1,
+ IONIC_PORT_OPER_STATUS_DOWN = 2,
};
/**
- * Ethernet Forward error correction (fec) modes
+ * enum ionic_port_fec_type - Ethernet Forward error correction (FEC) modes
+ * @IONIC_PORT_FEC_TYPE_NONE: FEC Disabled
+ * @IONIC_PORT_FEC_TYPE_FC: FireCode FEC
+ * @IONIC_PORT_FEC_TYPE_RS: ReedSolomon FEC
*/
enum ionic_port_fec_type {
- IONIC_PORT_FEC_TYPE_NONE = 0, /* Disabled */
- IONIC_PORT_FEC_TYPE_FC = 1, /* FireCode */
- IONIC_PORT_FEC_TYPE_RS = 2, /* ReedSolomon */
+ IONIC_PORT_FEC_TYPE_NONE = 0,
+ IONIC_PORT_FEC_TYPE_FC = 1,
+ IONIC_PORT_FEC_TYPE_RS = 2,
};
/**
- * Ethernet pause (flow control) modes
+ * enum ionic_port_pause_type - Ethernet pause (flow control) modes
+ * @IONIC_PORT_PAUSE_TYPE_NONE: Disable Pause
+ * @IONIC_PORT_PAUSE_TYPE_LINK: Link level pause
+ * @IONIC_PORT_PAUSE_TYPE_PFC: Priority-Flow Control
*/
enum ionic_port_pause_type {
- IONIC_PORT_PAUSE_TYPE_NONE = 0, /* Disable Pause */
- IONIC_PORT_PAUSE_TYPE_LINK = 1, /* Link level pause */
- IONIC_PORT_PAUSE_TYPE_PFC = 2, /* Priority-Flow control */
+ IONIC_PORT_PAUSE_TYPE_NONE = 0,
+ IONIC_PORT_PAUSE_TYPE_LINK = 1,
+ IONIC_PORT_PAUSE_TYPE_PFC = 2,
};
/**
- * Loopback modes
+ * enum ionic_port_loopback_mode - Loopback modes
+ * @IONIC_PORT_LOOPBACK_MODE_NONE: Disable loopback
+ * @IONIC_PORT_LOOPBACK_MODE_MAC: MAC loopback
+ * @IONIC_PORT_LOOPBACK_MODE_PHY: PHY/SerDes loopback
*/
enum ionic_port_loopback_mode {
- IONIC_PORT_LOOPBACK_MODE_NONE = 0, /* Disable loopback */
- IONIC_PORT_LOOPBACK_MODE_MAC = 1, /* MAC loopback */
- IONIC_PORT_LOOPBACK_MODE_PHY = 2, /* PHY/Serdes loopback */
+ IONIC_PORT_LOOPBACK_MODE_NONE = 0,
+ IONIC_PORT_LOOPBACK_MODE_MAC = 1,
+ IONIC_PORT_LOOPBACK_MODE_PHY = 2,
};
/**
- * Transceiver Status information
+ * struct ionic_xcvr_status - Transceiver Status information
* @state: Transceiver status (enum ionic_xcvr_state)
* @phy: Physical connection type (enum ionic_phy_type)
* @pid: Transceiver link mode (enum pid)
@@ -1048,7 +1183,7 @@ struct ionic_xcvr_status {
};
/**
- * Port configuration
+ * union ionic_port_config - Port configuration
* @speed: port speed (in Mbps)
* @mtu: mtu
* @state: port admin state (enum port_admin_state)
@@ -1081,17 +1216,21 @@ union ionic_port_config {
};
/**
- * Port Status information
+ * struct ionic_port_status - Port Status information
* @status: link status (enum ionic_port_oper_status)
* @id: port id
* @speed: link speed (in Mbps)
+ * @link_down_count: number of times link went from up to down
+ * @fec_type: FEC type (enum ionic_port_fec_type)
* @xcvr: tranceiver status
*/
struct ionic_port_status {
__le32 id;
__le32 speed;
u8 status;
- u8 rsvd[51];
+ __le16 link_down_count;
+ u8 fec_type;
+ u8 rsvd[48];
struct ionic_xcvr_status xcvr;
} __packed;
@@ -1110,7 +1249,7 @@ struct ionic_port_identify_cmd {
/**
* struct ionic_port_identify_comp - Port identify command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
*/
struct ionic_port_identify_comp {
@@ -1135,7 +1274,7 @@ struct ionic_port_init_cmd {
/**
* struct ionic_port_init_comp - Port initialization command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_port_init_comp {
u8 status;
@@ -1155,7 +1294,7 @@ struct ionic_port_reset_cmd {
/**
* struct ionic_port_reset_comp - Port reset command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_port_reset_comp {
u8 status;
@@ -1163,15 +1302,23 @@ struct ionic_port_reset_comp {
};
/**
- * enum stats_ctl_cmd - List of commands for stats control
+ * enum ionic_stats_ctl_cmd - List of commands for stats control
+ * @IONIC_STATS_CTL_RESET: Reset statistics
*/
enum ionic_stats_ctl_cmd {
IONIC_STATS_CTL_RESET = 0,
};
-
/**
* enum ionic_port_attr - List of device attributes
+ * @IONIC_PORT_ATTR_STATE: Port state attribute
+ * @IONIC_PORT_ATTR_SPEED: Port speed attribute
+ * @IONIC_PORT_ATTR_MTU: Port MTU attribute
+ * @IONIC_PORT_ATTR_AUTONEG: Port autonegotiation attribute
+ * @IONIC_PORT_ATTR_FEC: Port FEC attribute
+ * @IONIC_PORT_ATTR_PAUSE: Port pause attribute
+ * @IONIC_PORT_ATTR_LOOPBACK: Port loopback attribute
+ * @IONIC_PORT_ATTR_STATS_CTRL: Port statistics control attribute
*/
enum ionic_port_attr {
IONIC_PORT_ATTR_STATE = 0,
@@ -1186,9 +1333,17 @@ enum ionic_port_attr {
/**
* struct ionic_port_setattr_cmd - Set port attributes on the NIC
- * @opcode: Opcode
- * @index: port index
- * @attr: Attribute type (enum ionic_port_attr)
+ * @opcode: Opcode
+ * @index: Port index
+ * @attr: Attribute type (enum ionic_port_attr)
+ * @state: Port state
+ * @speed: Port speed
+ * @mtu: Port MTU
+ * @an_enable: Port autonegotiation setting
+ * @fec_type: Port FEC type setting
+ * @pause_type: Port pause type setting
+ * @loopback_mode: Port loopback mode
+ * @stats_ctl: Port stats setting
*/
struct ionic_port_setattr_cmd {
u8 opcode;
@@ -1203,14 +1358,14 @@ struct ionic_port_setattr_cmd {
u8 fec_type;
u8 pause_type;
u8 loopback_mode;
- u8 stats_ctl;
+ u8 stats_ctl;
u8 rsvd2[60];
};
};
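
For illustration only (not part of this patch): a caller picks one attribute per command and fills only the matching union member. A minimal sketch for changing the port MTU, assuming the device-command union exposes a port_setattr member like the other port commands, and that mtu is a __le32 field:

static void ionic_example_set_port_mtu(union ionic_dev_cmd *cmd, u32 mtu)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->port_setattr.opcode = IONIC_CMD_PORT_SETATTR;
	cmd->port_setattr.index = 0;			/* single uplink port */
	cmd->port_setattr.attr = IONIC_PORT_ATTR_MTU;	/* select the MTU attribute */
	cmd->port_setattr.mtu = cpu_to_le32(mtu);	/* mtu width assumed __le32 */
}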
/**
* struct ionic_port_setattr_comp - Port set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @color: Color bit
*/
struct ionic_port_setattr_comp {
@@ -1234,8 +1389,15 @@ struct ionic_port_getattr_cmd {
/**
* struct ionic_port_getattr_comp - Port get attr command completion
- * @status: The status of the command (enum status_code)
- * @color: Color bit
+ * @status: Status of the command (enum ionic_status_code)
+ * @state: Port state
+ * @speed: Port speed
+ * @mtu: Port MTU
+ * @an_enable: Port autonegotiation setting
+ * @fec_type: Port FEC type setting
+ * @pause_type: Port pause type setting
+ * @loopback_mode: Port loopback mode
+ * @color: Color bit
*/
struct ionic_port_getattr_comp {
u8 status;
@@ -1254,12 +1416,12 @@ struct ionic_port_getattr_comp {
};
/**
- * struct ionic_lif_status - Lif status register
+ * struct ionic_lif_status - LIF status register
* @eid: most recent NotifyQ event id
- * @port_num: port the lif is connected to
+ * @port_num: port the LIF is connected to
* @link_status: port status (enum ionic_port_oper_status)
* @link_speed: speed of link in Mbps
- * @link_down_count: number of times link status changes
+ * @link_down_count: number of times link went from up to down
*/
struct ionic_lif_status {
__le64 eid;
@@ -1293,6 +1455,9 @@ enum ionic_dev_state {
/**
* enum ionic_dev_attr - List of device attributes
+ * @IONIC_DEV_ATTR_STATE: Device state attribute
+ * @IONIC_DEV_ATTR_NAME: Device name attribute
+ * @IONIC_DEV_ATTR_FEATURES: Device feature attributes
*/
enum ionic_dev_attr {
IONIC_DEV_ATTR_STATE = 0,
@@ -1322,7 +1487,7 @@ struct ionic_dev_setattr_cmd {
/**
* struct ionic_dev_setattr_comp - Device set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @features: Device features
* @color: Color bit
*/
@@ -1349,7 +1514,7 @@ struct ionic_dev_getattr_cmd {
/**
* struct ionic_dev_setattr_comp - Device set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @features: Device features
* @color: Color bit
*/
@@ -1379,6 +1544,13 @@ enum ionic_rss_hash_types {
/**
* enum ionic_lif_attr - List of LIF attributes
+ * @IONIC_LIF_ATTR_STATE: LIF state attribute
+ * @IONIC_LIF_ATTR_NAME: LIF name attribute
+ * @IONIC_LIF_ATTR_MTU: LIF MTU attribute
+ * @IONIC_LIF_ATTR_MAC: LIF MAC attribute
+ * @IONIC_LIF_ATTR_FEATURES: LIF features attribute
+ * @IONIC_LIF_ATTR_RSS: LIF RSS attribute
+ * @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
*/
enum ionic_lif_attr {
IONIC_LIF_ATTR_STATE = 0,
@@ -1393,18 +1565,18 @@ enum ionic_lif_attr {
/**
* struct ionic_lif_setattr_cmd - Set LIF attributes on the NIC
* @opcode: Opcode
- * @type: Attribute type (enum ionic_lif_attr)
+ * @attr: Attribute type (enum ionic_lif_attr)
* @index: LIF index
- * @state: lif state (enum lif_state)
+ * @state: LIF state (enum ionic_lif_state)
* @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
* @features: Features (enum ionic_eth_hw_features)
* @rss: RSS properties
- * @types: The hash types to enable (see rss_hash_types).
- * @key: The hash secret key.
- * @addr: Address for the indirection table shared memory.
- * @stats_ctl: stats control commands (enum stats_ctl_cmd)
+ * @types: The hash types to enable (see rss_hash_types)
+ * @key: The hash secret key
+ * @addr: Address for the indirection table shared memory
+ * @stats_ctl: stats control commands (enum ionic_stats_ctl_cmd)
*/
struct ionic_lif_setattr_cmd {
u8 opcode;
@@ -1422,16 +1594,15 @@ struct ionic_lif_setattr_cmd {
u8 rsvd[6];
__le64 addr;
} rss;
- u8 stats_ctl;
+ u8 stats_ctl;
u8 rsvd[60];
} __packed;
};
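
A hedged sketch of the RSS variant of this command; the helper is hypothetical, and the rss.types/key/addr member names are taken from the kernel-doc above:

static void ionic_example_lif_set_rss(struct ionic_lif_setattr_cmd *cmd,
				      u16 lif_index, u16 hash_types,
				      const u8 *key, dma_addr_t indir_pa)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = IONIC_CMD_LIF_SETATTR;
	cmd->index = cpu_to_le16(lif_index);
	cmd->attr = IONIC_LIF_ATTR_RSS;
	cmd->rss.types = cpu_to_le16(hash_types);		/* enabled hash types */
	memcpy(cmd->rss.key, key, IONIC_RSS_HASH_KEY_SIZE);	/* hash secret key */
	cmd->rss.addr = cpu_to_le64(indir_pa);			/* indirection table DMA address */
}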
/**
* struct ionic_lif_setattr_comp - LIF set attr command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @features: features (enum ionic_eth_hw_features)
* @color: Color bit
*/
@@ -1461,10 +1632,9 @@ struct ionic_lif_getattr_cmd {
/**
* struct ionic_lif_getattr_comp - LIF get attr command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @state: lif state (enum lif_state)
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @state: LIF state (enum ionic_lif_state)
* @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
@@ -1486,11 +1656,12 @@ struct ionic_lif_getattr_comp {
};
enum ionic_rx_mode {
- IONIC_RX_MODE_F_UNICAST = BIT(0),
- IONIC_RX_MODE_F_MULTICAST = BIT(1),
- IONIC_RX_MODE_F_BROADCAST = BIT(2),
- IONIC_RX_MODE_F_PROMISC = BIT(3),
- IONIC_RX_MODE_F_ALLMULTI = BIT(4),
+ IONIC_RX_MODE_F_UNICAST = BIT(0),
+ IONIC_RX_MODE_F_MULTICAST = BIT(1),
+ IONIC_RX_MODE_F_BROADCAST = BIT(2),
+ IONIC_RX_MODE_F_PROMISC = BIT(3),
+ IONIC_RX_MODE_F_ALLMULTI = BIT(4),
+ IONIC_RX_MODE_F_RDMA_SNIFFER = BIT(5),
};
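
As an aside, these flags are meant to be OR-ed into a single rx_mode word; a minimal sketch of how a driver might compose it:

static u16 ionic_example_rx_mode(bool promisc, bool allmulti)
{
	/* normal reception: known unicast, known multicast, broadcast */
	u16 rx_mode = IONIC_RX_MODE_F_UNICAST |
		      IONIC_RX_MODE_F_MULTICAST |
		      IONIC_RX_MODE_F_BROADCAST;

	if (promisc)
		rx_mode |= IONIC_RX_MODE_F_PROMISC;	/* accept any packet */
	if (allmulti)
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;	/* accept any multicast */

	return rx_mode;
}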
/**
@@ -1498,11 +1669,12 @@ enum ionic_rx_mode {
* @opcode: opcode
* @lif_index: LIF index
* @rx_mode: Rx mode flags:
- * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets.
- * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets.
- * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets.
- * IONIC_RX_MODE_F_PROMISC: Accept any packets.
- * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets.
+ * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets
+ * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets
+ * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets
+ * IONIC_RX_MODE_F_PROMISC: Accept any packets
+ * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets
+ * IONIC_RX_MODE_F_RDMA_SNIFFER: Sniff RDMA packets
*/
struct ionic_rx_mode_set_cmd {
u8 opcode;
@@ -1526,9 +1698,14 @@ enum ionic_rx_filter_match_type {
* @qtype: Queue type
* @lif_index: LIF index
* @qid: Queue ID
- * @match: Rx filter match type. (See IONIC_RX_FILTER_MATCH_xxx)
- * @vlan: VLAN ID
- * @addr: MAC address (network-byte order)
+ * @match: Rx filter match type (see IONIC_RX_FILTER_MATCH_xxx)
+ * @vlan: VLAN filter
+ * @vlan.vlan: VLAN ID
+ * @mac: MAC filter
+ * @mac.addr: MAC address (network-byte order)
+ * @mac_vlan: MACVLAN filter
+ * @mac_vlan.vlan: VLAN ID
+ * @mac_vlan.addr: MAC address (network-byte order)
*/
struct ionic_rx_filter_add_cmd {
u8 opcode;
@@ -1553,11 +1730,10 @@ struct ionic_rx_filter_add_cmd {
/**
* struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @filter_id: Filter ID
- * @color: Color bit.
+ * @color: Color bit
*/
struct ionic_rx_filter_add_comp {
u8 status;
@@ -1584,63 +1760,6 @@ struct ionic_rx_filter_del_cmd {
typedef struct ionic_admin_comp ionic_rx_filter_del_comp;
-/**
- * struct ionic_qos_identify_cmd - QoS identify command
- * @opcode: opcode
- * @ver: Highest version of identify supported by driver
- *
- */
-struct ionic_qos_identify_cmd {
- u8 opcode;
- u8 ver;
- u8 rsvd[62];
-};
-
-/**
- * struct ionic_qos_identify_comp - QoS identify command completion
- * @status: The status of the command (enum status_code)
- * @ver: Version of identify returned by device
- */
-struct ionic_qos_identify_comp {
- u8 status;
- u8 ver;
- u8 rsvd[14];
-};
-
-#define IONIC_QOS_CLASS_MAX 7
-#define IONIC_QOS_CLASS_NAME_SZ 32
-#define IONIC_QOS_DSCP_MAX_VALUES 64
-
-/**
- * enum ionic_qos_class
- */
-enum ionic_qos_class {
- IONIC_QOS_CLASS_DEFAULT = 0,
- IONIC_QOS_CLASS_USER_DEFINED_1 = 1,
- IONIC_QOS_CLASS_USER_DEFINED_2 = 2,
- IONIC_QOS_CLASS_USER_DEFINED_3 = 3,
- IONIC_QOS_CLASS_USER_DEFINED_4 = 4,
- IONIC_QOS_CLASS_USER_DEFINED_5 = 5,
- IONIC_QOS_CLASS_USER_DEFINED_6 = 6,
-};
-
-/**
- * enum ionic_qos_class_type - Traffic classification criteria
- */
-enum ionic_qos_class_type {
- IONIC_QOS_CLASS_TYPE_NONE = 0,
- IONIC_QOS_CLASS_TYPE_PCP = 1, /* Dot1Q pcp */
- IONIC_QOS_CLASS_TYPE_DSCP = 2, /* IP dscp */
-};
-
-/**
- * enum ionic_qos_sched_type - Qos class scheduling type
- */
-enum ionic_qos_sched_type {
- IONIC_QOS_SCHED_TYPE_STRICT = 0, /* Strict priority */
- IONIC_QOS_SCHED_TYPE_DWRR = 1, /* Deficit weighted round-robin */
-};
-
enum ionic_vf_attr {
IONIC_VF_ATTR_SPOOFCHK = 1,
IONIC_VF_ATTR_TRUST = 2,
@@ -1652,26 +1771,29 @@ enum ionic_vf_attr {
};
/**
- * VF link status
+ * enum ionic_vf_link_status - Virtual Function link status
+ * @IONIC_VF_LINK_STATUS_AUTO: Use link state of the uplink
+ * @IONIC_VF_LINK_STATUS_UP: Link always up
+ * @IONIC_VF_LINK_STATUS_DOWN: Link always down
*/
enum ionic_vf_link_status {
- IONIC_VF_LINK_STATUS_AUTO = 0, /* link state of the uplink */
- IONIC_VF_LINK_STATUS_UP = 1, /* link is always up */
- IONIC_VF_LINK_STATUS_DOWN = 2, /* link is always down */
+ IONIC_VF_LINK_STATUS_AUTO = 0,
+ IONIC_VF_LINK_STATUS_UP = 1,
+ IONIC_VF_LINK_STATUS_DOWN = 2,
};
/**
* struct ionic_vf_setattr_cmd - Set VF attributes on the NIC
* @opcode: Opcode
- * @index: VF index
* @attr: Attribute type (enum ionic_vf_attr)
- * macaddr mac address
- * vlanid vlan ID
- * maxrate max Tx rate in Mbps
- * spoofchk enable address spoof checking
- * trust enable VF trust
- * linkstate set link up or down
- * stats_pa set DMA address for VF stats
+ * @vf_index: VF index
+ * @macaddr: mac address
+ * @vlanid: vlan ID
+ * @maxrate: max Tx rate in Mbps
+ * @spoofchk: enable address spoof checking
+ * @trust: enable VF trust
+ * @linkstate: set link up or down
+ * @stats_pa: set DMA address for VF stats
*/
struct ionic_vf_setattr_cmd {
u8 opcode;
@@ -1701,8 +1823,8 @@ struct ionic_vf_setattr_comp {
/**
* struct ionic_vf_getattr_cmd - Get VF attributes from the NIC
* @opcode: Opcode
- * @index: VF index
* @attr: Attribute type (enum ionic_vf_attr)
+ * @vf_index: VF index
*/
struct ionic_vf_getattr_cmd {
u8 opcode;
@@ -1729,19 +1851,85 @@ struct ionic_vf_getattr_comp {
};
/**
- * union ionic_qos_config - Qos configuration structure
+ * struct ionic_qos_identify_cmd - QoS identify command
+ * @opcode: opcode
+ * @ver: Highest version of identify supported by driver
+ *
+ */
+struct ionic_qos_identify_cmd {
+ u8 opcode;
+ u8 ver;
+ u8 rsvd[62];
+};
+
+/**
+ * struct ionic_qos_identify_comp - QoS identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @ver: Version of identify returned by device
+ */
+struct ionic_qos_identify_comp {
+ u8 status;
+ u8 ver;
+ u8 rsvd[14];
+};
+
+#define IONIC_QOS_TC_MAX 8
+/* Capri max supported, should be renamed. */
+#define IONIC_QOS_CLASS_MAX 7
+#define IONIC_QOS_PCP_MAX 8
+#define IONIC_QOS_CLASS_NAME_SZ 32
+#define IONIC_QOS_DSCP_MAX 64
+#define IONIC_QOS_ALL_PCP 0xFF
+
+/**
+ * enum ionic_qos_class - QoS traffic class identifiers
+ */
+enum ionic_qos_class {
+ IONIC_QOS_CLASS_DEFAULT = 0,
+ IONIC_QOS_CLASS_USER_DEFINED_1 = 1,
+ IONIC_QOS_CLASS_USER_DEFINED_2 = 2,
+ IONIC_QOS_CLASS_USER_DEFINED_3 = 3,
+ IONIC_QOS_CLASS_USER_DEFINED_4 = 4,
+ IONIC_QOS_CLASS_USER_DEFINED_5 = 5,
+ IONIC_QOS_CLASS_USER_DEFINED_6 = 6,
+};
+
+/**
+ * enum ionic_qos_class_type - Traffic classification criteria
+ * @IONIC_QOS_CLASS_TYPE_NONE: No QoS
+ * @IONIC_QOS_CLASS_TYPE_PCP: Dot1Q PCP
+ * @IONIC_QOS_CLASS_TYPE_DSCP: IP DSCP
+ */
+enum ionic_qos_class_type {
+ IONIC_QOS_CLASS_TYPE_NONE = 0,
+ IONIC_QOS_CLASS_TYPE_PCP = 1,
+ IONIC_QOS_CLASS_TYPE_DSCP = 2,
+};
+
+/**
+ * enum ionic_qos_sched_type - QoS class scheduling type
+ * @IONIC_QOS_SCHED_TYPE_STRICT: Strict priority
+ * @IONIC_QOS_SCHED_TYPE_DWRR: Deficit weighted round-robin
+ */
+enum ionic_qos_sched_type {
+ IONIC_QOS_SCHED_TYPE_STRICT = 0,
+ IONIC_QOS_SCHED_TYPE_DWRR = 1,
+};
+
+/**
+ * union ionic_qos_config - QoS configuration structure
* @flags: Configuration flags
* IONIC_QOS_CONFIG_F_ENABLE enable
- * IONIC_QOS_CONFIG_F_DROP drop/nodrop
+ * IONIC_QOS_CONFIG_F_NO_DROP drop/nodrop
* IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite
* IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite
- * @sched_type: Qos class scheduling type (enum ionic_qos_sched_type)
- * @class_type: Qos class type (enum ionic_qos_class_type)
- * @pause_type: Qos pause type (enum ionic_qos_pause_type)
- * @name: Qos class name
+ * @sched_type: QoS class scheduling type (enum ionic_qos_sched_type)
+ * @class_type: QoS class type (enum ionic_qos_class_type)
+ * @pause_type: QoS pause type (enum ionic_qos_pause_type)
+ * @name: QoS class name
* @mtu: MTU of the class
- * @pfc_dot1q_pcp: Pcp value for pause frames (valid iff F_NODROP)
- * @dwrr_weight: Qos class scheduling weight
+ * @pfc_cos: Priority-Flow Control class of service
+ * @dwrr_weight: QoS class scheduling weight
* @strict_rlmt: Rate limit for strict priority scheduling
* @rw_dot1q_pcp: Rewrite dot1q pcp to this value (valid iff F_RW_DOT1Q_PCP)
* @rw_ip_dscp: Rewrite ip dscp to this value (valid iff F_RW_IP_DSCP)
@@ -1752,7 +1940,8 @@ struct ionic_vf_getattr_comp {
union ionic_qos_config {
struct {
#define IONIC_QOS_CONFIG_F_ENABLE BIT(0)
-#define IONIC_QOS_CONFIG_F_DROP BIT(1)
+#define IONIC_QOS_CONFIG_F_NO_DROP BIT(1)
+/* Used to rewrite PCP or DSCP value. */
#define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP BIT(2)
#define IONIC_QOS_CONFIG_F_RW_IP_DSCP BIT(3)
u8 flags;
@@ -1769,6 +1958,7 @@ union ionic_qos_config {
__le64 strict_rlmt;
};
/* marking */
+ /* Used to rewrite PCP or DSCP value. */
union {
u8 rw_dot1q_pcp;
u8 rw_ip_dscp;
@@ -1778,7 +1968,7 @@ union ionic_qos_config {
u8 dot1q_pcp;
struct {
u8 ndscp;
- u8 ip_dscp[IONIC_QOS_DSCP_MAX_VALUES];
+ u8 ip_dscp[IONIC_QOS_DSCP_MAX];
};
};
};
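
A minimal sketch, assuming the field widths implied by the kernel-doc above (flags is u8, mtu is __le32), of a no-drop class that classifies on Dot1Q PCP and is scheduled with DWRR:

static void ionic_example_qos_class(union ionic_qos_config *qos)
{
	memset(qos, 0, sizeof(*qos));
	qos->flags = IONIC_QOS_CONFIG_F_ENABLE | IONIC_QOS_CONFIG_F_NO_DROP;
	qos->class_type = IONIC_QOS_CLASS_TYPE_PCP;	/* classify on Dot1Q PCP */
	qos->sched_type = IONIC_QOS_SCHED_TYPE_DWRR;	/* deficit weighted round-robin */
	qos->dwrr_weight = 50;				/* relative scheduling weight */
	qos->mtu = cpu_to_le32(9216);			/* jumbo-capable class; width assumed */
	qos->dot1q_pcp = 3;				/* match PCP value 3 */
	strscpy(qos->name, "no-drop-pcp3", IONIC_QOS_CLASS_NAME_SZ);
}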
@@ -1797,15 +1987,15 @@ union ionic_qos_identity {
u8 version;
u8 type;
u8 rsvd[62];
- union ionic_qos_config config[IONIC_QOS_CLASS_MAX];
+ union ionic_qos_config config[IONIC_QOS_CLASS_MAX];
};
- __le32 words[512];
+ __le32 words[478];
};
/**
- * struct qos_init_cmd - QoS config init command
+ * struct ionic_qos_init_cmd - QoS config init command
* @opcode: Opcode
- * @group: Qos class id
+ * @group: QoS class id
* @info_pa: destination address for qos info
*/
struct ionic_qos_init_cmd {
@@ -1819,8 +2009,9 @@ struct ionic_qos_init_cmd {
typedef struct ionic_admin_comp ionic_qos_init_comp;
/**
- * struct ionic_qos_reset_cmd - Qos config reset command
+ * struct ionic_qos_reset_cmd - QoS config reset command
* @opcode: Opcode
+ * @group: QoS class id
*/
struct ionic_qos_reset_cmd {
u8 opcode;
@@ -1847,10 +2038,16 @@ struct ionic_fw_download_cmd {
typedef struct ionic_admin_comp ionic_fw_download_comp;
+/**
+ * enum ionic_fw_control_oper - FW control operations
+ * @IONIC_FW_RESET: Reset firmware
+ * @IONIC_FW_INSTALL: Install firmware
+ * @IONIC_FW_ACTIVATE: Activate firmware
+ */
enum ionic_fw_control_oper {
- IONIC_FW_RESET = 0, /* Reset firmware */
- IONIC_FW_INSTALL = 1, /* Install firmware */
- IONIC_FW_ACTIVATE = 2, /* Activate firmware */
+ IONIC_FW_RESET = 0,
+ IONIC_FW_INSTALL = 1,
+ IONIC_FW_ACTIVATE = 2,
};
/**
@@ -1869,8 +2066,10 @@ struct ionic_fw_control_cmd {
/**
 * struct ionic_fw_control_comp - Firmware control completion
- * @opcode: opcode
- * @slot: slot where the firmware was installed
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @slot: Slot where the firmware was installed
+ * @color: Color bit
*/
struct ionic_fw_control_comp {
u8 status;
@@ -1888,11 +2087,11 @@ struct ionic_fw_control_comp {
/**
* struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd
* @opcode: opcode
- * @lif_index: lif index
+ * @lif_index: LIF index
*
- * There is no rdma specific dev command completion struct. Completion uses
+ * There is no RDMA specific dev command completion struct. Completion uses
* the common struct ionic_admin_comp. Only the status is indicated.
- * Nonzero status means the LIF does not support rdma.
+ * Nonzero status means the LIF does not support RDMA.
**/
struct ionic_rdma_reset_cmd {
u8 opcode;
@@ -1904,30 +2103,29 @@ struct ionic_rdma_reset_cmd {
/**
* struct ionic_rdma_queue_cmd - Create RDMA Queue command
* @opcode: opcode, 52, 53
- * @lif_index lif index
- * @qid_ver: (qid | (rdma version << 24))
+ * @lif_index: LIF index
+ * @qid_ver: (qid | (RDMA version << 24))
* @cid: intr, eq_id, or cq_id
* @dbid: doorbell page id
* @depth_log2: log base two of queue depth
* @stride_log2: log base two of queue stride
* @dma_addr: address of the queue memory
- * @xxx_table_index: temporary, but should not need pgtbl for contig. queues.
*
- * The same command struct is used to create an rdma event queue, completion
- * queue, or rdma admin queue. The cid is an interrupt number for an event
+ * The same command struct is used to create an RDMA event queue, completion
+ * queue, or RDMA admin queue. The cid is an interrupt number for an event
* queue, an event queue id for a completion queue, or a completion queue id
- * for an rdma admin queue.
+ * for an RDMA admin queue.
*
* The queue created via a dev command must be contiguous in dma space.
*
* The dev commands are intended only to be used during driver initialization,
- * to create queues supporting the rdma admin queue. Other queues, and other
- * types of rdma resources like memory regions, will be created and registered
- * via the rdma admin queue, and will support a more complete interface
+ * to create queues supporting the RDMA admin queue. Other queues, and other
+ * types of RDMA resources like memory regions, will be created and registered
+ * via the RDMA admin queue, and will support a more complete interface
* providing scatter gather lists for larger, scattered queue buffers and
* memory registration.
*
- * There is no rdma specific dev command completion struct. Completion uses
+ * There is no RDMA specific dev command completion struct. Completion uses
* the common struct ionic_admin_comp. Only the status is indicated.
**/
struct ionic_rdma_queue_cmd {
@@ -1940,8 +2138,7 @@ struct ionic_rdma_queue_cmd {
u8 depth_log2;
u8 stride_log2;
__le64 dma_addr;
- u8 rsvd2[36];
- __le32 xxx_table_index;
+ u8 rsvd2[40];
};
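
For illustration, a sketch of filling this command to create an RDMA event queue as described above; the opcode name is an assumption (EQ, CQ and RDMA admin queue creation share this struct), and for an event queue the cid carries the interrupt number:

static void ionic_example_rdma_eq_cmd(struct ionic_rdma_queue_cmd *cmd,
				      u16 lif_index, u32 qid, u8 rdma_ver,
				      u32 intr_index, u16 dbid,
				      u8 depth_log2, u8 stride_log2,
				      dma_addr_t q_pa)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = IONIC_CMD_RDMA_CREATE_EQ;		/* assumed opcode name */
	cmd->lif_index = cpu_to_le16(lif_index);
	cmd->qid_ver = cpu_to_le32(qid | (rdma_ver << 24));
	cmd->cid = cpu_to_le32(intr_index);		/* intr number for an EQ */
	cmd->dbid = cpu_to_le16(dbid);			/* doorbell page id */
	cmd->depth_log2 = depth_log2;
	cmd->stride_log2 = stride_log2;
	cmd->dma_addr = cpu_to_le64(q_pa);		/* queue memory must be contiguous */
}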
/******************************************************************
@@ -1949,7 +2146,7 @@ struct ionic_rdma_queue_cmd {
******************************************************************/
/**
- * struct ionic_notifyq_event
+ * struct ionic_notifyq_event - Generic event reporting structure
* @eid: event number
* @ecode: event code
* @data: unspecified data about the event
@@ -1964,9 +2161,9 @@ struct ionic_notifyq_event {
};
/**
- * struct ionic_link_change_event
+ * struct ionic_link_change_event - Link change event notification
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_LINK_CHANGE
+ * @ecode: event code = IONIC_EVENT_LINK_CHANGE
* @link_status: link up or down, with error bits (enum port_status)
* @link_speed: speed of the network link
*
@@ -1981,9 +2178,9 @@ struct ionic_link_change_event {
};
/**
- * struct ionic_reset_event
+ * struct ionic_reset_event - Reset event notification
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_RESET
+ * @ecode: event code = IONIC_EVENT_RESET
* @reset_code: reset type
* @state: 0=pending, 1=complete, 2=error
*
@@ -1999,11 +2196,9 @@ struct ionic_reset_event {
};
/**
- * struct ionic_heartbeat_event
+ * struct ionic_heartbeat_event - Sent periodically by NIC to indicate health
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_HEARTBEAT
- *
- * Sent periodically by the NIC to indicate continued health
+ * @ecode: event code = IONIC_EVENT_HEARTBEAT
*/
struct ionic_heartbeat_event {
__le64 eid;
@@ -2012,12 +2207,10 @@ struct ionic_heartbeat_event {
};
/**
- * struct ionic_log_event
+ * struct ionic_log_event - Sent to notify the driver of an internal error
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_LOG
+ * @ecode: event code = IONIC_EVENT_LOG
* @data: log data
- *
- * Sent to notify the driver of an internal error.
*/
struct ionic_log_event {
__le64 eid;
@@ -2026,7 +2219,18 @@ struct ionic_log_event {
};
/**
- * struct ionic_port_stats
+ * struct ionic_xcvr_event - Transceiver change event
+ * @eid: event number
+ * @ecode: event code = IONIC_EVENT_XCVR
+ */
+struct ionic_xcvr_event {
+ __le64 eid;
+ __le16 ecode;
+ u8 rsvd[54];
+};
+
+/**
+ * struct ionic_port_stats - Port statistics structure
*/
struct ionic_port_stats {
__le64 frames_rx_ok;
@@ -2131,28 +2335,61 @@ struct ionic_mgmt_port_stats {
__le64 frames_rx_multicast;
__le64 frames_rx_broadcast;
__le64 frames_rx_pause;
- __le64 frames_rx_bad_length0;
- __le64 frames_rx_undersized1;
- __le64 frames_rx_oversized2;
- __le64 frames_rx_fragments3;
- __le64 frames_rx_jabber4;
- __le64 frames_rx_64b5;
- __le64 frames_rx_65b_127b6;
- __le64 frames_rx_128b_255b7;
- __le64 frames_rx_256b_511b8;
- __le64 frames_rx_512b_1023b9;
- __le64 frames_rx_1024b_1518b0;
- __le64 frames_rx_gt_1518b1;
- __le64 frames_rx_fifo_full2;
- __le64 frames_tx_ok3;
- __le64 frames_tx_all4;
- __le64 frames_tx_bad5;
- __le64 octets_tx_ok6;
- __le64 octets_tx_total7;
- __le64 frames_tx_unicast8;
- __le64 frames_tx_multicast9;
- __le64 frames_tx_broadcast0;
- __le64 frames_tx_pause1;
+ __le64 frames_rx_bad_length;
+ __le64 frames_rx_undersized;
+ __le64 frames_rx_oversized;
+ __le64 frames_rx_fragments;
+ __le64 frames_rx_jabber;
+ __le64 frames_rx_64b;
+ __le64 frames_rx_65b_127b;
+ __le64 frames_rx_128b_255b;
+ __le64 frames_rx_256b_511b;
+ __le64 frames_rx_512b_1023b;
+ __le64 frames_rx_1024b_1518b;
+ __le64 frames_rx_gt_1518b;
+ __le64 frames_rx_fifo_full;
+ __le64 frames_tx_ok;
+ __le64 frames_tx_all;
+ __le64 frames_tx_bad;
+ __le64 octets_tx_ok;
+ __le64 octets_tx_total;
+ __le64 frames_tx_unicast;
+ __le64 frames_tx_multicast;
+ __le64 frames_tx_broadcast;
+ __le64 frames_tx_pause;
+};
+
+enum ionic_pb_buffer_drop_stats {
+ IONIC_BUFFER_INTRINSIC_DROP = 0,
+ IONIC_BUFFER_DISCARDED,
+ IONIC_BUFFER_ADMITTED,
+ IONIC_BUFFER_OUT_OF_CELLS_DROP,
+ IONIC_BUFFER_OUT_OF_CELLS_DROP_2,
+ IONIC_BUFFER_OUT_OF_CREDIT_DROP,
+ IONIC_BUFFER_TRUNCATION_DROP,
+ IONIC_BUFFER_PORT_DISABLED_DROP,
+ IONIC_BUFFER_COPY_TO_CPU_TAIL_DROP,
+ IONIC_BUFFER_SPAN_TAIL_DROP,
+ IONIC_BUFFER_MIN_SIZE_VIOLATION_DROP,
+ IONIC_BUFFER_ENQUEUE_ERROR_DROP,
+ IONIC_BUFFER_INVALID_PORT_DROP,
+ IONIC_BUFFER_INVALID_OUTPUT_QUEUE_DROP,
+ IONIC_BUFFER_DROP_MAX,
+};
+
+/**
+ * struct ionic_port_pb_stats - packet buffer system stats
+ * uses ionic_pb_buffer_drop_stats for drop_counts[]
+ */
+struct ionic_port_pb_stats {
+ __le64 sop_count_in;
+ __le64 eop_count_in;
+ __le64 sop_count_out;
+ __le64 eop_count_out;
+ __le64 drop_counts[IONIC_BUFFER_DROP_MAX];
+ __le64 input_queue_buffer_occupancy[IONIC_QOS_TC_MAX];
+ __le64 input_queue_port_monitor[IONIC_QOS_TC_MAX];
+ __le64 output_queue_port_monitor[IONIC_QOS_TC_MAX];
};
/**
@@ -2184,22 +2421,31 @@ union ionic_port_identity {
u8 rsvd2[44];
union ionic_port_config config;
};
- __le32 words[512];
+ __le32 words[478];
};
/**
* struct ionic_port_info - port info structure
- * @port_status: port status
- * @port_stats: port stats
+ * @config: Port configuration data
+ * @status: Port status data
+ * @stats: Port statistics data
+ * @mgmt_stats: Port management statistics data
+ * @pb_stats: uplink packet buffer drop stats
*/
struct ionic_port_info {
union ionic_port_config config;
struct ionic_port_status status;
- struct ionic_port_stats stats;
+ union {
+ struct ionic_port_stats stats;
+ struct ionic_mgmt_port_stats mgmt_stats;
+ };
+ /* room for pb_stats to start at 2k offset */
+ u8 rsvd[760];
+ struct ionic_port_pb_stats pb_stats;
};
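
An illustrative compile-time check of the layout note above; the 2048-byte figure is inferred from the "2k offset" comment rather than stated in the structure itself:

static inline void ionic_example_check_port_info_layout(void)
{
	/* pb_stats is padded out to start at a 2 KB offset (assumed value) */
	BUILD_BUG_ON(offsetof(struct ionic_port_info, pb_stats) != 2048);
}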
/**
- * struct ionic_lif_stats
+ * struct ionic_lif_stats - LIF statistics structure
*/
struct ionic_lif_stats {
/* RX */
@@ -2252,7 +2498,7 @@ struct ionic_lif_stats {
__le64 tx_queue_error;
__le64 tx_desc_fetch_error;
__le64 tx_desc_data_error;
- __le64 rsvd9;
+ __le64 tx_queue_empty;
__le64 rsvd10;
__le64 rsvd11;
__le64 rsvd12;
@@ -2353,7 +2599,10 @@ struct ionic_lif_stats {
};
/**
- * struct ionic_lif_info - lif info structure
+ * struct ionic_lif_info - LIF info structure
+ * @config: LIF configuration structure
+ * @status: LIF status structure
+ * @stats: LIF statistics structure
*/
struct ionic_lif_info {
union ionic_lif_config config;
@@ -2389,7 +2638,9 @@ union ionic_dev_cmd {
struct ionic_qos_init_cmd qos_init;
struct ionic_qos_reset_cmd qos_reset;
+ struct ionic_q_identify_cmd q_identify;
struct ionic_q_init_cmd q_init;
+ struct ionic_q_control_cmd q_control;
};
union ionic_dev_cmd_comp {
@@ -2421,19 +2672,20 @@ union ionic_dev_cmd_comp {
ionic_qos_init_comp qos_init;
ionic_qos_reset_comp qos_reset;
+ struct ionic_q_identify_comp q_identify;
struct ionic_q_init_comp q_init;
};
/**
- * union dev_info - Device info register format (read-only)
- * @signature: Signature value of 0x44455649 ('DEVI').
- * @version: Current version of info.
- * @asic_type: Asic type.
- * @asic_rev: Asic revision.
- * @fw_status: Firmware status.
- * @fw_heartbeat: Firmware heartbeat counter.
- * @serial_num: Serial number.
- * @fw_version: Firmware version.
+ * union ionic_dev_info_regs - Device info register format (read-only)
+ * @signature: Signature value of 0x44455649 ('DEVI')
+ * @version: Current version of info
+ * @asic_type: Asic type
+ * @asic_rev: Asic revision
+ * @fw_status: Firmware status
+ * @fw_heartbeat: Firmware heartbeat counter
+ * @serial_num: Serial number
+ * @fw_version: Firmware version
*/
union ionic_dev_info_regs {
#define IONIC_DEVINFO_FWVERS_BUFLEN 32
@@ -2454,10 +2706,10 @@ union ionic_dev_info_regs {
/**
* union ionic_dev_cmd_regs - Device command register format (read-write)
- * @doorbell: Device Cmd Doorbell, write-only.
+ * @doorbell: Device Cmd Doorbell, write-only
* Write a 1 to signal device to process cmd,
* poll done for completion.
- * @done: Done indicator, bit 0 == 1 when command is complete.
+ * @done: Done indicator, bit 0 == 1 when command is complete
* @cmd: Opcode-specific command bytes
* @comp: Opcode-specific response bytes
* @data: Opcode-specific side-data
@@ -2475,7 +2727,7 @@ union ionic_dev_cmd_regs {
};
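
Sketch of the doorbell/done handshake these registers describe, simplified relative to the driver's ionic_dev_cmd_go()/ionic_dev_cmd_wait() path; names and sleep timing here are illustrative:

static int ionic_example_dev_cmd(union ionic_dev_cmd_regs __iomem *regs,
				 const union ionic_dev_cmd *cmd,
				 unsigned long timeout)
{
	unsigned long deadline = jiffies + timeout;

	memcpy_toio(&regs->cmd, cmd, sizeof(*cmd));	/* load opcode-specific bytes */
	iowrite32(0, &regs->done);			/* clear done indicator */
	iowrite32(1, &regs->doorbell);			/* signal device to process cmd */

	while (!(ioread32(&regs->done) & 1)) {		/* bit 0 == 1 when complete */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		usleep_range(100, 200);
	}

	return 0;
}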
/**
- * union ionic_dev_regs - Device register format in for bar 0 page 0
+ * union ionic_dev_regs - Device register format for bar 0 page 0
* @info: Device info registers
* @devcmd: Device command registers
*/
@@ -2490,6 +2742,7 @@ union ionic_dev_regs {
union ionic_adminq_cmd {
struct ionic_admin_cmd cmd;
struct ionic_nop_cmd nop;
+ struct ionic_q_identify_cmd q_identify;
struct ionic_q_init_cmd q_init;
struct ionic_q_control_cmd q_control;
struct ionic_lif_setattr_cmd lif_setattr;
@@ -2506,6 +2759,7 @@ union ionic_adminq_cmd {
union ionic_adminq_comp {
struct ionic_admin_comp comp;
struct ionic_nop_comp nop;
+ struct ionic_q_identify_comp q_identify;
struct ionic_q_init_comp q_init;
struct ionic_lif_setattr_comp lif_setattr;
struct ionic_lif_getattr_comp lif_getattr;
@@ -2531,14 +2785,14 @@ union ionic_adminq_comp {
/**
* struct ionic_doorbell - Doorbell register layout
* @p_index: Producer index
- * @ring: Selects the specific ring of the queue to update.
+ * @ring: Selects the specific ring of the queue to update
* Type-specific meaning:
- * ring=0: Default producer/consumer queue.
+ * ring=0: Default producer/consumer queue
* ring=1: (CQ, EQ) Re-Arm queue. RDMA CQs
* send events to EQs when armed. EQs send
* interrupts when armed.
- * @qid: The queue id selects the queue destination for the
- * producer index and flags.
+ * @qid_lo: Queue destination for the producer index and flags (low bits)
+ * @qid_hi: Queue destination for the producer index and flags (high bits)
*/
struct ionic_doorbell {
__le16 p_index;
@@ -2571,6 +2825,7 @@ struct ionic_identity {
union ionic_lif_identity lif;
union ionic_port_identity port;
union ionic_qos_identity qos;
+ union ionic_q_identity txq;
};
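
For illustration, a single 64-bit write covering the doorbell layout documented for struct ionic_doorbell above; the qid_lo/qid_hi split and the 8-byte struct size are assumptions from the member names, and the driver's real helper is ionic_dbell_ring():

static void ionic_example_ring_doorbell(u64 __iomem *db, u32 qid, u8 ring,
					u16 prod_index)
{
	struct ionic_doorbell val = {
		.p_index = cpu_to_le16(prod_index),	/* producer index */
		.ring    = ring,			/* 0 = default, 1 = re-arm */
		.qid_lo  = qid & 0xff,			/* low bits of queue id */
		.qid_hi  = cpu_to_le16(qid >> 8),	/* high bits of queue id */
	};
	u64 raw;

	memcpy(&raw, &val, sizeof(raw));	/* struct assumed to be exactly 8 bytes */
	writeq(raw, db);			/* one atomic doorbell update */
}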
#endif /* _IONIC_IF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index f8a9c1bcffc9..7321a92f8395 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -17,6 +17,16 @@
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"
+/* queuetype support level */
+static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
+ [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
+ [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
+ [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */
+ [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support
+ * 1 = ... with Tx SG version 1
+ */
+};
+
static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
@@ -27,6 +37,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
+static void ionic_lif_queue_identify(struct ionic_lif *lif);
static void ionic_lif_deferred_work(struct work_struct *work)
{
@@ -186,10 +197,10 @@ static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
return 0;
}
-static void ionic_intr_free(struct ionic_lif *lif, int index)
+static void ionic_intr_free(struct ionic *ionic, int index)
{
- if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
- clear_bit(index, lif->ionic->intrs);
+ if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
+ clear_bit(index, ionic->intrs);
}
static int ionic_qcq_enable(struct ionic_qcq *qcq)
@@ -299,7 +310,7 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
irq_set_affinity_hint(qcq->intr.vector, NULL);
devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
qcq->intr.vector = 0;
- ionic_intr_free(lif, qcq->intr.index);
+ ionic_intr_free(lif->ionic, qcq->intr.index);
}
devm_kfree(dev, qcq->cq.info);
@@ -345,7 +356,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
struct ionic_qcq *n_qcq)
{
if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
- ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
+ ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
n_qcq->flags &= ~IONIC_QCQ_F_INTR;
}
@@ -444,7 +455,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
cpumask_set_cpu(new->intr.cpu,
&new->intr.affinity_mask);
} else {
- new->intr.index = INTR_INDEX_NOT_ASSIGNED;
+ new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
@@ -497,7 +508,7 @@ err_out_free_irq:
devm_free_irq(dev, new->intr.vector, &new->napi);
err_out_free_intr:
if (flags & IONIC_QCQ_F_INTR)
- ionic_intr_free(lif, new->intr.index);
+ ionic_intr_free(lif->ionic, new->intr.index);
err_out:
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
return err;
@@ -597,6 +608,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
@@ -614,6 +626,8 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
+ dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
q->tail = q->info;
q->head = q->tail;
@@ -646,6 +660,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
@@ -663,6 +678,8 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
+ dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
q->tail = q->info;
q->head = q->tail;
@@ -726,7 +743,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
}
break;
default:
- netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
+ netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
comp->event.ecode, eid);
break;
}
@@ -775,8 +792,8 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
return max(n_work, a_work);
}
-static void ionic_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *ns)
+void ionic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *ns)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_lif_stats *ls;
@@ -1509,17 +1526,25 @@ static void ionic_txrx_free(struct ionic_lif *lif)
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
+ unsigned int sg_desc_sz;
unsigned int flags;
unsigned int i;
int err = 0;
+ if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
+ lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
+ sizeof(struct ionic_txq_sg_desc_v1))
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
+ else
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
+
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
lif->ntxq_descs,
sizeof(struct ionic_txq_desc),
sizeof(struct ionic_txq_comp),
- sizeof(struct ionic_txq_sg_desc),
+ sg_desc_sz,
lif->kern_pid, &lif->txqcqs[i].qcq);
if (err)
goto err_out;
@@ -1682,7 +1707,7 @@ int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ if (!netif_device_present(netdev))
return 0;
ionic_stop_queues(lif);
@@ -1699,6 +1724,9 @@ static int ionic_get_vf_config(struct net_device *netdev,
struct ionic *ionic = lif->ionic;
int ret = 0;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_read(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1726,6 +1754,9 @@ static int ionic_get_vf_stats(struct net_device *netdev, int vf,
struct ionic_lif_stats *vs;
int ret = 0;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_read(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1761,6 +1792,9 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
return -EINVAL;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1792,6 +1826,9 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1818,6 +1855,9 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
if (tx_min)
return -EINVAL;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1840,6 +1880,9 @@ static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
u8 data = set; /* convert to u8 for config */
int ret;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1862,6 +1905,9 @@ static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
u8 data = set; /* convert to u8 for config */
int ret;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1898,6 +1944,9 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
return -EINVAL;
}
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -2065,9 +2114,17 @@ int ionic_lifs_alloc(struct ionic *ionic)
/* only build the first lif, others are for later features */
set_bit(0, ionic->lifbits);
+
lif = ionic_lif_alloc(ionic, 0);
+ if (IS_ERR_OR_NULL(lif)) {
+ clear_bit(0, ionic->lifbits);
+ return -ENOMEM;
+ }
- return PTR_ERR_OR_ZERO(lif);
+ lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
+ ionic_lif_queue_identify(lif);
+
+ return 0;
}
static void ionic_lif_reset(struct ionic_lif *lif)
@@ -2292,6 +2349,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_ENA),
@@ -2578,6 +2636,80 @@ void ionic_lifs_unregister(struct ionic *ionic)
unregister_netdev(ionic->master_lif->netdev);
}
+static void ionic_lif_queue_identify(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ union ionic_q_identity *q_ident;
+ struct ionic_dev *idev;
+ int qtype;
+ int err;
+
+ idev = &lif->ionic->idev;
+ q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
+
+ for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
+ struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
+
+ /* filter out the ones we know about */
+ switch (qtype) {
+ case IONIC_QTYPE_ADMINQ:
+ case IONIC_QTYPE_NOTIFYQ:
+ case IONIC_QTYPE_RXQ:
+ case IONIC_QTYPE_TXQ:
+ break;
+ default:
+ continue;
+ }
+
+ memset(qti, 0, sizeof(*qti));
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
+ ionic_qtype_versions[qtype]);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ if (!err) {
+ qti->version = q_ident->version;
+ qti->supported = q_ident->supported;
+ qti->features = le64_to_cpu(q_ident->features);
+ qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
+ qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
+ qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
+ qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
+ qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
+ }
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err == -EINVAL) {
+ dev_err(ionic->dev, "qtype %d not supported\n", qtype);
+ continue;
+ } else if (err == -EIO) {
+ dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
+ return;
+ } else if (err) {
+ dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
+ qtype, err);
+ return;
+ }
+
+ dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
+ qtype, qti->version);
+ dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
+ qtype, qti->supported);
+ dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
+ qtype, qti->features);
+ dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
+ qtype, qti->desc_sz);
+ dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
+ qtype, qti->comp_sz);
+ dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
+ qtype, qti->sg_desc_sz);
+ dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
+ qtype, qti->max_sg_elems);
+ dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
+ qtype, qti->sg_desc_stride);
+ }
+}
+
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
union ionic_lif_identity *lid)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 5d4ffda5c05f..c3428034a17b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -20,11 +20,13 @@ struct ionic_tx_stats {
u64 bytes;
u64 clean;
u64 linearize;
- u64 no_csum;
+ u64 csum_none;
u64 csum;
u64 crc32_csum;
u64 tso;
+ u64 tso_bytes;
u64 frags;
+ u64 vlan_inserted;
u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
};
@@ -38,6 +40,7 @@ struct ionic_rx_stats {
u64 csum_error;
u64 buffers_posted;
u64 dropped;
+ u64 vlan_stripped;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -114,11 +117,17 @@ struct ionic_lif_sw_stats {
u64 rx_packets;
u64 rx_bytes;
u64 tx_tso;
- u64 tx_no_csum;
+ u64 tx_tso_bytes;
+ u64 tx_csum_none;
u64 tx_csum;
u64 rx_csum_none;
u64 rx_csum_complete;
u64 rx_csum_error;
+ u64 hw_tx_dropped;
+ u64 hw_rx_dropped;
+ u64 hw_rx_over_errors;
+ u64 hw_rx_missed_errors;
+ u64 hw_tx_aborted_errors;
};
enum ionic_lif_state_flags {
@@ -133,6 +142,17 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_STATE_SIZE
};
+struct ionic_qtype_info {
+ u8 version;
+ u8 supported;
+ u64 features;
+ u16 desc_sz;
+ u16 comp_sz;
+ u16 sg_desc_sz;
+ u16 max_sg_elems;
+ u16 sg_desc_stride;
+};
+
#define IONIC_LIF_NAME_MAX_SZ 32
struct ionic_lif {
char name[IONIC_LIF_NAME_MAX_SZ];
@@ -161,11 +181,13 @@ struct ionic_lif {
bool mc_overflow;
unsigned int nmcast;
bool uc_overflow;
+ u16 lif_type;
unsigned int nucast;
struct ionic_lif_info *info;
dma_addr_t info_pa;
u32 info_sz;
+ struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX];
u16 rss_types;
u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
@@ -227,6 +249,8 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
}
void ionic_link_status_check_request(struct ionic_lif *lif);
+void ionic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *ns);
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
struct ionic_deferred_work *work);
int ionic_lifs_alloc(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 3344bc1f7671..df5b9bcc3aba 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/utsname.h>
-#include <linux/vermagic.h>
+#include <generated/utsrelease.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -152,6 +152,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
return "IONIC_CMD_RX_FILTER_ADD";
case IONIC_CMD_RX_FILTER_DEL:
return "IONIC_CMD_RX_FILTER_DEL";
+ case IONIC_CMD_Q_IDENTIFY:
+ return "IONIC_CMD_Q_IDENTIFY";
case IONIC_CMD_Q_INIT:
return "IONIC_CMD_Q_INIT";
case IONIC_CMD_Q_CONTROL:
@@ -356,7 +358,7 @@ try_again:
done = ionic_dev_cmd_done(idev);
if (done)
break;
- msleep(20);
+ msleep(5);
hb = ionic_heartbeat_check(ionic);
} while (!done && !hb && time_before(jiffies, max_wait));
duration = jiffies - start_time;
@@ -413,6 +415,7 @@ int ionic_setup(struct ionic *ionic)
err = ionic_dev_setup(ionic);
if (err)
return err;
+ ionic_reset(ionic);
return 0;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 8f2a8fb029f1..2a1885da58a6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -15,11 +15,109 @@ static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
IONIC_LIF_STAT_DESC(rx_packets),
IONIC_LIF_STAT_DESC(rx_bytes),
IONIC_LIF_STAT_DESC(tx_tso),
- IONIC_LIF_STAT_DESC(tx_no_csum),
+ IONIC_LIF_STAT_DESC(tx_tso_bytes),
+ IONIC_LIF_STAT_DESC(tx_csum_none),
IONIC_LIF_STAT_DESC(tx_csum),
IONIC_LIF_STAT_DESC(rx_csum_none),
IONIC_LIF_STAT_DESC(rx_csum_complete),
IONIC_LIF_STAT_DESC(rx_csum_error),
+ IONIC_LIF_STAT_DESC(hw_tx_dropped),
+ IONIC_LIF_STAT_DESC(hw_rx_dropped),
+ IONIC_LIF_STAT_DESC(hw_rx_over_errors),
+ IONIC_LIF_STAT_DESC(hw_rx_missed_errors),
+ IONIC_LIF_STAT_DESC(hw_tx_aborted_errors),
+};
+
+static const struct ionic_stat_desc ionic_port_stats_desc[] = {
+ IONIC_PORT_STAT_DESC(frames_rx_ok),
+ IONIC_PORT_STAT_DESC(frames_rx_all),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_fcs),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_all),
+ IONIC_PORT_STAT_DESC(octets_rx_ok),
+ IONIC_PORT_STAT_DESC(octets_rx_all),
+ IONIC_PORT_STAT_DESC(frames_rx_unicast),
+ IONIC_PORT_STAT_DESC(frames_rx_multicast),
+ IONIC_PORT_STAT_DESC(frames_rx_broadcast),
+ IONIC_PORT_STAT_DESC(frames_rx_pause),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_length),
+ IONIC_PORT_STAT_DESC(frames_rx_undersized),
+ IONIC_PORT_STAT_DESC(frames_rx_oversized),
+ IONIC_PORT_STAT_DESC(frames_rx_fragments),
+ IONIC_PORT_STAT_DESC(frames_rx_jabber),
+ IONIC_PORT_STAT_DESC(frames_rx_pripause),
+ IONIC_PORT_STAT_DESC(frames_rx_stomped_crc),
+ IONIC_PORT_STAT_DESC(frames_rx_too_long),
+ IONIC_PORT_STAT_DESC(frames_rx_vlan_good),
+ IONIC_PORT_STAT_DESC(frames_rx_dropped),
+ IONIC_PORT_STAT_DESC(frames_rx_less_than_64b),
+ IONIC_PORT_STAT_DESC(frames_rx_64b),
+ IONIC_PORT_STAT_DESC(frames_rx_65b_127b),
+ IONIC_PORT_STAT_DESC(frames_rx_128b_255b),
+ IONIC_PORT_STAT_DESC(frames_rx_256b_511b),
+ IONIC_PORT_STAT_DESC(frames_rx_512b_1023b),
+ IONIC_PORT_STAT_DESC(frames_rx_1024b_1518b),
+ IONIC_PORT_STAT_DESC(frames_rx_1519b_2047b),
+ IONIC_PORT_STAT_DESC(frames_rx_2048b_4095b),
+ IONIC_PORT_STAT_DESC(frames_rx_4096b_8191b),
+ IONIC_PORT_STAT_DESC(frames_rx_8192b_9215b),
+ IONIC_PORT_STAT_DESC(frames_rx_other),
+ IONIC_PORT_STAT_DESC(frames_tx_ok),
+ IONIC_PORT_STAT_DESC(frames_tx_all),
+ IONIC_PORT_STAT_DESC(frames_tx_bad),
+ IONIC_PORT_STAT_DESC(octets_tx_ok),
+ IONIC_PORT_STAT_DESC(octets_tx_total),
+ IONIC_PORT_STAT_DESC(frames_tx_unicast),
+ IONIC_PORT_STAT_DESC(frames_tx_multicast),
+ IONIC_PORT_STAT_DESC(frames_tx_broadcast),
+ IONIC_PORT_STAT_DESC(frames_tx_pause),
+ IONIC_PORT_STAT_DESC(frames_tx_pripause),
+ IONIC_PORT_STAT_DESC(frames_tx_vlan),
+ IONIC_PORT_STAT_DESC(frames_tx_less_than_64b),
+ IONIC_PORT_STAT_DESC(frames_tx_64b),
+ IONIC_PORT_STAT_DESC(frames_tx_65b_127b),
+ IONIC_PORT_STAT_DESC(frames_tx_128b_255b),
+ IONIC_PORT_STAT_DESC(frames_tx_256b_511b),
+ IONIC_PORT_STAT_DESC(frames_tx_512b_1023b),
+ IONIC_PORT_STAT_DESC(frames_tx_1024b_1518b),
+ IONIC_PORT_STAT_DESC(frames_tx_1519b_2047b),
+ IONIC_PORT_STAT_DESC(frames_tx_2048b_4095b),
+ IONIC_PORT_STAT_DESC(frames_tx_4096b_8191b),
+ IONIC_PORT_STAT_DESC(frames_tx_8192b_9215b),
+ IONIC_PORT_STAT_DESC(frames_tx_other),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_0),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_1),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_2),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_3),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_4),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_5),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_6),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_7),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_0),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_1),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_2),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_3),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_4),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_5),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_6),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_7),
+ IONIC_PORT_STAT_DESC(tx_pripause_0_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_1_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_2_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_3_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_4_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_5_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_6_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_7_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_0_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_1_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_2_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_3_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_4_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_5_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_6_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_7_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pause_1us_count),
+ IONIC_PORT_STAT_DESC(frames_tx_truncated),
};
static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
@@ -29,6 +127,11 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
IONIC_TX_STAT_DESC(dma_map_err),
IONIC_TX_STAT_DESC(linearize),
IONIC_TX_STAT_DESC(frags),
+ IONIC_TX_STAT_DESC(tso),
+ IONIC_TX_STAT_DESC(tso_bytes),
+ IONIC_TX_STAT_DESC(csum_none),
+ IONIC_TX_STAT_DESC(csum),
+ IONIC_TX_STAT_DESC(vlan_inserted),
};
static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
@@ -40,6 +143,7 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(csum_complete),
IONIC_RX_STAT_DESC(csum_error),
IONIC_RX_STAT_DESC(dropped),
+ IONIC_RX_STAT_DESC(vlan_stripped),
};
static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
@@ -62,6 +166,7 @@ static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
+#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc)
#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc)
@@ -76,6 +181,7 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
{
struct ionic_tx_stats *tstats;
struct ionic_rx_stats *rstats;
+ struct rtnl_link_stats64 ns;
struct ionic_qcq *txqcq;
struct ionic_qcq *rxqcq;
int q_num;
@@ -89,7 +195,8 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
stats->tx_packets += tstats->pkts;
stats->tx_bytes += tstats->bytes;
stats->tx_tso += tstats->tso;
- stats->tx_no_csum += tstats->no_csum;
+ stats->tx_tso_bytes += tstats->tso_bytes;
+ stats->tx_csum_none += tstats->csum_none;
stats->tx_csum += tstats->csum;
}
@@ -103,6 +210,13 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
stats->rx_csum_error += rstats->csum_error;
}
}
+
+ ionic_get_stats64(lif->netdev, &ns);
+ stats->hw_tx_dropped = ns.tx_dropped;
+ stats->hw_rx_dropped = ns.rx_dropped;
+ stats->hw_rx_over_errors = ns.rx_over_errors;
+ stats->hw_rx_missed_errors = ns.rx_missed_errors;
+ stats->hw_tx_aborted_errors = ns.tx_aborted_errors;
}
static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
@@ -118,6 +232,9 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
/* rx stats */
total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
+ /* port stats */
+ total += IONIC_NUM_PORT_STATS;
+
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
/* tx debug stats */
@@ -144,6 +261,13 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
snprintf(*buf, ETH_GSTRING_LEN, ionic_lif_stats_desc[i].name);
*buf += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < IONIC_NUM_PORT_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ ionic_port_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN, "tx_%d_%s",
@@ -225,6 +349,7 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
{
+ struct ionic_port_stats *port_stats;
struct ionic_lif_sw_stats lif_stats;
struct ionic_qcq *txqcq, *rxqcq;
struct ionic_tx_stats *txstats;
@@ -238,6 +363,13 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
+ port_stats = &lif->ionic->idev.port_info->stats;
+ for (i = 0; i < IONIC_NUM_PORT_STATS; i++) {
+ **buf = IONIC_READ_STAT_LE64(port_stats,
+ &ionic_port_stats_desc[i]);
+ (*buf)++;
+ }
+
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
txstats = &lif_to_txstats(lif, q_num);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.h b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
index d2c1122a2c6e..3f543512616e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
@@ -11,6 +11,9 @@
.offset = IONIC_STAT_TO_OFFSET(type, stat_name) \
}
+#define IONIC_PORT_STAT_DESC(stat_name) \
+ IONIC_STAT_DESC(struct ionic_port_stats, stat_name)
+
#define IONIC_LIF_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_lif_sw_stats, stat_name)
@@ -45,6 +48,9 @@ extern const int ionic_num_stats_grps;
#define IONIC_READ_STAT64(base_ptr, desc_ptr) \
(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+#define IONIC_READ_STAT_LE64(base_ptr, desc_ptr) \
+ __le64_to_cpu(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+
struct ionic_stat_desc {
char name[ETH_GSTRING_LEN];
u64 offset;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index d233b6e77b1e..b7f900c11834 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -10,8 +10,10 @@
#include "ionic_lif.h"
#include "ionic_txrx.h"
-static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg);
+static void ionic_rx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
@@ -140,8 +142,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
return skb;
}
-static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg)
+static void ionic_rx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg)
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
struct ionic_qcq *qcq = q_to_qcq(q);
@@ -210,10 +214,11 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
(comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
stats->csum_error++;
- if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(comp->vlan_tci));
+ if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(comp->vlan_tci));
+ stats->vlan_stripped++;
}
if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
@@ -475,7 +480,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
return work_done;
}
-static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len)
+static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+ void *data, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->lif->ionic->dev;
@@ -491,7 +497,8 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t
return dma_addr;
}
-static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag,
+static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+ const skb_frag_t *frag,
size_t offset, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
@@ -507,8 +514,10 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *fra
return dma_addr;
}
-static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg)
+static void ionic_tx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg)
{
struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
struct ionic_txq_sg_elem *elem = sg_desc->elems;
@@ -852,6 +861,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
stats->pkts += total_pkts;
stats->bytes += total_bytes;
stats->tso++;
+ stats->tso_bytes += total_bytes;
return 0;
@@ -890,9 +900,12 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
flags, skb_shinfo(skb)->nr_frags, dma_addr);
desc->cmd = cpu_to_le64(cmd);
desc->len = cpu_to_le16(skb_headlen(skb));
- desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset);
+ if (has_vlan) {
+ desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ stats->vlan_inserted++;
+ }
if (skb->csum_not_inet)
stats->crc32_csum++;
@@ -927,9 +940,12 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
flags, skb_shinfo(skb)->nr_frags, dma_addr);
desc->cmd = cpu_to_le64(cmd);
desc->len = cpu_to_le16(skb_headlen(skb));
- desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ if (has_vlan) {
+ desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ stats->vlan_inserted++;
+ }
- stats->no_csum++;
+ stats->csum_none++;
return 0;
}
@@ -989,6 +1005,7 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
+ int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
int err;
@@ -997,7 +1014,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
return (skb->len / skb_shinfo(skb)->gso_size) + 1;
/* If non-TSO, just need 1 desc and nr_frags sg elems */
- if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
+ if (skb_shinfo(skb)->nr_frags <= sg_elems)
return 1;
/* Too many frags, so linearize */
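The descriptor accounting above is easy to check in isolation. Below is a minimal userspace sketch of the same rule (roughly one descriptor per gso_size chunk for TSO, a single descriptor for a non-TSO skb whose fragment count fits max_sg_elems, otherwise linearize); the packet sizes are made up and this is illustrative only, not the driver code.

    #include <stdio.h>

    static int descs_needed(int is_tso, int len, int gso_size,
                            int nr_frags, int max_sg_elems)
    {
            if (is_tso)
                    return len / gso_size + 1;

            if (nr_frags <= max_sg_elems)
                    return 1;

            return -1;      /* too many frags: caller must linearize the skb first */
    }

    int main(void)
    {
            printf("%d\n", descs_needed(1, 9000, 1448, 0, 8));   /* TSO: 9000/1448 + 1 = 7 */
            printf("%d\n", descs_needed(0, 1500, 0, 3, 8));      /* single descriptor */
            printf("%d\n", descs_needed(0, 64000, 0, 17, 8));    /* -1: linearize */
            return 0;
    }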
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fa41bf08a589..66ed39d6f357 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -740,12 +740,6 @@ struct qed_dbg_feature {
u32 dumped_dwords;
};
-struct qed_dbg_params {
- struct qed_dbg_feature features[DBG_FEATURE_NUM];
- u8 engine_for_debug;
- bool print_data;
-};
-
struct qed_dev {
u32 dp_module;
u8 dp_level;
@@ -844,6 +838,9 @@ struct qed_dev {
/* Recovery */
bool recov_in_prog;
+	/* Indicates whether attentions should be prevented from being reasserted */
+ bool attn_clr_en;
+
/* LLH info */
u8 ppfid_bitmap;
struct qed_llh_info *p_llh_info;
@@ -872,17 +869,18 @@ struct qed_dev {
} protocol_ops;
void *ops_cookie;
- struct qed_dbg_params dbg_params;
-
#ifdef CONFIG_QED_LL2
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
bool disable_ilt_dump;
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
+ bool print_dbg_data;
+
u32 rdma_max_sge;
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
@@ -1020,6 +1018,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
+void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
+ enum qed_hw_err_type err_type);
void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index f4eebaabb6d0..57a0dab88431 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7453,7 +7453,7 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
- &p_hwfn->cdev->dbg_params.features[feature_idx];
+ &p_hwfn->cdev->dbg_features[feature_idx];
u32 text_size_bytes, null_char_pos, i;
enum dbg_status rc;
char *text_buf;
@@ -7502,7 +7502,7 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
text_buf[i] = '\n';
/* Dump printable feature to log */
- if (p_hwfn->cdev->dbg_params.print_data)
+ if (p_hwfn->cdev->print_dbg_data)
qed_dbg_print_feature(text_buf, text_size_bytes);
	/* Free the old dump_buf and point the dump_buf to the newly allocated
@@ -7523,7 +7523,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
- &p_hwfn->cdev->dbg_params.features[feature_idx];
+ &p_hwfn->cdev->dbg_features[feature_idx];
u32 buf_size_dwords;
enum dbg_status rc;
@@ -7648,7 +7648,7 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
enum qed_nvm_images image_id)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
u32 len_rounded, i;
__be32 val;
int rc;
@@ -7780,7 +7780,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
u8 cur_engine, omit_engine = 0, org_engine;
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
int grc_params[MAX_DBG_GRC_PARAMS], i;
u32 offset = 0, feature_size;
@@ -8000,7 +8000,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
u8 cur_engine, org_engine;
@@ -8059,9 +8059,9 @@ int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature =
- &cdev->dbg_params.features[feature];
+ &cdev->dbg_features[feature];
enum dbg_status dbg_rc;
struct qed_ptt *p_ptt;
int rc = 0;
@@ -8084,7 +8084,7 @@ int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
DP_VERBOSE(cdev, QED_MSG_DEBUG,
"copying debugfs feature to external buffer\n");
memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
- *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
+ *num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
4;
out:
@@ -8095,7 +8095,7 @@ out:
int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
u32 buf_size_dwords;
@@ -8120,14 +8120,14 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
- return cdev->dbg_params.engine_for_debug;
+ return cdev->engine_for_debug;
}
void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
{
DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
engine_number);
- cdev->dbg_params.engine_for_debug = engine_number;
+ cdev->engine_for_debug = engine_number;
}
void qed_dbg_pf_init(struct qed_dev *cdev)
@@ -8146,7 +8146,7 @@ void qed_dbg_pf_init(struct qed_dev *cdev)
}
/* Set the hwfn to be 0 as default */
- cdev->dbg_params.engine_for_debug = 0;
+ cdev->engine_for_debug = 0;
}
void qed_dbg_pf_exit(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 38a65b984e47..6e857468e993 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1972,7 +1972,7 @@ static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
return 0;
if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
- p_hwfn->hw_info.multi_tc_roce_en = 0;
+ p_hwfn->hw_info.multi_tc_roce_en = false;
DP_NOTICE(p_hwfn,
"multi-tc roce was disabled to reduce requested amount of pqs\n");
if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
@@ -3085,7 +3085,9 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->rel_pf_id, false);
if (rc) {
- DP_NOTICE(p_hwfn, "Final cleanup failed\n");
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt,
+ QED_HW_ERR_RAMROD_FAIL,
+ "Final cleanup failed\n");
goto load_err;
}
}
@@ -4392,7 +4394,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
}
if (QED_IS_ROCE_PERSONALITY(p_hwfn))
- p_hwfn->hw_info.multi_tc_roce_en = 1;
+ p_hwfn->hw_info.multi_tc_roce_en = true;
p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
p_hwfn->hw_info.num_active_tc = 1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 4597015b8bff..f00460d00cab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12400,6 +12400,13 @@ struct load_rsp_stc {
#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
};
+struct mdump_retain_data_stc {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac;
@@ -12488,10 +12495,14 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+/* Send crash dump commands with param[3:0] - opcode */
+#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000
#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
+#define DRV_MSG_CODE_DEBUG_DATA_SEND 0xc0040000
+
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
@@ -12517,6 +12528,21 @@ struct public_drv_mb {
#define RESOURCE_DUMP 0
+/* DRV_MSG_CODE_MDUMP_CMD parameters */
+#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
+#define DRV_MSG_CODE_MDUMP_ACK 0x01
+#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
+#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
+#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
+#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
+#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
+
+#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
+#define DRV_MSG_CODE_MDUMP_GEN_MDUMP2 0x0b
+#define DRV_MSG_CODE_MDUMP_FREE_MDUMP2 0x0c
+
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000
@@ -12626,6 +12652,17 @@ struct public_drv_mb {
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xFF
+
+/* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xFF000000
+
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000FFFF
#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
@@ -12678,6 +12715,14 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define FW_MSG_CODE_DEBUG_DATA_SEND_INV_ARG 0xb0070000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_BUF_FULL 0xb0080000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_NO_BUF 0xb0090000
+#define FW_MSG_CODE_DEBUG_NOT_ENABLED 0xb00a0000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_OK 0xb00b0000
+
+#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
+
u32 fw_mb_param;
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
@@ -12742,9 +12787,9 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_GET_FCOE_STATS,
MFW_DRV_MSG_GET_ISCSI_STATS,
MFW_DRV_MSG_GET_RDMA_STATS,
- MFW_DRV_MSG_BW_UPDATE10,
+ MFW_DRV_MSG_FAILURE_DETECTED,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
- MFW_DRV_MSG_BW_UPDATE11,
+ MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
MFW_DRV_MSG_RESERVED,
MFW_DRV_MSG_GET_TLV_REQ,
MFW_DRV_MSG_OEM_CFG_UPDATE,
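The new DRV_MSG_CODE_MDUMP_CMD mailbox command carries its sub-opcode in param[3:0], as the comment above notes. A tiny standalone sketch of how such a command/parameter pair is assembled; the constants mirror the header definitions, the program itself is illustrative only and not driver code.

    #include <stdio.h>

    #define DRV_MSG_CODE_MDUMP_CMD          0x00250000u
    #define MDUMP_DRV_PARAM_OPCODE_MASK     0x0000000fu
    #define DRV_MSG_CODE_MDUMP_GET_RETAIN   0x07u

    int main(void)
    {
            unsigned int cmd = DRV_MSG_CODE_MDUMP_CMD;
            /* sub-opcode lives in param[3:0] */
            unsigned int param = DRV_MSG_CODE_MDUMP_GET_RETAIN &
                                 MDUMP_DRV_PARAM_OPCODE_MASK;

            printf("mailbox cmd 0x%08x, param 0x%08x\n", cmd, param);
            return 0;
    }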
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 4ab8cfaf63d1..5fa251489536 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -762,9 +762,10 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
dst_type,
length_cur);
if (qed_status) {
- DP_NOTICE(p_hwfn,
- "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
- qed_status, src_addr, dst_addr, length_cur);
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL,
+ "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+ qed_status, src_addr,
+ dst_addr, length_cur);
break;
}
}
@@ -837,6 +838,41 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
return rc;
}
+void qed_hw_err_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_hw_err_type err_type, char *fmt, ...)
+{
+ char buf[QED_HW_ERR_MAX_STR_SIZE];
+ va_list vl;
+ int len;
+
+ if (fmt) {
+ va_start(vl, fmt);
+ len = vsnprintf(buf, QED_HW_ERR_MAX_STR_SIZE, fmt, vl);
+ va_end(vl);
+
+ if (len > QED_HW_ERR_MAX_STR_SIZE - 1)
+ len = QED_HW_ERR_MAX_STR_SIZE - 1;
+
+ DP_NOTICE(p_hwfn, "%s", buf);
+ }
+
+ /* Fan failure cannot be masked by handling of another HW error */
+ if (p_hwfn->cdev->recov_in_prog &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_DRV,
+ "Recovery is in progress. Avoid notifying about HW error %d.\n",
+ err_type);
+ return;
+ }
+
+ qed_hw_error_occurred(p_hwfn, err_type);
+
+ if (fmt)
+ qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, len);
+}
+
int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase)
{
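qed_hw_err_notify() above formats its varargs into a bounded buffer, clamps the reported length to what fits, and only then forwards the string to the notification and MFW paths. Below is a standalone sketch of that bounded-formatting pattern, with the forwarding step replaced by a printf; the macro name is a placeholder standing in for QED_HW_ERR_MAX_STR_SIZE.

    #include <stdarg.h>
    #include <stdio.h>

    #define ERR_MAX_STR_SIZE 256

    static void err_notify(const char *fmt, ...)
    {
            char buf[ERR_MAX_STR_SIZE];
            va_list vl;
            int len;

            va_start(vl, fmt);
            len = vsnprintf(buf, sizeof(buf), fmt, vl);
            va_end(vl);

            /* vsnprintf reports the untruncated length; clamp it */
            if (len > ERR_MAX_STR_SIZE - 1)
                    len = ERR_MAX_STR_SIZE - 1;

            printf("notify (%d bytes): %s", len, buf);
    }

    int main(void)
    {
            err_notify("DMAE failed with error 0x%x\n", 0x1au);
            return 0;
    }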
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 505e94db939d..f5b109b04b66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -315,4 +315,19 @@ int qed_init_fw_data(struct qed_dev *cdev,
int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase);
+#define QED_HW_ERR_MAX_STR_SIZE 256
+
+/**
+ * @brief qed_hw_err_notify - Notify upper layer driver and management FW
+ * about a HW error.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param err_type
+ * @param fmt - printf-style format string; the formatted message is sent
+ *              to the MFW as debug data
+ * @param ... - buffer format args
+ */
+void qed_hw_err_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_hw_err_type err_type, char *fmt, ...);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 9f5113639eaf..b7b974f0ef21 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -96,6 +96,7 @@ struct aeu_invert_reg_bit {
#define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT BIT(23)
+#define ATTENTION_CLEAR_ENABLE BIT(28)
unsigned int flags;
/* Callback to call if attention will be triggered */
@@ -363,6 +364,21 @@ static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
}
+static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
+{
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT,
+ "FW assertion!\n");
+
+ return -EINVAL;
+}
+
+static int qed_general_attention_35(struct qed_hwfn *p_hwfn)
+{
+ DP_INFO(p_hwfn, "General attention 35!\n");
+
+ return 0;
+}
+
#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
@@ -605,13 +621,15 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{
{ /* After Invert 4 */
- {"General Attention 32", ATTENTION_SINGLE,
- NULL, MAX_BLOCK_ID},
+ {"General Attention 32", ATTENTION_SINGLE |
+ ATTENTION_CLEAR_ENABLE, qed_fw_assertion,
+ MAX_BLOCK_ID},
{"General Attention %d",
(2 << ATTENTION_LENGTH_SHIFT) |
(33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
- {"General Attention 35", ATTENTION_SINGLE,
- NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE |
+ ATTENTION_CLEAR_ENABLE, qed_general_attention_35,
+ MAX_BLOCK_ID},
{"NWS Parity",
ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
@@ -927,9 +945,12 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
qed_int_attn_print(p_hwfn, p_aeu->block_index,
ATTN_TYPE_INTERRUPT, !b_fatal);
-
- /* If the attention is benign, no need to prevent it */
- if (!rc)
+	/* Notify about the fatal attention */
+ if (b_fatal)
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN,
+ "`%s': Fatal attention\n",
+ p_bit_name);
+ else /* If the attention is benign, no need to prevent it */
goto out;
/* Prevent this Attention from being asserted in the future */
@@ -2349,6 +2370,11 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev)
cdev->hwfns[i].b_int_requested = false;
}
+void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable)
+{
+ cdev->attn_clr_en = clr_enable;
+}
+
int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u8 timer_res, u16 sb_id, bool tx)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 9ad568d93ae6..e09db3386367 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -191,6 +191,17 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
void qed_int_disable_post_isr_release(struct qed_dev *cdev);
/**
+ * @brief qed_int_attn_clr_enable - sets whether attentions are always
+ *        prevented from being reasserted, or whether the attributes of
+ *        the specific attention are followed.
+ *
+ * @param cdev
+ * @param clr_enable
+ *
+ */
+void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
+
+/**
* @brief - Doorbell Recovery handler.
* Run doorbell recovery in case of PF overflow (and flush DORQ if
* needed).
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 037e5978787e..4afd8572ada6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -2331,7 +2331,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
cdev->ll2->cb_cookie = cookie;
}
-struct qed_ll2_cbs ll2_cbs = {
+static struct qed_ll2_cbs ll2_cbs = {
.rx_comp_cb = &qed_ll2b_complete_rx_packet,
.rx_release_cb = &qed_ll2b_release_rx_packet,
.tx_comp_cb = &qed_ll2b_complete_tx_packet,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 96356e897c80..83e798d4eebb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -49,6 +49,7 @@
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
+#include <linux/aer.h>
#include "qed.h"
#include "qed_sriov.h"
@@ -129,6 +130,8 @@ static void qed_free_pci(struct qed_dev *cdev)
{
struct pci_dev *pdev = cdev->pdev;
+ pci_disable_pcie_error_reporting(pdev);
+
if (cdev->doorbells && cdev->db_size)
iounmap(cdev->doorbells);
if (cdev->regview)
@@ -231,6 +234,12 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
return -ENOMEM;
}
+ /* AER (Advanced Error reporting) configuration */
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (rc)
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Failed to configure PCIe AER [%d]\n", rc);
+
return 0;
err2:
@@ -2459,6 +2468,39 @@ void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
ops->schedule_recovery_handler(cookie);
}
+char *qed_hw_err_type_descr[] = {
+ [QED_HW_ERR_FAN_FAIL] = "Fan Failure",
+ [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure",
+ [QED_HW_ERR_HW_ATTN] = "HW Attention",
+ [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure",
+ [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure",
+ [QED_HW_ERR_FW_ASSERT] = "FW Assertion",
+ [QED_HW_ERR_LAST] = "Unknown",
+};
+
+void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
+ enum qed_hw_err_type err_type)
+{
+ struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+ char *err_str;
+
+ if (err_type > QED_HW_ERR_LAST)
+ err_type = QED_HW_ERR_LAST;
+ err_str = qed_hw_err_type_descr[err_type];
+
+ DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
+
+ /* Call the HW error handler of the protocol driver.
+ * If it is not available - perform a minimal handling of preventing
+ * HW attentions from being reasserted.
+ */
+ if (ops && ops->schedule_hw_err_handler)
+ ops->schedule_hw_err_handler(cookie, err_type);
+ else
+ qed_int_attn_clr_enable(p_hwfn->cdev, true);
+}
+
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
void *handle)
{
@@ -2680,6 +2722,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.set_led = &qed_set_led,
.recovery_process = &qed_recovery_process,
.recovery_prolog = &qed_recovery_prolog,
+ .attn_clr_enable = &qed_int_attn_clr_enable,
.update_drv_state = &qed_update_drv_state,
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 280527cc0578..9624616806e7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -575,6 +575,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
qed_mcp_cmd_set_blocking(p_hwfn, true);
+ qed_hw_err_notify(p_hwfn, p_ptt,
+ QED_HW_ERR_MFW_RESP_FAIL, NULL);
return -EAGAIN;
}
@@ -1704,6 +1706,127 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
&resp, &param);
}
+static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+	/* A single notification should be sent to the upper driver in CMT mode */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
+ "Fan failure was detected on the network interface card and it's going to be shut down.\n");
+}
+
+struct qed_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
+static int
+qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mdump_cmd_params *p_mdump_cmd_params)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ DP_INFO(p_hwfn,
+ "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
+ p_mdump_cmd_params->cmd);
+ rc = -EOPNOTSUPP;
+ } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct mdump_retain_data_stc *p_mdump_retain)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+ int rc;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
+ mdump_cmd_params.p_data_dst = p_mdump_retain;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);
+
+ rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct mdump_retain_data_stc mdump_retain;
+ int rc;
+
+ /* In CMT mode - no need for more than a single acknowledgment to the
+ * MFW, and no more than a single notification to the upper driver.
+ */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
+ if (rc == 0 && mdump_retain.valid)
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
+ mdump_retain.epoch,
+ mdump_retain.pf, mdump_retain.status);
+ else
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device\n");
+
+ DP_NOTICE(p_hwfn,
+ "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
+ qed_mcp_mdump_ack(p_hwfn, p_ptt);
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
+}
+
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct public_func shmem_info;
@@ -1850,6 +1973,12 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_S_TAG_UPDATE:
qed_mcp_update_stag(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_FAILURE_DETECTED:
+ qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
+ qed_mcp_handle_critical_error(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_GET_TLV_REQ:
qed_mfw_tlv_req(p_hwfn);
break;
@@ -3819,3 +3948,127 @@ int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
DRV_MSG_CODE_SET_NVM_CFG_OPTION,
mb_param, &resp, &param, len, (u32 *)p_buf);
}
+
+#define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN
+#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32)
+#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
+ (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
+
+static int
+__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
+ DP_ERR(p_hwfn,
+ "Debug data size is %d while it should not exceed %d\n",
+ size, QED_MCP_DBG_DATA_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
+ SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
+ mb_params.p_data_src = p_buf;
+ mb_params.data_src_size = size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
+ DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
+ return -EBUSY;
+ } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send debug data to the MFW [resp 0x%08x]\n",
+ mb_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum qed_mcp_dbg_data_type {
+ QED_MCP_DBG_DATA_TYPE_RAW,
+};
+
+/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
+#define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0
+#define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff
+#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12
+#define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000
+#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20
+#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
+#define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28
+#define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000
+
+#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1
+#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
+
+static int
+qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
+{
+ u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
+ u32 tmp_size = size, *p_header, *p_payload;
+ u8 flags = 0;
+ u16 seq;
+ int rc;
+
+ p_header = (u32 *)raw_data;
+ p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
+
+ seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
+
+ /* First chunk is marked as 'first' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+
+ *p_header = 0;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
+
+ while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
+ memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
+ rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ QED_MCP_DBG_DATA_MAX_SIZE);
+ if (rc)
+ return rc;
+
+ /* Clear the 'first' marking after sending the first chunk */
+ if (p_tmp_buf == p_buf) {
+ flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
+ flags);
+ }
+
+ p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ }
+
+ /* Last chunk is marked as 'last' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ memcpy(p_payload, p_tmp_buf, tmp_size);
+
+	/* Casting the remaining size to u8 is ok since at this point it is <= 32 */
+ return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
+ tmp_size));
+}
+
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
+{
+ return qed_mcp_send_debug_data(p_hwfn, p_ptt,
+ QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
+}
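The debug-data header layout documented above ([31:28] PFID, [27:20] flags, [19:12] type, [11:0] sequence number) can be reproduced with plain shifts and masks. The sketch below does exactly that in userspace, with made-up field values, in place of the driver's SET_MFW_FIELD helper:

    #include <stdio.h>

    #define DBG_HDR_SN_OFFSET       0
    #define DBG_HDR_SN_MASK         0x00000fffu
    #define DBG_HDR_TYPE_OFFSET     12
    #define DBG_HDR_TYPE_MASK       0x000ff000u
    #define DBG_HDR_FLAGS_OFFSET    20
    #define DBG_HDR_FLAGS_MASK      0x0ff00000u
    #define DBG_HDR_PF_OFFSET       28
    #define DBG_HDR_PF_MASK         0xf0000000u

    static unsigned int dbg_hdr(unsigned int pf, unsigned int flags,
                                unsigned int type, unsigned int sn)
    {
            unsigned int hdr = 0;

            hdr |= (sn << DBG_HDR_SN_OFFSET) & DBG_HDR_SN_MASK;
            hdr |= (type << DBG_HDR_TYPE_OFFSET) & DBG_HDR_TYPE_MASK;
            hdr |= (flags << DBG_HDR_FLAGS_OFFSET) & DBG_HDR_FLAGS_MASK;
            hdr |= (pf << DBG_HDR_PF_OFFSET) & DBG_HDR_PF_MASK;

            return hdr;
    }

    int main(void)
    {
            /* PF 2, flags FIRST|LAST (0x3), type 0 (RAW), sequence number 7 */
            printf("0x%08x\n", dbg_hdr(2, 0x3, 0, 7));      /* prints 0x20300007 */
            return 0;
    }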
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 9c4c2763de8d..5750b4c5ef63 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -685,6 +685,18 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
*/
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+/**
+ * @brief Send raw debug data to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_buf - raw debug data buffer
+ * @param size - buffer size
+ */
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u32 size);
+
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
@@ -731,6 +743,9 @@ struct qed_mcp_info {
	/* Capabilities negotiated with the MFW */
u32 capabilities;
+
+ /* S/N for debug data mailbox commands */
+ atomic_t dbg_data_seq;
};
struct qed_mcp_mb_params {
@@ -1001,6 +1016,19 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
+/* @brief - Gets the mdump retained data from the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_retain
+ *
+ * @return 0 upon success.
+ */
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct mdump_retain_data_stc *p_mdump_retain);
+
/**
* @brief - Sets the MFW's max value for the given resource
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 37e70562a964..475b89903f46 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -736,9 +736,9 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
- p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
- &ramrod_res_phys, GFP_KERNEL);
+ p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
if (!p_ramrod_res) {
rc = -ENOMEM;
@@ -872,10 +872,10 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
}
/* Send a query responder ramrod to FW to get RQ-PSN and state */
- p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(*p_resp_ramrod_res),
- &resp_ramrod_res_phys, GFP_KERNEL);
+ p_resp_ramrod_res =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_resp_ramrod_res),
+ &resp_ramrod_res_phys, GFP_KERNEL);
if (!p_resp_ramrod_res) {
DP_NOTICE(p_hwfn,
"qed query qp failed: cannot allocate memory (ramrod)\n");
@@ -920,8 +920,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
}
/* Send a query requester ramrod to FW to get SQ-PSN and state */
- p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(*p_req_ramrod_res),
&req_ramrod_res_phys,
GFP_KERNEL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index f5f3c03b9dd2..790c28d696a0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -160,12 +160,16 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
return 0;
}
err:
- DP_NOTICE(p_hwfn,
- "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
- le32_to_cpu(p_ent->elem.hdr.cid),
- p_ent->elem.hdr.cmd_id,
- p_ent->elem.hdr.protocol_id,
- le16_to_cpu(p_ent->elem.hdr.echo));
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
+ "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+ le32_to_cpu(p_ent->elem.hdr.cid),
+ p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ le16_to_cpu(p_ent->elem.hdr.echo));
+ qed_ptt_release(p_hwfn, p_ptt);
return -EBUSY;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 368e88565783..aabeaf03135e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -32,6 +32,7 @@
#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
+#include <linux/crash_dump.h>
#include <linux/types.h>
#include "qed_vf.h"
@@ -40,9 +41,12 @@
#define QED_VF_ARRAY_LENGTH (3)
#ifdef CONFIG_QED_SRIOV
-#define IS_VF(cdev) ((cdev)->b_is_vf)
-#define IS_PF(cdev) (!((cdev)->b_is_vf))
-#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
+#define IS_VF(cdev) (is_kdump_kernel() ? \
+ (0) : ((cdev)->b_is_vf))
+#define IS_PF(cdev) (is_kdump_kernel() ? \
+ (1) : !((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn) (is_kdump_kernel() ? \
+ (0) : !!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev) (0)
#define IS_PF(cdev) (1)
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 234c6f30effb..8857da1208d7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -278,6 +278,14 @@ struct qede_dev {
struct qede_rdma_dev rdma_info;
struct bpf_prog *xdp_prog;
+
+ unsigned long err_flags;
+#define QEDE_ERR_IS_HANDLED 31
+#define QEDE_ERR_ATTN_CLR_EN 0
+#define QEDE_ERR_GET_DBG_INFO 1
+#define QEDE_ERR_IS_RECOVERABLE 2
+#define QEDE_ERR_WARN 3
+
struct qede_dump_info dump_info;
};
@@ -485,11 +493,15 @@ struct qede_fastpath {
#define QEDE_SP_RECOVERY 0
#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_RSVD1 2
+#define QEDE_SP_RSVD2 3
+#define QEDE_SP_HW_ERR 4
+#define QEDE_SP_ARFS_CONFIG 5
+#define QEDE_SP_AER 7
#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
-#define QEDE_SP_ARFS_CONFIG 4
#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
#endif
@@ -521,7 +533,6 @@ u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
-void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
struct qede_tx_queue *txq, int *len);
@@ -574,12 +585,14 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_KDUMP_MIN 63
#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_KDUMP_MIN 63
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
#define QEDE_MIN_PKT_LEN 64
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 812c7766e096..24cc68391ac4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -190,12 +190,14 @@ static const struct {
enum {
QEDE_PRI_FLAG_CMT,
QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
+ QEDE_PRI_FLAG_RECOVER_ON_ERROR,
QEDE_PRI_FLAG_LEN,
};
static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Coupled-Function",
"SmartAN capable",
+ "Recover on error",
};
enum qede_ethtool_tests {
@@ -417,9 +419,30 @@ static u32 qede_get_priv_flags(struct net_device *dev)
if (edev->dev_info.common.smart_an)
flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT);
+ if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE))
+ flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR);
+
return flags;
}
+static int qede_set_priv_flags(struct net_device *dev, u32 flags)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 cflags = qede_get_priv_flags(dev);
+ u32 dflags = flags ^ cflags;
+
+ /* can only change RECOVER_ON_ERROR flag */
+ if (dflags & ~BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ return -EINVAL;
+
+ if (flags & BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ set_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
+ else
+ clear_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
+
+ return 0;
+}
+
struct qede_link_mode_mapping {
u32 qed_link_mode;
u32 ethtool_link_mode;
@@ -2098,6 +2121,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
.set_phys_id = qede_set_phys_id,
.get_ethtool_stats = qede_get_ethtool_stats,
.get_priv_flags = qede_get_priv_flags,
+ .set_priv_flags = qede_set_priv_flags,
.get_sset_count = qede_get_sset_count,
.get_rxnfc = qede_get_rxnfc,
.set_rxnfc = qede_set_rxnfc,
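qede_set_priv_flags() above XORs the requested bitmap against the current one so that any change outside the single writable flag is rejected. A small standalone sketch of that check, with illustrative (not qede) flag positions:

    #include <stdio.h>

    #define FLAG_CMT                (1u << 0)       /* read-only */
    #define FLAG_SMART_AN           (1u << 1)       /* read-only */
    #define FLAG_RECOVER_ON_ERROR   (1u << 2)       /* writable */

    static int set_priv_flags(unsigned int *cur, unsigned int req)
    {
            unsigned int diff = req ^ *cur;

            /* only the writable flag may change */
            if (diff & ~FLAG_RECOVER_ON_ERROR)
                    return -1;

            if (req & FLAG_RECOVER_ON_ERROR)
                    *cur |= FLAG_RECOVER_ON_ERROR;
            else
                    *cur &= ~FLAG_RECOVER_ON_ERROR;

            return 0;
    }

    int main(void)
    {
            unsigned int flags = FLAG_CMT;

            printf("%d\n", set_priv_flags(&flags, FLAG_CMT | FLAG_RECOVER_ON_ERROR)); /* 0 */
            printf("%d\n", set_priv_flags(&flags, FLAG_SMART_AN));                    /* -1 */
            return 0;
    }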
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index c6c20776b474..7598ebe0962a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1066,6 +1066,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.rxq = &rxq->xdp_rxq;
+ xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 34fa3917eb33..b2d154258b07 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
@@ -60,6 +61,7 @@
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
+#include <linux/aer.h>
#include "qede.h"
#include "qede_ptp.h"
@@ -124,6 +126,8 @@ static const struct pci_device_id qede_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);
#define TX_TIMEOUT (5 * HZ)
@@ -135,10 +139,12 @@ static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
struct qed_generic_tlvs *data);
-
+static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
@@ -203,6 +209,10 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
}
#endif
+static const struct pci_error_handlers qede_err_handler = {
+ .error_detected = qede_io_error_detected,
+};
+
static struct pci_driver qede_pci_driver = {
.name = "qede",
.id_table = qede_pci_tbl,
@@ -212,6 +222,7 @@ static struct pci_driver qede_pci_driver = {
#ifdef CONFIG_QED_SRIOV
.sriov_configure = qede_sriov_configure,
#endif
+ .err_handler = &qede_err_handler,
};
static struct qed_eth_cb_ops qede_ll_ops = {
@@ -221,6 +232,7 @@ static struct qed_eth_cb_ops qede_ll_ops = {
#endif
.link_update = qede_link_update,
.schedule_recovery_handler = qede_schedule_recovery_handler,
+ .schedule_hw_err_handler = qede_schedule_hw_err_handler,
.get_generic_tlv_data = qede_get_generic_tlv_data,
.get_protocol_tlv_data = qede_get_eth_tlv_data,
},
@@ -527,6 +539,51 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
+static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ DP_NOTICE(edev,
+ "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
+ txq->index, le16_to_cpu(*txq->hw_cons_ptr),
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl),
+ jiffies);
+}
+
+static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_tx_queue *txq;
+ int cos;
+
+ netif_carrier_off(dev);
+ DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
+
+ if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
+ return;
+
+ for_each_cos_in_txq(edev, cos) {
+ txq = &edev->fp_array[txqueue].txq[cos];
+
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl))
+ qede_tx_log_print(edev, txq);
+ }
+
+ if (IS_VF(edev))
+ return;
+
+ if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) {
+ DP_INFO(edev,
+ "Avoid handling a Tx timeout while another HW error is being handled\n");
+ return;
+ }
+
+ set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -614,6 +671,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
.ndo_do_ioctl = qede_ioctl,
+ .ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
.ndo_set_vf_vlan = qede_set_vf_vlan,
@@ -707,8 +765,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->dp_module = dp_module;
edev->dp_level = dp_level;
edev->ops = qed_ops;
- edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
- edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+ if (is_kdump_kernel()) {
+ edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
+ edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
+ } else {
+ edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+ edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+ }
DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
info->num_queues, info->num_queues);
@@ -974,7 +1038,8 @@ static void qede_sp_task(struct work_struct *work)
/* SRIOV must be disabled outside the lock to avoid a deadlock.
* The recovery of the active VFs is currently not supported.
*/
- qede_sriov_configure(edev->pdev, 0);
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
#endif
qede_lock(edev);
qede_recovery_handler(edev);
@@ -993,7 +1058,20 @@ static void qede_sp_task(struct work_struct *work)
qede_process_arfs_filters(edev, false);
}
#endif
+ if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
+ qede_generic_hw_err_handler(edev);
__qede_unlock(edev);
+
+ if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
+#ifdef CONFIG_QED_SRIOV
+ /* SRIOV must be disabled outside the lock to avoid a deadlock.
+ * The recovery of the active VFs is currently not supported.
+ */
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
+#endif
+ edev->ops->common->recovery_process(edev->cdev);
+ }
}
static void qede_update_pf_params(struct qed_dev *cdev)
@@ -1187,7 +1265,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case QEDE_PRIVATE_VF:
if (debug & QED_LOG_VERBOSE_MASK)
dev_err(&pdev->dev, "Probing a VF\n");
- is_vf = true;
+ is_vf = is_kdump_kernel() ? false : true;
break;
default:
if (debug & QED_LOG_VERBOSE_MASK)
@@ -1398,7 +1476,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
if (rxq->rx_buf_size + size > PAGE_SIZE)
rxq->rx_buf_size = PAGE_SIZE - size;
- /* Segment size to spilt a page in multiple equal parts ,
+ /* Segment size to split a page in multiple equal parts,
* unless XDP is used in which case we'd use the entire page.
*/
if (!edev->xdp_prog) {
@@ -1694,7 +1772,7 @@ static void qede_init_fp(struct qede_dev *edev)
txq->ndev_txq_id = ndev_tx_id;
if (edev->dev_info.is_legacy)
- txq->is_legacy = 1;
+ txq->is_legacy = true;
txq->dev = &edev->pdev->dev;
}
@@ -2482,6 +2560,100 @@ err:
qede_recovery_failed(edev);
}
+static void qede_atomic_hw_err_handler(struct qede_dev *edev)
+{
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_NOTICE(edev,
+ "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ /* Get a call trace of the flow that led to the error */
+ WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
+
+ /* Prevent HW attentions from being reasserted */
+ if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
+ edev->ops->common->attn_clr_enable(cdev, true);
+
+ DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
+}
+
+static void qede_generic_hw_err_handler(struct qede_dev *edev)
+{
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_NOTICE(edev,
+ "Generic sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ /* Trigger a recovery process.
+ * This is placed in the sleep requiring section just to make
+ * sure it is the last one, and that all the other operations
+ * were completed.
+ */
+ if (test_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags))
+ edev->ops->common->recovery_process(cdev);
+
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+
+ DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
+}
+
+static void qede_set_hw_err_flags(struct qede_dev *edev,
+ enum qed_hw_err_type err_type)
+{
+ unsigned long err_flags = 0;
+
+ switch (err_type) {
+ case QED_HW_ERR_DMAE_FAIL:
+ set_bit(QEDE_ERR_WARN, &err_flags);
+ fallthrough;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_RAMROD_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
+ set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
+ break;
+
+ default:
+ DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
+ break;
+ }
+
+ edev->err_flags |= err_flags;
+}
+
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type)
+{
+ struct qede_dev *edev = dev;
+
+ /* Fan failure cannot be masked by handling of another HW error or by a
+ * concurrent recovery process.
+ */
+ if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_INFO(edev,
+ "Avoid scheduling an error handling while another HW error is being handled\n");
+ return;
+ }
+
+ if (err_type >= QED_HW_ERR_LAST) {
+ DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+ return;
+ }
+
+ qede_set_hw_err_flags(edev, err_type);
+ qede_atomic_hw_err_handler(edev);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ DP_INFO(edev, "Scheduled a error handler [err_type %d]\n", err_type);
+}
+
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
@@ -2579,3 +2751,49 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
etlv->num_txqs_full_set = true;
etlv->num_rxqs_full_set = true;
}
+
+/**
+ * qede_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev)
+ return PCI_ERS_RESULT_NONE;
+
+ DP_NOTICE(edev, "IO error detected [%d]\n", state);
+
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_RECOVERY) {
+ DP_NOTICE(edev, "Device already in the recovery state\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /* PF handles the recovery of its VFs */
+ if (IS_VF(edev)) {
+ DP_VERBOSE(edev, QED_MSG_IOV,
+ "VF recovery is handled by its PF\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ /* Close OS Tx */
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+
+ set_bit(QEDE_SP_AER, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ __qede_unlock(edev);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
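Both qede_tx_timeout() and qede_schedule_hw_err_handler() above gate their work on a test-and-set of QEDE_ERR_IS_HANDLED so that only the first error is handled while later ones are merely logged. A userspace sketch of that gate, using C11 atomic_flag in place of the kernel's test_and_set_bit():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag err_is_handled = ATOMIC_FLAG_INIT;

    static void schedule_err_handler(int err_type)
    {
            /* returns the previous value: nonzero means someone is already handling */
            if (atomic_flag_test_and_set(&err_is_handled)) {
                    printf("error %d ignored: another error is being handled\n",
                           err_type);
                    return;
            }

            printf("scheduling handler for error %d\n", err_type);
    }

    int main(void)
    {
            schedule_err_handler(1);                /* scheduled */
            schedule_err_handler(2);                /* ignored while error 1 is pending */
            atomic_flag_clear(&err_is_handled);     /* handler for error 1 completed */
            schedule_err_handler(3);                /* scheduled again */
            return 0;
    }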
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 134611aa2c9a..d838774af5a6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1880,12 +1880,6 @@ static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
}
-static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
- ulong off, u32 data)
-{
- return adapter->ahw->hw_ops->write_reg(adapter, off, data);
-}
-
static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
u8 *mac, u8 function)
{
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index f7c2f32237cb..7adbb03cb931 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1582,10 +1582,10 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
!adapter->fdb_mac_learn) {
qlcnic_alloc_lb_filters_mem(adapter);
- adapter->drv_mac_learn = 1;
+ adapter->drv_mac_learn = true;
adapter->rx_mac_learn = true;
} else {
- adapter->drv_mac_learn = 0;
+ adapter->drv_mac_learn = false;
adapter->rx_mac_learn = false;
}
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 251d4ac4af02..117188e3c7de 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1431,8 +1431,9 @@ error:
}
/* Transmit the packet using specified transmit queue */
-int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
- struct sk_buff *skb)
+netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q,
+ struct sk_buff *skb)
{
struct emac_tpd tpd;
u32 prod_idx;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
index ae08bdd9046c..920123eb8ace 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -227,8 +227,9 @@ void emac_mac_stop(struct emac_adapter *adpt);
void emac_mac_mode_config(struct emac_adapter *adpt);
void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
int *num_pkts, int max_pkts);
-int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
- struct sk_buff *skb);
+netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q,
+ struct sk_buff *skb);
void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q);
void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
struct emac_adapter *adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 18b0c7a2d6dc..20b1b43a0e39 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -115,7 +115,8 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget)
}
/* Transmit the packet */
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct emac_adapter *adpt = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 78e15cc00e0a..97a7e27fff16 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -19,7 +18,6 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
-#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
@@ -27,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
+#include <linux/bitfield.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
@@ -58,9 +57,6 @@
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
-#define R8169_MSG_DEFAULT \
- (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
-
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
#define MC_FILTER_LIMIT 32
@@ -75,6 +71,8 @@
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
+#define OCP_STD_PHY_BASE 0xa400
+
#define RTL_CFG_NO_GBIT 1
/* write/read MMIO register */
@@ -85,10 +83,10 @@
#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg))
#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))
-#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
-#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
-#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
-#define JUMBO_9K (9*1024 - ETH_HLEN - 2)
+#define JUMBO_4K (4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
static const struct {
const char *name;
@@ -176,10 +174,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
-static struct {
- u32 msg_enable;
-} debug = { -1 };
-
enum rtl_registers {
MAC0 = 0, /* Ethernet hardware address. */
MAC4 = 4,
@@ -227,10 +221,13 @@ enum rtl_registers {
CPlusCmd = 0xe0,
IntrMitigate = 0xe2,
-#define RTL_COALESCE_MASK 0x0f
-#define RTL_COALESCE_SHIFT 4
-#define RTL_COALESCE_T_MAX (RTL_COALESCE_MASK)
-#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_MASK << 2)
+#define RTL_COALESCE_TX_USECS GENMASK(15, 12)
+#define RTL_COALESCE_TX_FRAMES GENMASK(11, 8)
+#define RTL_COALESCE_RX_USECS GENMASK(7, 4)
+#define RTL_COALESCE_RX_FRAMES GENMASK(3, 0)
+
+#define RTL_COALESCE_T_MAX 0x0fU
+#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_T_MAX * 4)
RxDescAddrLow = 0xe4,
RxDescAddrHigh = 0xe8,
@@ -386,10 +383,12 @@ enum rtl_register_content {
/* rx_mode_bits */
AcceptErr = 0x20,
AcceptRunt = 0x10,
+#define RX_CONFIG_ACCEPT_ERR_MASK 0x30
AcceptBroadcast = 0x08,
AcceptMulticast = 0x04,
AcceptMyPhys = 0x02,
AcceptAllPhys = 0x01,
+#define RX_CONFIG_ACCEPT_OK_MASK 0x0f
#define RX_CONFIG_ACCEPT_MASK 0x3f
/* TxConfigBits */
@@ -596,7 +595,6 @@ struct rtl8169_private {
struct net_device *dev;
struct phy_device *phydev;
struct napi_struct napi;
- u32 msg_enable;
enum mac_version mac_version;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
@@ -638,8 +636,6 @@ typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
-module_param_named(debug, debug.msg_enable, int, 0);
-MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_SOFTDEP("pre: realtek");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_8168D_1);
@@ -727,53 +723,35 @@ struct rtl_cond {
const char *msg;
};
-static void rtl_udelay(unsigned int d)
-{
- udelay(d);
-}
-
static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
- void (*delay)(unsigned int), unsigned int d, int n,
- bool high)
+ unsigned long usecs, int n, bool high)
{
int i;
for (i = 0; i < n; i++) {
if (c->check(tp) == high)
return true;
- delay(d);
+ fsleep(usecs);
}
- netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
- c->msg, !high, n, d);
- return false;
-}
-
-static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
-{
- return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
-}
-static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
-{
- return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
+ if (net_ratelimit())
+ netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n",
+ c->msg, !high, n, usecs);
+ return false;
}
-static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
+static bool rtl_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
{
- return rtl_loop_wait(tp, c, msleep, d, n, true);
+ return rtl_loop_wait(tp, c, d, n, true);
}
-static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
+static bool rtl_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
{
- return rtl_loop_wait(tp, c, msleep, d, n, false);
+ return rtl_loop_wait(tp, c, d, n, false);
}
#define DECLARE_RTL_COND(name) \
@@ -789,7 +767,8 @@ static bool name ## _check(struct rtl8169_private *tp)
static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
{
if (reg & 0xffff0001) {
- netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
+ if (net_ratelimit())
+ netdev_err(tp->dev, "Invalid ocp reg %x!\n", reg);
return true;
}
return false;
@@ -807,7 +786,7 @@ static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
- rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
+ rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
@@ -817,7 +796,7 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
RTL_W32(tp, GPHY_OCP, reg << 15);
- return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
+ return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
(RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
}
@@ -847,8 +826,6 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
}
-#define OCP_STD_PHY_BASE 0xa400
-
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
if (reg == 0x1f) {
@@ -897,7 +874,7 @@ static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
- rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
+ rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
/*
* According to hardware specs, a 20us delay is required after the write
* complete indication, but before sending the next command.
@@ -911,7 +888,7 @@ static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
- value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
+ value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT;
/*
@@ -934,7 +911,7 @@ static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
RTL_W32(tp, EPHY_RXER_NUM, 0);
- rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
+ rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
@@ -951,7 +928,7 @@ static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
RTL_W32(tp, EPHY_RXER_NUM, 0);
- return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
}
@@ -1037,7 +1014,7 @@ static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
+ rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
udelay(10);
}
@@ -1046,7 +1023,7 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
@@ -1062,7 +1039,7 @@ static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
RTL_W32(tp, ERIDR, val);
RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
- rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
+ rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
@@ -1075,7 +1052,7 @@ static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
- return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
RTL_R32(tp, ERIDR) : ~0;
}
@@ -1108,7 +1085,7 @@ static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask,
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
+ return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
RTL_R32(tp, OCPDR) : ~0;
}
@@ -1122,7 +1099,7 @@ static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
{
RTL_W32(tp, OCPDR, data);
RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
+ rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
@@ -1170,7 +1147,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
{
RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
- rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
+ rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000);
RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
@@ -1178,7 +1155,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
+ rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
@@ -1186,7 +1163,7 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
r8168ep_ocp_write(tp, 0x01, 0x30,
r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
- rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
+ rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
@@ -1209,7 +1186,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
+ rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
@@ -1218,7 +1195,7 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
r8168ep_ocp_write(tp, 0x01, 0x30,
r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
- rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
+ rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_stop(struct rtl8169_private *tp)
@@ -1279,7 +1256,7 @@ u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
- return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
+ return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
@@ -1423,7 +1400,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_52:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_61:
options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
if (wolopts)
options |= PME_SIGNAL;
@@ -1497,19 +1474,15 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
return features;
}
-static int rtl8169_set_features(struct net_device *dev,
- netdev_features_t features)
+static void rtl_set_rx_config_features(struct rtl8169_private *tp,
+ netdev_features_t features)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- u32 rx_config;
+ u32 rx_config = RTL_R32(tp, RxConfig);
- rtl_lock_work(tp);
-
- rx_config = RTL_R32(tp, RxConfig);
if (features & NETIF_F_RXALL)
- rx_config |= (AcceptErr | AcceptRunt);
+ rx_config |= RX_CONFIG_ACCEPT_ERR_MASK;
else
- rx_config &= ~(AcceptErr | AcceptRunt);
+ rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK;
if (rtl_is_8125(tp)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -1519,6 +1492,16 @@ static int rtl8169_set_features(struct net_device *dev,
}
RTL_W32(tp, RxConfig, rx_config);
+}
+
+static int rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl_lock_work(tp);
+
+ rtl_set_rx_config_features(tp, features);
if (features & NETIF_F_RXCSUM)
tp->cp_cmd |= RxChkSum;
@@ -1568,20 +1551,6 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
rtl_unlock_work(tp);
}
-static u32 rtl8169_get_msglevel(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- return tp->msg_enable;
-}
-
-static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- tp->msg_enable = value;
-}
-
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
"tx_packets",
"rx_packets",
@@ -1613,7 +1582,7 @@ DECLARE_RTL_COND(rtl_counters_cond)
return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump);
}
-static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
+static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
{
dma_addr_t paddr = tp->counters_phys_addr;
u32 cmd;
@@ -1624,22 +1593,20 @@ static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
RTL_W32(tp, CounterAddrLow, cmd);
RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
- return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
+ rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
-static bool rtl8169_reset_counters(struct rtl8169_private *tp)
+static void rtl8169_reset_counters(struct rtl8169_private *tp)
{
/*
* Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
* tally counters.
*/
- if (tp->mac_version < RTL_GIGA_MAC_VER_19)
- return true;
-
- return rtl8169_do_counters(tp, CounterReset);
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_19)
+ rtl8169_do_counters(tp, CounterReset);
}
-static bool rtl8169_update_counters(struct rtl8169_private *tp)
+static void rtl8169_update_counters(struct rtl8169_private *tp)
{
u8 val = RTL_R8(tp, ChipCmd);
@@ -1647,16 +1614,13 @@ static bool rtl8169_update_counters(struct rtl8169_private *tp)
* Some chips are unable to dump tally counters when the receiver
* is disabled. If the value reads 0xff, the chip may be in a PCI power-save state.
*/
- if (!(val & CmdRxEnb) || val == 0xff)
- return true;
-
- return rtl8169_do_counters(tp, CounterDump);
+ if (val & CmdRxEnb && val != 0xff)
+ rtl8169_do_counters(tp, CounterDump);
}
-static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
+static void rtl8169_init_counter_offsets(struct rtl8169_private *tp)
{
struct rtl8169_counters *counters = tp->counters;
- bool ret = false;
/*
* rtl8169_init_counter_offsets is called from rtl_open. On chip
@@ -1674,22 +1638,16 @@ static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
*/
if (tp->tc_offset.inited)
- return true;
-
- /* If both, reset and update fail, propagate to caller. */
- if (rtl8169_reset_counters(tp))
- ret = true;
+ return;
- if (rtl8169_update_counters(tp))
- ret = true;
+ rtl8169_reset_counters(tp);
+ rtl8169_update_counters(tp);
tp->tc_offset.tx_errors = counters->tx_errors;
tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
tp->tc_offset.tx_aborted = counters->tx_aborted;
tp->tc_offset.rx_missed = counters->rx_missed;
tp->tc_offset.inited = true;
-
- return ret;
}
static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -1760,46 +1718,34 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
* 1 1 160us 81.92us 1.31ms
*/
-/* rx/tx scale factors for one particular CPlusCmd[0:1] value */
-struct rtl_coalesce_scale {
- /* Rx / Tx */
- u32 nsecs[2];
-};
-
/* rx/tx scale factors for all CPlusCmd[0:1] cases */
struct rtl_coalesce_info {
u32 speed;
- struct rtl_coalesce_scale scalev[4]; /* each CPlusCmd[0:1] case */
+ u32 scale_nsecs[4];
};
-/* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */
-#define rxtx_x1822(r, t) { \
- {{(r), (t)}}, \
- {{(r)*8, (t)*8}}, \
- {{(r)*8*2, (t)*8*2}}, \
- {{(r)*8*2*2, (t)*8*2*2}}, \
-}
+/* produce array with base delay *1, *8, *8*2, *8*2*2 */
+#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) }
+
static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
- /* speed delays: rx00 tx00 */
- { SPEED_10, rxtx_x1822(40960, 40960) },
- { SPEED_100, rxtx_x1822( 2560, 2560) },
- { SPEED_1000, rxtx_x1822( 320, 320) },
+ { SPEED_10, COALESCE_DELAY(40960) },
+ { SPEED_100, COALESCE_DELAY(2560) },
+ { SPEED_1000, COALESCE_DELAY(320) },
{ 0 },
};
static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
- /* speed delays: rx00 tx00 */
- { SPEED_10, rxtx_x1822(40960, 40960) },
- { SPEED_100, rxtx_x1822( 2560, 2560) },
- { SPEED_1000, rxtx_x1822( 5000, 5000) },
+ { SPEED_10, COALESCE_DELAY(40960) },
+ { SPEED_100, COALESCE_DELAY(2560) },
+ { SPEED_1000, COALESCE_DELAY(5000) },
{ 0 },
};
-#undef rxtx_x1822
+#undef COALESCE_DELAY
/* get rx/tx scale vector corresponding to current speed */
-static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
+static const struct rtl_coalesce_info *
+rtl_coalesce_info(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
const struct rtl_coalesce_info *ci;
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
@@ -1819,16 +1765,8 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct rtl8169_private *tp = netdev_priv(dev);
const struct rtl_coalesce_info *ci;
- const struct rtl_coalesce_scale *scale;
- struct {
- u32 *max_frames;
- u32 *usecs;
- } coal_settings [] = {
- { &ec->rx_max_coalesced_frames, &ec->rx_coalesce_usecs },
- { &ec->tx_max_coalesced_frames, &ec->tx_coalesce_usecs }
- }, *p = coal_settings;
- int i;
- u16 w;
+ u32 scale, c_us, c_fr;
+ u16 intrmit;
if (rtl_is_8125(tp))
return -EOPNOTSUPP;
@@ -1836,111 +1774,102 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
memset(ec, 0, sizeof(*ec));
/* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
- ci = rtl_coalesce_info(dev);
+ ci = rtl_coalesce_info(tp);
if (IS_ERR(ci))
return PTR_ERR(ci);
- scale = &ci->scalev[tp->cp_cmd & INTT_MASK];
+ scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK];
- /* read IntrMitigate and adjust according to scale */
- for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
- *p->max_frames = (w & RTL_COALESCE_MASK) << 2;
- w >>= RTL_COALESCE_SHIFT;
- *p->usecs = w & RTL_COALESCE_MASK;
- }
+ intrmit = RTL_R16(tp, IntrMitigate);
- for (i = 0; i < 2; i++) {
- p = coal_settings + i;
- *p->usecs = (*p->usecs * scale->nsecs[i]) / 1000;
+ c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit);
+ ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
- /*
- * ethtool_coalesce says it is illegal to set both usecs and
- * max_frames to 0.
- */
- if (!*p->usecs && !*p->max_frames)
- *p->max_frames = 1;
- }
+ c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit);
+ /* ethtool_coalesce states usecs and max_frames must not both be 0 */
+ ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
+
+ c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit);
+ ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
+
+ c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit);
+ ec->rx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
return 0;
}
-/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, nsec) */
-static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale(
- struct net_device *dev, u32 nsec, u16 *cp01)
+/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */
+static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec,
+ u16 *cp01)
{
const struct rtl_coalesce_info *ci;
u16 i;
- ci = rtl_coalesce_info(dev);
+ ci = rtl_coalesce_info(tp);
if (IS_ERR(ci))
- return ERR_CAST(ci);
+ return PTR_ERR(ci);
for (i = 0; i < 4; i++) {
- u32 rxtx_maxscale = max(ci->scalev[i].nsecs[0],
- ci->scalev[i].nsecs[1]);
- if (nsec <= rxtx_maxscale * RTL_COALESCE_T_MAX) {
+ if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) {
*cp01 = i;
- return &ci->scalev[i];
+ return ci->scale_nsecs[i];
}
}
- return ERR_PTR(-EINVAL);
+ return -ERANGE;
}
static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct rtl8169_private *tp = netdev_priv(dev);
- const struct rtl_coalesce_scale *scale;
- struct {
- u32 frames;
- u32 usecs;
- } coal_settings [] = {
- { ec->rx_max_coalesced_frames, ec->rx_coalesce_usecs },
- { ec->tx_max_coalesced_frames, ec->tx_coalesce_usecs }
- }, *p = coal_settings;
- u16 w = 0, cp01;
- int i;
+ u32 tx_fr = ec->tx_max_coalesced_frames;
+ u32 rx_fr = ec->rx_max_coalesced_frames;
+ u32 coal_usec_max, units;
+ u16 w = 0, cp01 = 0;
+ int scale;
if (rtl_is_8125(tp))
return -EOPNOTSUPP;
- scale = rtl_coalesce_choose_scale(dev,
- max(p[0].usecs, p[1].usecs) * 1000, &cp01);
- if (IS_ERR(scale))
- return PTR_ERR(scale);
+ if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX)
+ return -ERANGE;
- for (i = 0; i < 2; i++, p++) {
- u32 units;
+ coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs);
+ scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01);
+ if (scale < 0)
+ return scale;
- /*
- * accept max_frames=1 we returned in rtl_get_coalesce.
- * accept it not only when usecs=0 because of e.g. the following scenario:
- *
- * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
- * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
- * - then user does `ethtool -C eth0 rx-usecs 100`
- *
- * since ethtool sends to kernel whole ethtool_coalesce
- * settings, if we do not handle rx_usecs=!0, rx_frames=1
- * we'll reject it below in `frames % 4 != 0`.
- */
- if (p->frames == 1) {
- p->frames = 0;
- }
+ /* Accept max_frames=1 we returned in rtl_get_coalesce. Accept it
+ * not only when usecs=0 because of e.g. the following scenario:
+ *
+ * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
+ * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
+ * - then user does `ethtool -C eth0 rx-usecs 100`
+ *
+ * Since ethtool sends the whole ethtool_coalesce struct to the kernel,
+ * if we want to ignore rx_frames then it has to be set to 0.
+ */
+ if (rx_fr == 1)
+ rx_fr = 0;
+ if (tx_fr == 1)
+ tx_fr = 0;
+
+ /* HW requires time limit to be set if frame limit is set */
+ if ((tx_fr && !ec->tx_coalesce_usecs) ||
+ (rx_fr && !ec->rx_coalesce_usecs))
+ return -EINVAL;
- units = p->usecs * 1000 / scale->nsecs[i];
- if (p->frames > RTL_COALESCE_FRAME_MAX || p->frames % 4)
- return -EINVAL;
+ w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4));
+ w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4));
- w <<= RTL_COALESCE_SHIFT;
- w |= units;
- w <<= RTL_COALESCE_SHIFT;
- w |= p->frames >> 2;
- }
+ units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale);
+ w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units);
+ units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale);
+ w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units);
rtl_lock_work(tp);
- RTL_W16(tp, IntrMitigate, swab16(w));
+ RTL_W16(tp, IntrMitigate, w);
tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
@@ -1989,12 +1918,6 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
goto out;
}
- if (dev->phydev->autoneg == AUTONEG_DISABLE ||
- dev->phydev->duplex != DUPLEX_FULL) {
- ret = -EPROTONOSUPPORT;
- goto out;
- }
-
ret = phy_ethtool_set_eee(tp->phydev, data);
if (!ret)
@@ -2013,8 +1936,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_coalesce = rtl_get_coalesce,
.set_coalesce = rtl_set_coalesce,
- .get_msglevel = rtl8169_get_msglevel,
- .set_msglevel = rtl8169_set_msglevel,
.get_regs = rtl8169_get_regs,
.get_wol = rtl8169_get_wol,
.set_wol = rtl8169_set_wol,
@@ -2393,8 +2314,6 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
}
phy_resume(tp->phydev);
- /* give MAC/PHY some time to resume */
- msleep(20);
}
static void rtl_init_rxcfg(struct rtl8169_private *tp)
@@ -2413,8 +2332,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
- RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_VLAN_8125 |
- RX_DMA_BURST);
+ RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
default:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
@@ -2528,7 +2446,7 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
{
RTL_W8(tp, ChipCmd, CmdReset);
- rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
+ rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
static void rtl_request_firmware(struct rtl8169_private *tp)
@@ -2540,10 +2458,8 @@ static void rtl_request_firmware(struct rtl8169_private *tp)
return;
rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
- if (!rtl_fw) {
- netif_warn(tp, ifup, tp->dev, "Unable to load firmware, out of memory\n");
+ if (!rtl_fw)
return;
- }
rtl_fw->phy_write = rtl_writephy;
rtl_fw->phy_read = rtl_readphy;
@@ -2573,6 +2489,33 @@ DECLARE_RTL_COND(rtl_txcfg_empty_cond)
return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
}
+DECLARE_RTL_COND(rtl_rxtx_empty_cond)
+{
+ return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+}
+
+static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
+ rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
+ rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
+ break;
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+ rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
+{
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
+ fsleep(2000);
+ rtl_wait_txrx_fifo_empty(tp);
+}
+
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
/* Disable interrupts */
@@ -2584,12 +2527,15 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
- rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
+ rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000);
break;
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
- rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
+ rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
+ rtl_enable_rxdvgate(tp);
+ fsleep(2000);
break;
default:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
@@ -2630,7 +2576,7 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
-static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
+static void rtl8169_set_magic_reg(struct rtl8169_private *tp)
{
u32 val;
@@ -2656,8 +2602,6 @@ static void rtl_set_rx_mode(struct net_device *dev)
u32 tmp;
if (dev->flags & IFF_PROMISC) {
- /* Unconditionally log net taps. */
- netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
rx_mode |= AcceptAllPhys;
} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
dev->flags & IFF_ALLMULTI ||
@@ -2670,7 +2614,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
mc_filter[1] = mc_filter[0] = 0;
netdev_for_each_mc_addr(ha, dev) {
- u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ u32 bit_nr = eth_hw_addr_crc(ha) >> 26;
mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
}
@@ -2681,14 +2625,11 @@ static void rtl_set_rx_mode(struct net_device *dev)
}
}
- if (dev->features & NETIF_F_RXALL)
- rx_mode |= (AcceptErr | AcceptRunt);
-
RTL_W32(tp, MAR0 + 4, mc_filter[1]);
RTL_W32(tp, MAR0 + 0, mc_filter[0]);
tmp = RTL_R32(tp, RxConfig);
- RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode);
+ RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode);
}
DECLARE_RTL_COND(rtl_csiar_cond)
@@ -2704,7 +2645,7 @@ static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE | func << 16);
- rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
+ rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
@@ -2714,7 +2655,7 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
CSIAR_BYTE_ENABLE);
- return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
RTL_R32(tp, CSIDR) : ~0;
}
@@ -3669,7 +3610,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xe098, 0xc302);
- rtl_udelay_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
+ rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
rtl8125_config_eee_mac(tp);
@@ -3836,7 +3777,7 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- rtl8169_set_magic_reg(tp, tp->mac_version);
+ rtl8169_set_magic_reg(tp);
/* disable interrupt coalescing */
RTL_W16(tp, IntrMitigate, 0x0000);
@@ -3846,7 +3787,6 @@ static void rtl_hw_start(struct rtl8169_private *tp)
{
rtl_unlock_config_regs(tp);
- tp->cp_cmd &= CPCMD_MASK;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
@@ -3868,6 +3808,7 @@ static void rtl_hw_start(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
rtl_set_tx_config_registers(tp);
+ rtl_set_rx_config_features(tp, tp->dev->features);
rtl_set_rx_mode(tp->dev);
rtl_irq_enable(tp);
}
@@ -3883,12 +3824,6 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
-{
- desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
- desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
-}
-
static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
@@ -3914,8 +3849,7 @@ static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
- if (net_ratelimit())
- netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
+ netdev_err(tp->dev, "Failed to map RX DMA!\n");
__free_pages(data, get_order(R8169_RX_BUF_SIZE));
return NULL;
}
@@ -3936,15 +3870,11 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
__free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE));
tp->Rx_databuff[i] = NULL;
- rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
+ tp->RxDescArray[i].addr = 0;
+ tp->RxDescArray[i].opts1 = 0;
}
}
-static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
-{
- desc->opts1 |= cpu_to_le32(RingEnd);
-}
-
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
unsigned int i;
@@ -3960,7 +3890,8 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp)
tp->Rx_databuff[i] = data;
}
- rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
+ /* mark as last descriptor in the ring */
+ tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd);
return 0;
}
@@ -4055,7 +3986,7 @@ static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
ret = dma_mapping_error(d, mapping);
if (unlikely(ret)) {
if (net_ratelimit())
- netif_err(tp, drv, tp->dev, "Failed to map TX data!\n");
+ netdev_err(tp->dev, "Failed to map TX data!\n");
return ret;
}
@@ -4126,25 +4057,20 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
struct sk_buff *skb, u32 *opts)
{
u32 transport_offset = (u32)skb_transport_offset(skb);
- u32 mss = skb_shinfo(skb)->gso_size;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 mss = shinfo->gso_size;
if (mss) {
- switch (vlan_get_protocol(skb)) {
- case htons(ETH_P_IP):
+ if (shinfo->gso_type & SKB_GSO_TCPV4) {
opts[0] |= TD1_GTSENV4;
- break;
-
- case htons(ETH_P_IPV6):
+ } else if (shinfo->gso_type & SKB_GSO_TCPV6) {
if (skb_cow_head(skb, 0))
return false;
tcp_v6_gso_csum_prep(skb);
opts[0] |= TD1_GTSENV6;
- break;
-
- default:
+ } else {
WARN_ON_ONCE(1);
- break;
}
opts[0] |= transport_offset << GTTCPHO_SHIFT;
@@ -4226,7 +4152,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd_first = tp->TxDescArray + entry;
if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
- netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
+ if (net_ratelimit())
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
@@ -4264,8 +4191,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag);
- /* Force all memory writes to complete before notifying device */
- wmb();
+ /* rtl_tx needs to see descriptor changes before it sees the updated tp->cur_tx */
+ smp_wmb();
tp->cur_tx += frags + 1;
@@ -4310,6 +4237,37 @@ err_stop_0:
return NETDEV_TX_BUSY;
}
+static unsigned int rtl_last_frag_len(struct sk_buff *skb)
+{
+ struct skb_shared_info *info = skb_shinfo(skb);
+ unsigned int nr_frags = info->nr_frags;
+
+ if (!nr_frags)
+ return UINT_MAX;
+
+ return skb_frag_size(info->frags + nr_frags - 1);
+}
+
+/* Workaround for hw issues with TSO on RTL8168evl */
+static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ /* IPv4 header has options field */
+ if (vlan_get_protocol(skb) == htons(ETH_P_IP) &&
+ ip_hdrlen(skb) > sizeof(struct iphdr))
+ features &= ~NETIF_F_ALL_TSO;
+
+ /* IPv4 TCP header has options field */
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 &&
+ tcp_hdrlen(skb) > sizeof(struct tcphdr))
+ features &= ~NETIF_F_ALL_TSO;
+
+ else if (rtl_last_frag_len(skb) <= 6)
+ features &= ~NETIF_F_ALL_TSO;
+
+ return features;
+}
+
static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
@@ -4318,6 +4276,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
struct rtl8169_private *tp = netdev_priv(dev);
if (skb_is_gso(skb)) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34)
+ features = rtl8168evl_fix_tso(skb, features);
+
if (transport_offset > GTTCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_ALL_TSO;
@@ -4354,9 +4315,9 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
pci_status_errs = pci_status_get_and_clear_errors(pdev);
- netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
- pci_cmd, pci_status_errs);
-
+ if (net_ratelimit())
+ netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
+ pci_cmd, pci_status_errs);
/*
* The recovery sequence below admits a very elaborate explanation:
* - it seems to work;
@@ -4474,8 +4435,9 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
dma_rmb();
if (unlikely(status & RxRES)) {
- netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
- status);
+ if (net_ratelimit())
+ netdev_warn(dev, "Rx ERROR. status = %08x\n",
+ status);
dev->stats.rx_errors++;
if (status & (RxRWT | RxRUNT))
dev->stats.rx_length_errors++;
@@ -4766,8 +4728,7 @@ static int rtl_open(struct net_device *dev)
rtl_hw_start(tp);
- if (!rtl8169_init_counter_offsets(tp))
- netif_warn(tp, hw, dev, "counter reset/update failed\n");
+ rtl8169_init_counter_offsets(tp);
phy_start(tp->phydev);
netif_start_queue(dev);
@@ -5119,9 +5080,9 @@ DECLARE_RTL_COND(rtl_link_list_ready_cond)
return RTL_R8(tp, MCU) & LINK_LIST_RDY;
}
-DECLARE_RTL_COND(rtl_rxtx_empty_cond)
+static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp)
{
- return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+ rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
@@ -5166,20 +5127,19 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
- ret = mdiobus_register(new_bus);
+ ret = devm_mdiobus_register(new_bus);
if (ret)
return ret;
tp->phydev = mdiobus_get_phy(new_bus, 0);
if (!tp->phydev) {
- mdiobus_unregister(new_bus);
return -ENODEV;
} else if (!tp->phydev->drv) {
/* Most chip versions fail with the genphy driver.
* Therefore ensure that the dedicated PHY driver is loaded.
*/
- dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
- mdiobus_unregister(new_bus);
+ dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n",
+ tp->phydev->phy_id);
return -EUNATCH;
}
@@ -5191,53 +5151,34 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
- tp->ocp_base = OCP_STD_PHY_BASE;
-
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
- return;
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
- return;
+ rtl_enable_rxdvgate(tp);
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
msleep(1);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
- return;
+ r8168g_wait_ll_share_fifo_ready(tp);
r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));
-
- rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
+ r8168g_wait_ll_share_fifo_ready(tp);
}
static void rtl_hw_init_8125(struct rtl8169_private *tp)
{
- tp->ocp_base = OCP_STD_PHY_BASE;
-
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
- return;
+ rtl_enable_rxdvgate(tp);
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
msleep(1);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
- return;
+ r8168g_wait_ll_share_fifo_ready(tp);
r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0);
r8168_mac_ocp_write(tp, 0xc0a6, 0x0150);
r8168_mac_ocp_write(tp, 0xc01e, 0x5555);
-
- rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
+ r8168g_wait_ll_share_fifo_ready(tp);
}
static void rtl_hw_initialize(struct rtl8169_private *tp)
@@ -5352,9 +5293,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp = netdev_priv(dev);
tp->dev = dev;
tp->pci_dev = pdev;
- tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
tp->eee_adv = -1;
+ tp->ocp_base = OCP_STD_PHY_BASE;
/* Get the *optional* external "ether_clk" used on some boards */
rc = rtl_get_ether_clk(tp);
@@ -5410,7 +5351,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mac_version = chipset;
- tp->cp_cmd = RTL_R16(tp, CPlusCmd);
+ tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
@@ -5445,14 +5386,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
- dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_HIGHDMA;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- tp->cp_cmd |= RxChkSum;
- /* RTL8125 uses register RxConfig for VLAN offloading config */
- if (!rtl_is_8125(tp))
- tp->cp_cmd |= RxVlan;
/*
* Pretend we are using VLANs; This bypasses a nasty bug where
* Interrupts stop flowing on high load on 8110SCd controllers.
@@ -5484,6 +5420,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
+ /* configure chip for default features */
+ rtl8169_set_features(dev, dev->features);
+
jumbo_max = rtl_jumbo_max(tp);
if (jumbo_max)
dev->max_mtu = jumbo_max;
@@ -5509,17 +5448,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = register_netdev(dev);
if (rc)
- goto err_mdio_unregister;
+ return rc;
- netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr, xid,
- pci_irq_vector(pdev, 0));
+ netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
+ rtl_chip_infos[chipset].name, dev->dev_addr, xid,
+ pci_irq_vector(pdev, 0));
if (jumbo_max)
- netif_info(tp, probe, dev,
- "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
- jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
- "ok" : "ko");
+ netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
+ jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
+ "ok" : "ko");
if (r8168_check_dash(tp))
rtl8168_driver_start(tp);
@@ -5528,10 +5466,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pm_runtime_put_sync(&pdev->dev);
return 0;
-
-err_mdio_unregister:
- mdiobus_unregister(tp->phydev->mdio.bus);
- return rc;
}
static struct pci_driver rtl8169_pci_driver = {
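Editorial note on the coalescing rework above: the patch drops the hand-rolled shift loop and packs the 16-bit IntrMitigate register with GENMASK()/FIELD_PREP()/FIELD_GET(). Below is a minimal standalone sketch of that packing, not driver code: the helper name and sample values are invented, the driver's range checks are omitted, and plain shifts/masks stand in for the bitfield.h macros.

#include <stdint.h>
#include <stdio.h>

/* field positions mirror the RTL_COALESCE_* GENMASK() definitions in the patch */
#define TX_USECS_SHIFT  12  /* GENMASK(15, 12) */
#define TX_FRAMES_SHIFT  8  /* GENMASK(11, 8)  */
#define RX_USECS_SHIFT   4  /* GENMASK(7, 4)   */
#define RX_FRAMES_SHIFT  0  /* GENMASK(3, 0)   */

static uint16_t pack_intrmitigate(uint32_t tx_us, uint32_t tx_fr,
                                  uint32_t rx_us, uint32_t rx_fr,
                                  uint32_t scale_ns)
{
        /* time fields hold usecs converted to hardware units (rounded up),
         * frame fields hold the frame count divided by four (rounded up);
         * all four fields are 4 bits wide
         */
        uint16_t w = 0;

        w |= (((tx_us * 1000 + scale_ns - 1) / scale_ns) & 0xf) << TX_USECS_SHIFT;
        w |= (((tx_fr + 3) / 4) & 0xf) << TX_FRAMES_SHIFT;
        w |= (((rx_us * 1000 + scale_ns - 1) / scale_ns) & 0xf) << RX_USECS_SHIFT;
        w |= (((rx_fr + 3) / 4) & 0xf) << RX_FRAMES_SHIFT;

        return w;
}

int main(void)
{
        /* e.g. 1000 Mbit/s on RTL8168 with CPlusCmd[0:1] == 0: 5000 ns base scale */
        printf("IntrMitigate = 0x%04x\n",
               pack_intrmitigate(50, 16, 50, 16, 5000)); /* prints 0xa4a4 */
        return 0;
}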
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 8ed73f44405d..f45331ed90b0 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2472,7 +2472,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
}
/* Packet transmit function */
-static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 3f16bd807c6e..e634e8110585 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -553,7 +553,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
/* In case we're recovering from a crash (kexec), we want to
* cancel any outstanding request by the previous user of this
@@ -1281,13 +1281,13 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_check_datapath_caps = false;
}
- if (nic_data->must_realloc_vis) {
+ if (efx->must_realloc_vis) {
/* We cannot let the number of VIs change now */
rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
nic_data->n_allocated_vis);
if (rc)
return rc;
- nic_data->must_realloc_vis = false;
+ efx->must_realloc_vis = false;
}
if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
@@ -1326,16 +1326,15 @@ static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
#endif
/* All our allocations have been reset */
- nic_data->must_realloc_vis = true;
- nic_data->must_restore_rss_contexts = true;
- nic_data->must_restore_filters = true;
+ efx->must_realloc_vis = true;
+ efx_mcdi_filter_table_reset_mc_allocations(efx);
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
if (nic_data->vf)
for (i = 0; i < efx->vf_count; i++)
@@ -2389,6 +2388,86 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
+static int efx_ef10_probe_multicast_chaining(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int enabled, implemented;
+ bool want_workaround_26807;
+ int rc;
+
+ rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+ if (rc == -ENOSYS) {
+ /* GET_WORKAROUNDS was implemented before this workaround,
+ * thus it must be unavailable in this firmware.
+ */
+ nic_data->workaround_26807 = false;
+ return 0;
+ }
+ if (rc)
+ return rc;
+ want_workaround_26807 =
+ implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807;
+ nic_data->workaround_26807 =
+ !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
+
+ if (want_workaround_26807 && !nic_data->workaround_26807) {
+ unsigned int flags;
+
+ rc = efx_mcdi_set_workaround(efx,
+ MC_CMD_WORKAROUND_BUG26807,
+ true, &flags);
+ if (!rc) {
+ if (flags &
+ 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
+ netif_info(efx, drv, efx->net_dev,
+ "other functions on NIC have been reset\n");
+
+ /* With MCFW v4.6.x and earlier, the
+ * boot count will have incremented,
+ * so re-read the warm_boot_count
+ * value now to ensure this function
+ * doesn't think it has changed next
+ * time it checks.
+ */
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc >= 0) {
+ nic_data->warm_boot_count = rc;
+ rc = 0;
+ }
+ }
+ nic_data->workaround_26807 = true;
+ } else if (rc == -EPERM) {
+ rc = 0;
+ }
+ }
+ return rc;
+}
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc = efx_ef10_probe_multicast_chaining(efx);
+ struct efx_mcdi_filter_vlan *vlan;
+
+ if (rc)
+ return rc;
+ rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
+
+ if (rc)
+ return rc;
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
+ if (rc)
+ goto fail_add_vlan;
+ }
+ return 0;
+
+fail_add_vlan:
+ efx_mcdi_filter_table_remove(efx);
+ return rc;
+}
+
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -2464,75 +2543,14 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
struct efx_ef10_nic_data *nic_data;
- unsigned int enabled, implemented;
bool use_v2, cut_thru;
- int rc;
nic_data = efx->nic_data;
use_v2 = nic_data->datapath_caps2 &
1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN;
cut_thru = !(nic_data->datapath_caps &
1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
- rc = efx_mcdi_ev_init(channel, cut_thru, use_v2);
-
- /* IRQ return is ignored */
- if (channel->channel || rc)
- return rc;
-
- /* Successfully created event queue on channel 0 */
- rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
- if (rc == -ENOSYS) {
- /* GET_WORKAROUNDS was implemented before this workaround,
- * thus it must be unavailable in this firmware.
- */
- nic_data->workaround_26807 = false;
- rc = 0;
- } else if (rc) {
- goto fail;
- } else {
- nic_data->workaround_26807 =
- !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
-
- if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
- !nic_data->workaround_26807) {
- unsigned int flags;
-
- rc = efx_mcdi_set_workaround(efx,
- MC_CMD_WORKAROUND_BUG26807,
- true, &flags);
-
- if (!rc) {
- if (flags &
- 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
- netif_info(efx, drv, efx->net_dev,
- "other functions on NIC have been reset\n");
-
- /* With MCFW v4.6.x and earlier, the
- * boot count will have incremented,
- * so re-read the warm_boot_count
- * value now to ensure this function
- * doesn't think it has changed next
- * time it checks.
- */
- rc = efx_ef10_get_warm_boot_count(efx);
- if (rc >= 0) {
- nic_data->warm_boot_count = rc;
- rc = 0;
- }
- }
- nic_data->workaround_26807 = true;
- } else if (rc == -EPERM) {
- rc = 0;
- }
- }
- }
-
- if (!rc)
- return 0;
-
-fail:
- efx_mcdi_ev_fini(channel);
- return rc;
+ return efx_mcdi_ev_init(channel, cut_thru, use_v2);
}
static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
@@ -3100,16 +3118,15 @@ void efx_ef10_handle_drain_event(struct efx_nic *efx)
static int efx_ef10_fini_dmaq(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
int pending;
/* If the MC has just rebooted, the TX/RX queues will have already been
* torn down, but efx->active_queues needs to be set to zero.
*/
- if (nic_data->must_realloc_vis) {
+ if (efx->must_realloc_vis) {
atomic_set(&efx->active_queues, 0);
return 0;
}
@@ -3158,22 +3175,22 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
- rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
if (rc)
goto restore_filters;
ether_addr_copy(mac_old, nic_data->vport_mac);
- rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ rc = efx_ef10_vport_del_mac(efx, efx->vport_id,
nic_data->vport_mac);
if (rc)
goto restore_vadaptor;
- rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
+ rc = efx_ef10_vport_add_mac(efx, efx->vport_id,
efx->net_dev->dev_addr);
if (!rc) {
ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
} else {
- rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
+ rc2 = efx_ef10_vport_add_mac(efx, efx->vport_id, mac_old);
if (rc2) {
/* Failed to add original MAC, so clear vport_mac */
eth_zero_addr(nic_data->vport_mac);
@@ -3182,12 +3199,12 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
}
restore_vadaptor:
- rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ rc2 = efx_ef10_vadaptor_alloc(efx, efx->vport_id);
if (rc2)
goto reset_nic;
restore_filters:
down_write(&efx->filter_sem);
- rc2 = efx_mcdi_filter_table_probe(efx);
+ rc2 = efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;
@@ -3225,11 +3242,11 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);
MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
- nic_data->vport_id);
+ efx->vport_id);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
sizeof(inbuf), NULL, 0, NULL);
- efx_mcdi_filter_table_probe(efx);
+ efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -3961,6 +3978,35 @@ out_unlock:
return rc;
}
+/* EF10 may have multiple datapath firmware variants within a
+ * single version. Report which variants are running.
+ */
+static size_t efx_ef10_print_additional_fwver(struct efx_nic *efx, char *buf,
+ size_t len)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ return scnprintf(buf, len, " rx%x tx%x",
+ nic_data->rx_dpcpu_fw_id,
+ nic_data->tx_dpcpu_fw_id);
+}
+
+static unsigned int ef10_check_caps(const struct efx_nic *efx,
+ u8 flag,
+ u32 offset)
+{
+ const struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ switch (offset) {
+ case(MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST):
+ return nic_data->datapath_caps & BIT_ULL(flag);
+ case(MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST):
+ return nic_data->datapath_caps2 & BIT_ULL(flag);
+ default:
+ return 0;
+ }
+}
+
#define EF10_OFFLOAD_FEATURES \
(NETIF_F_IP_CSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
@@ -4027,7 +4073,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
.filter_table_remove = efx_mcdi_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
@@ -4073,6 +4119,8 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -4139,7 +4187,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
.filter_table_remove = efx_mcdi_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
@@ -4208,4 +4256,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
};
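Editorial note: the efx_ef10_print_additional_fwver() callback added above is how EF10 now appends its datapath firmware variant IDs to the base version string; the generic hook is wired up in the mcdi.c hunk further down. A minimal standalone sketch of that string assembly follows, with made-up version numbers and snprintf() standing in for the kernel's scnprintf().

#include <stdio.h>

/* mirrors the " rx%x tx%x" suffix format used by the EF10 callback */
static int print_additional_fwver(char *buf, int len,
                                  unsigned int rx_id, unsigned int tx_id)
{
        return snprintf(buf, len, " rx%x tx%x", rx_id, tx_id);
}

int main(void)
{
        char buf[32];
        int offset;

        /* base version as efx_mcdi_print_fwver() formats it */
        offset = snprintf(buf, sizeof(buf), "%u.%u.%u.%u", 6, 2, 1, 1000);
        offset += print_additional_fwver(buf + offset, sizeof(buf) - offset,
                                         0x71, 0x72);
        printf("%s (%d chars)\n", buf, offset); /* "6.2.1.1000 rx71 tx72" */
        return 0;
}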
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 4580b30caae1..21fa6c0e8873 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -232,15 +232,14 @@ fail:
static int efx_ef10_vadaptor_alloc_set_features(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
u32 port_flags;
int rc;
- rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ rc = efx_ef10_vadaptor_alloc(efx, efx->vport_id);
if (rc)
goto fail_vadaptor_alloc;
- rc = efx_ef10_vadaptor_query(efx, nic_data->vport_id,
+ rc = efx_ef10_vadaptor_query(efx, efx->vport_id,
&port_flags, NULL, NULL);
if (rc)
goto fail_vadaptor_query;
@@ -281,11 +280,11 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
- EFX_EF10_NO_VLAN, &nic_data->vport_id);
+ EFX_EF10_NO_VLAN, &efx->vport_id);
if (rc)
goto fail2;
- rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
+ rc = efx_ef10_vport_add_mac(efx, efx->vport_id, net_dev->dev_addr);
if (rc)
goto fail3;
ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
@@ -296,11 +295,11 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
return 0;
fail4:
- efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
+ efx_ef10_vport_del_mac(efx, efx->vport_id, nic_data->vport_mac);
eth_zero_addr(nic_data->vport_mac);
fail3:
- efx_ef10_vport_free(efx, nic_data->vport_id);
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx_ef10_vport_free(efx, efx->vport_id);
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
fail2:
efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
fail1:
@@ -355,22 +354,22 @@ void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
efx_ef10_sriov_free_vf_vswitching(efx);
- efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ efx_ef10_vadaptor_free(efx, efx->vport_id);
- if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
+ if (efx->vport_id == EVB_PORT_ID_ASSIGNED)
return; /* No vswitch was ever created */
if (!is_zero_ether_addr(nic_data->vport_mac)) {
- efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ efx_ef10_vport_del_mac(efx, efx->vport_id,
efx->net_dev->dev_addr);
eth_zero_addr(nic_data->vport_mac);
}
- efx_ef10_vport_free(efx, nic_data->vport_id);
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx_ef10_vport_free(efx, efx->vport_id);
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
/* Only free the vswitch if no VFs are assigned */
if (!pci_vfs_assigned(efx->pci_dev))
- efx_ef10_vswitch_free(efx, nic_data->vport_id);
+ efx_ef10_vswitch_free(efx, efx->vport_id);
}
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 15c731d04065..a8cc3881edce 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1425,23 +1425,16 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
le16_to_cpu(ver_words[2]),
le16_to_cpu(ver_words[3]));
- /* EF10 may have multiple datapath firmware variants within a
- * single version. Report which variants are running.
- */
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- offset += scnprintf(buf + offset, len - offset, " rx%x tx%x",
- nic_data->rx_dpcpu_fw_id,
- nic_data->tx_dpcpu_fw_id);
+ if (efx->type->print_additional_fwver)
+ offset += efx->type->print_additional_fwver(efx, buf + offset,
+ len - offset);
- /* It's theoretically possible for the string to exceed 31
- * characters, though in practice the first three version
- * components are short enough that this doesn't happen.
- */
- if (WARN_ON(offset >= len))
- buf[0] = 0;
- }
+ /* It's theoretically possible for the string to exceed 31
+ * characters, though in practice the first three version
+ * components are short enough that this doesn't happen.
+ */
+ if (WARN_ON(offset >= len))
+ buf[0] = 0;
return;
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 54a45010b576..b107e4c00285 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -326,6 +326,18 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
+#define MCDI_CAPABILITY(field) \
+ MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _LBN
+
+#define MCDI_CAPABILITY_OFST(field) \
+ MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _OFST
+
+/* field is FLAGS1 or FLAGS2 */
+#define efx_has_cap(efx, flag, field) \
+ efx->type->check_caps(efx, \
+ MCDI_CAPABILITY(flag), \
+ MCDI_CAPABILITY_OFST(field))
+
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities);
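Editorial note: the efx_has_cap() macro defined above resolves a named capability flag into a call to the per-NIC check_caps() hook; for EF10 that hook (ef10_check_caps(), added earlier in this patch) selects datapath_caps or datapath_caps2 by the MCDI register offset. A minimal standalone sketch of the dispatch follows, with stand-in values for the MCDI-generated constants and 1ULL << flag in place of BIT_ULL(); the result is reduced to a boolean for the example.

#include <stdint.h>
#include <stdio.h>

#define FLAGS1_OFST      8   /* stand-in for ..._V4_OUT_FLAGS1_OFST */
#define FLAGS2_OFST     20   /* stand-in for ..._V4_OUT_FLAGS2_OFST */
#define VXLAN_NVGRE_LBN  5   /* stand-in bit number for the example */

struct example_nic_data {
        uint64_t datapath_caps;   /* FLAGS1 word */
        uint64_t datapath_caps2;  /* FLAGS2 word */
};

/* shaped like ef10_check_caps(): pick the caps word by register offset */
static unsigned int example_check_caps(const struct example_nic_data *nd,
                                       uint8_t flag, uint32_t offset)
{
        switch (offset) {
        case FLAGS1_OFST:
                return !!(nd->datapath_caps & (1ULL << flag));
        case FLAGS2_OFST:
                return !!(nd->datapath_caps2 & (1ULL << flag));
        default:
                return 0;
        }
}

int main(void)
{
        struct example_nic_data nd = { .datapath_caps = 1ULL << VXLAN_NVGRE_LBN };

        /* analogous to efx_has_cap(efx, VXLAN_NVGRE, FLAGS1) */
        printf("VXLAN/NVGRE encap filters: %s\n",
               example_check_caps(&nd, VXLAN_NVGRE_LBN, FLAGS1_OFST) ?
               "supported" : "not supported");
        return 0;
}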
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 4310ae5bd898..455a62814fb9 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -186,7 +186,6 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
struct efx_rss_context *ctx,
bool replacing)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
u32 flags = spec->flags;
memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
@@ -211,7 +210,7 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
@@ -332,7 +331,6 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
bool replace_equal)
{
DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct efx_mcdi_filter_table *table;
struct efx_filter_spec *saved_spec;
struct efx_rss_context *ctx = NULL;
@@ -461,7 +459,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle,
ctx, replacing);
- if (rc == -EINVAL && nic_data->must_realloc_vis)
+ if (rc == -EINVAL && efx->must_realloc_vis)
/* The MC rebooted under us, causing it to reject our filter
* insertion as pointing to an invalid VI (spec->dmaq_id).
*/
@@ -813,7 +811,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
enum efx_encap_type encap_type,
bool multicast, bool rollback)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
enum efx_filter_flags filter_flags;
struct efx_filter_spec spec;
u8 baddr[ETH_ALEN];
@@ -830,8 +828,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
efx_filter_set_uc_def(&spec);
if (encap_type) {
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
efx_filter_set_encap_type(&spec, encap_type);
else
/*
@@ -899,7 +896,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
*id = efx_mcdi_filter_get_unsafe_id(rc);
- if (!nic_data->workaround_26807 && !encap_type) {
+ if (!table->mc_chaining && !encap_type) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags, 0);
@@ -965,7 +962,6 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
struct efx_mcdi_filter_vlan *vlan)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
/*
* Do not install unspecified VID if VLAN filtering is enabled.
@@ -1012,11 +1008,10 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
* If changing promiscuous state with cascaded multicast filters, remove
* old filters first, so that packets are dropped rather than duplicated
*/
- if (nic_data->workaround_26807 &&
- table->mc_promisc_last != table->mc_promisc)
+ if (table->mc_chaining && table->mc_promisc_last != table->mc_promisc)
efx_mcdi_filter_remove_old(efx);
if (table->mc_promisc) {
- if (nic_data->workaround_26807) {
+ if (table->mc_chaining) {
/*
* If we failed to insert promiscuous filters, rollback
* and fall back to individual multicast filters
@@ -1051,7 +1046,7 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
*/
if (efx_mcdi_filter_insert_addr_list(efx, vlan, true, true)) {
/* Changing promisc state, so remove old filters */
- if (nic_data->workaround_26807)
+ if (table->mc_chaining)
efx_mcdi_filter_remove_old(efx);
if (efx_mcdi_filter_insert_def(efx, vlan,
EFX_ENCAP_TYPE_NONE,
@@ -1288,12 +1283,10 @@ efx_mcdi_filter_table_probe_matches(struct efx_nic *efx,
return 0;
}
-int efx_mcdi_filter_table_probe(struct efx_nic *efx)
+int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct net_device *net_dev = efx->net_dev;
struct efx_mcdi_filter_table *table;
- struct efx_mcdi_filter_vlan *vlan;
int rc;
if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
@@ -1306,12 +1299,12 @@ int efx_mcdi_filter_table_probe(struct efx_nic *efx)
if (!table)
return -ENOMEM;
+ table->mc_chaining = multicast_chaining;
table->rx_match_count = 0;
rc = efx_mcdi_filter_table_probe_matches(efx, table, false);
if (rc)
goto fail;
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
rc = efx_mcdi_filter_table_probe_matches(efx, table, true);
if (rc)
goto fail;
@@ -1342,22 +1335,22 @@ int efx_mcdi_filter_table_probe(struct efx_nic *efx)
efx->filter_state = table;
- list_for_each_entry(vlan, &nic_data->vlan_list, list) {
- rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
- if (rc)
- goto fail_add_vlan;
- }
-
return 0;
-
-fail_add_vlan:
- efx_mcdi_filter_cleanup_vlans(efx);
- efx->filter_state = NULL;
fail:
kfree(table);
return rc;
}
+void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+
+ if (table) {
+ table->must_restore_filters = true;
+ table->must_restore_rss_contexts = true;
+ }
+}
+
/*
* Caller must hold efx->filter_sem for read if race against
* efx_mcdi_filter_table_remove() is possible
@@ -1365,7 +1358,6 @@ fail:
void efx_mcdi_filter_table_restore(struct efx_nic *efx)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int invalid_filters = 0, failed = 0;
struct efx_mcdi_filter_vlan *vlan;
struct efx_filter_spec *spec;
@@ -1377,10 +1369,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
WARN_ON(!rwsem_is_locked(&efx->filter_sem));
- if (!nic_data->must_restore_filters)
- return;
-
- if (!table)
+ if (!table || !table->must_restore_filters)
return;
down_write(&table->lock);
@@ -1456,7 +1445,7 @@ not_restored:
netif_err(efx, hw, efx->net_dev,
"unable to restore %u filters\n", failed);
else
- nic_data->must_restore_filters = false;
+ table->must_restore_filters = false;
}
void efx_mcdi_filter_table_remove(struct efx_nic *efx)
@@ -1921,7 +1910,6 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
u32 alloc_type = exclusive ?
@@ -1939,12 +1927,11 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
return 0;
}
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+ if (efx_has_cap(efx, RX_RSS_LIMITED, FLAGS1))
return -EOPNOTSUPP;
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
- nic_data->vport_id);
+ efx->vport_id);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
@@ -1961,8 +1948,7 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
if (context_size)
*context_size = rss_spread;
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
+ if (efx_has_cap(efx, ADDITIONAL_RSS_MODES, FLAGS1))
efx_mcdi_set_rss_context_flags(efx, ctx);
return 0;
@@ -2030,14 +2016,14 @@ void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
unsigned *context_size)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
context_size);
if (rc != 0)
return rc;
- nic_data->rx_rss_context_exclusive = false;
+ table->rx_rss_context_exclusive = false;
efx_set_default_rx_indir_table(efx, &efx->rss_context);
return 0;
}
@@ -2046,12 +2032,12 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
const u32 *rx_indir_table,
const u8 *key)
{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
u32 old_rx_rss_context = efx->rss_context.context_id;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
- !nic_data->rx_rss_context_exclusive) {
+ !table->rx_rss_context_exclusive) {
rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
NULL);
if (rc == -EOPNOTSUPP)
@@ -2068,7 +2054,7 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
if (efx->rss_context.context_id != old_rx_rss_context &&
old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
- nic_data->rx_rss_context_exclusive = true;
+ table->rx_rss_context_exclusive = true;
if (rx_indir_table != efx->rss_context.rx_indir_table)
memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
sizeof(efx->rss_context.rx_indir_table));
@@ -2182,13 +2168,13 @@ int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
struct efx_rss_context *ctx;
int rc;
WARN_ON(!mutex_is_locked(&efx->rss_lock));
- if (!nic_data->must_restore_rss_contexts)
+ if (!table->must_restore_rss_contexts)
return;
list_for_each_entry(ctx, &efx->rss_context.list, list) {
@@ -2204,7 +2190,7 @@ void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
"; RSS filters may fail to be applied\n",
ctx->user_id, rc);
}
- nic_data->must_restore_rss_contexts = false;
+ table->must_restore_rss_contexts = false;
}
int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
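The new efx_mcdi_filter_table_reset_mc_allocations() helper is meant to be invoked from the MC-reboot handling path, together with the efx->must_realloc_vis flag that moved into struct efx_nic. A rough, hypothetical caller (the function name below is illustrative only):

static void example_handle_mc_reboot(struct efx_nic *efx)
{
	/* VIs are firmware allocations and must be re-created */
	efx->must_realloc_vis = true;

	/* Filters and RSS contexts are now tracked in the MCDI filter
	 * table state, so ask that code to mark them for restoration.
	 */
	efx_mcdi_filter_table_reset_mc_allocations(efx);
}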
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
index 1837f4f5d661..03a8bf74c733 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.h
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -55,6 +55,8 @@ struct efx_mcdi_filter_table {
u32 rx_match_mcdi_flags[
MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
unsigned int rx_match_count;
+ /* Our RSS context is exclusive (as opposed to shared) */
+ bool rx_rss_context_exclusive;
struct rw_semaphore lock; /* Protects entries */
struct {
@@ -75,14 +77,27 @@ struct efx_mcdi_filter_table {
/* Whether in multicast promiscuous mode when last changed */
bool mc_promisc_last;
bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
+ /* RSS contexts have yet to be restored after MC reboot */
+ bool must_restore_rss_contexts;
+ /* filters have yet to be restored after MC reboot */
+ bool must_restore_filters;
+ /* Multicast filter chaining allows less-specific filters to receive
+ * multicast packets that matched more-specific filters. Early EF10
+ * firmware didn't support this (SF bug 26807); if mc_chaining == false
+ * then we still subscribe to the dev_mc_list even when mc_promisc is
+ * set, to prevent another VI from stealing the traffic.
+ */
+ bool mc_chaining;
bool vlan_filter;
struct list_head vlan_list;
};
-int efx_mcdi_filter_table_probe(struct efx_nic *efx);
+int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining);
void efx_mcdi_filter_table_remove(struct efx_nic *efx);
void efx_mcdi_filter_table_restore(struct efx_nic *efx);
+void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx);
+
/*
* The filter table(s) are managed by firmware and we have write-only
* access. When removing filters we must identify them to the
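With the signature change above, the NIC type now decides whether multicast chaining is usable. A sketch of how an EF10 wrapper might pass that down, assuming the existing workaround_26807 flag in nic_data (this wrapper is not shown in the section, so treat it as an assumption):

static int efx_ef10_filter_table_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	/* Multicast chaining is usable when firmware implements the
	 * workaround for SF bug 26807.
	 */
	return efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
}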
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c
index dcfe78b0fa5a..962d8395d958 100644
--- a/drivers/net/ethernet/sfc/mcdi_functions.c
+++ b/drivers/net/ethernet/sfc/mcdi_functions.c
@@ -168,21 +168,18 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
- struct efx_ef10_nic_data *nic_data;
dma_addr_t dma_addr;
size_t inlen;
int rc, i;
BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
- nic_data = efx->nic_data;
-
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);
dma_addr = tx_queue->txd.buf.dma_addr;
@@ -276,7 +273,6 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = rx_queue->efx;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
dma_addr_t dma_addr;
size_t inlen;
int rc;
@@ -295,7 +291,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
INIT_RXQ_IN_FLAG_PREFIX, 1,
INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
dma_addr = rx_queue->rxd.buf.dma_addr;
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index ab5227b13ae6..b807871d8f69 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -722,11 +722,8 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
- }
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b084e623b5f4..1afb58feb9ab 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -887,8 +887,10 @@ struct efx_async_filter_insertion {
* @rss_context: Main RSS context. Its @list member is the head of the list of
* RSS contexts created by user requests
* @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @vport_id: The function's vport ID, only relevant for PFs
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
* @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
* acknowledge but do nothing else.
* @irq_status: Interrupt status buffer
@@ -1044,10 +1046,12 @@ struct efx_nic {
bool rx_scatter;
struct efx_rss_context rss_context;
struct mutex rss_lock;
+ u32 vport_id;
unsigned int_error_count;
unsigned long int_error_expire;
+ bool must_realloc_vis;
bool irq_soft_enabled;
struct efx_buffer irq_status;
unsigned irq_zero_count;
@@ -1292,6 +1296,7 @@ struct efx_udp_tunnel {
* @udp_tnl_add_port: Add a UDP tunnel port
* @udp_tnl_has_port: Check if a port has been added as UDP tunnel
* @udp_tnl_del_port: Remove a UDP tunnel port
+ * @print_additional_fwver: Dump NIC-specific additional FW version info
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1352,6 +1357,9 @@ struct efx_nic_type {
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
int (*set_wol)(struct efx_nic *efx, u32 type);
void (*resume_wol)(struct efx_nic *efx);
+ unsigned int (*check_caps)(const struct efx_nic *efx,
+ u8 flag,
+ u32 offset);
int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx);
void (*mcdi_request)(struct efx_nic *efx,
@@ -1462,6 +1470,8 @@ struct efx_nic_type {
int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
+ size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
+ size_t len);
int revision;
unsigned int txd_ptr_tbl_base;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 6670fda8f35a..8f73c5d996eb 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -360,10 +360,6 @@ enum {
* @warm_boot_count: Last seen MC warm boot count
* @vi_base: Absolute index of first VI in this function
* @n_allocated_vis: Number of VIs allocated to this function
- * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
- * @must_restore_rss_contexts: Flag: RSS contexts have yet to be restored after
- * MC reboot
- * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
* @n_piobufs: Number of PIO buffers allocated to this function
* @wc_membase: Base address of write-combining mapping of the memory BAR
* @pio_write_base: Base address for writing PIO buffers
@@ -372,7 +368,6 @@ enum {
* @piobuf_size: size of a single PIO buffer
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
- * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @workaround_26807: Flag: firmware supports workaround for bug 26807
@@ -385,7 +380,6 @@ enum {
* %MC_CMD_GET_CAPABILITIES response)
* @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
* @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
- * @vport_id: The function's vport ID, only relevant for PFs
* @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
* @pf_index: The number for this PF, or the parent PF if this is a VF
#ifdef CONFIG_SFC_SRIOV
@@ -404,16 +398,12 @@ struct efx_ef10_nic_data {
u16 warm_boot_count;
unsigned int vi_base;
unsigned int n_allocated_vis;
- bool must_realloc_vis;
- bool must_restore_rss_contexts;
- bool must_restore_filters;
unsigned int n_piobufs;
void __iomem *wc_membase, *pio_write_base;
unsigned int pio_write_vi_base;
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
u16 piobuf_size;
bool must_restore_piobufs;
- bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool workaround_26807;
@@ -423,7 +413,6 @@ struct efx_ef10_nic_data {
u32 datapath_caps2;
unsigned int rx_dpcpu_fw_id;
unsigned int tx_dpcpu_fw_id;
- unsigned int vport_id;
bool must_probe_vswitching;
unsigned int pf_index;
u8 port_id[ETH_ALEN];
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 59b4f16896a8..04c7283d205e 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -352,12 +352,7 @@ static int efx_phc_enable(struct ptp_clock_info *ptp,
bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- return ((efx_nic_rev(efx) >= EFX_REV_HUNT_A0) &&
- (nic_data->datapath_caps2 &
- (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN)
- ));
+ return efx_has_cap(efx, TX_MAC_TIMESTAMPING, FLAGS2);
}
/* PTP 'extra' channel is still a traffic channel, but we only create TX queues
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 260352d97d9d..68c47a8c71df 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -308,6 +308,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + rx_buf->len;
xdp.rxq = &rx_queue->xdp_rxq_info;
+ xdp.frame_sz = efx->rx_page_buf_step;
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
rcu_read_unlock();
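xdp.frame_sz tells the XDP core the total per-buffer stride (headroom + packet + tailroom), which bpf_xdp_adjust_tail() and xdp_frame conversion need in order to bound how far a program may grow the frame. A generic sketch of the setup pattern, with all parameter names being illustrative:

static void example_init_xdp_buff(struct xdp_buff *xdp, void *hard_start,
				  unsigned int headroom, unsigned int pkt_len,
				  unsigned int buf_stride,
				  struct xdp_rxq_info *rxq)
{
	xdp->data_hard_start = hard_start;
	xdp->data = hard_start + headroom;
	xdp_set_data_meta_invalid(xdp);
	xdp->data_end = xdp->data + pkt_len;
	xdp->rxq = rxq;
	/* The whole buffer stride, not just pkt_len: without this the
	 * core cannot know how much tailroom is available.
	 */
	xdp->frame_sz = buf_stride;
}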
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index baa464161626..891e9fb6abec 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -948,6 +948,13 @@ fail:
#endif /* CONFIG_SFC_MTD */
+static unsigned int siena_check_caps(const struct efx_nic *efx,
+ u8 flag, u32 offset)
+{
+ /* Siena did not support MC_CMD_GET_CAPABILITIES */
+ return 0;
+}
+
/**************************************************************************
*
* Revision-dependent attributes used by efx.c and nic.c
@@ -1086,4 +1093,5 @@ const struct efx_nic_type siena_a0_nic_type = {
1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
.rx_hash_key_size = 16,
+ .check_caps = siena_check_caps,
};
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 9e1c3752b200..4d2d91ec8b41 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -28,7 +28,7 @@ config SMC9194
option if you have a DELL laptop with the docking station, or
another SMC9192/9194 based chipset. Say Y if you want it compiled
into the kernel, and read the file
- <file:Documentation/networking/device_drivers/smsc/smc9.txt>.
+ <file:Documentation/networking/device_drivers/smsc/smc9.rst>.
To compile this driver as a module, choose M here. The module
will be called smc9194.
@@ -44,7 +44,7 @@ config SMC91X
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
compiled into the kernel, and read the file
- <file:Documentation/networking/device_drivers/smsc/smc9.txt>.
+ <file:Documentation/networking/device_drivers/smsc/smc9.rst>.
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index a5a0fb60193a..e1f4be4b3d69 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -884,23 +884,28 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
struct xdp_buff *xdp)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- unsigned int len = xdp->data_end - xdp->data;
+ unsigned int sync, len = xdp->data_end - xdp->data;
u32 ret = NETSEC_XDP_PASS;
+ struct page *page;
int err;
u32 act;
act = bpf_prog_run_xdp(prog, xdp);
+ /* Due to xdp_adjust_tail: the DMA sync for_device must cover the max length the CPU may have touched */
+ sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
+ sync = max(sync, len);
+
switch (act) {
case XDP_PASS:
ret = NETSEC_XDP_PASS;
break;
case XDP_TX:
ret = netsec_xdp_xmit_back(priv, xdp);
- if (ret != NETSEC_XDP_TX)
- page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ if (ret != NETSEC_XDP_TX) {
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(dring->page_pool, page, sync, true);
+ }
break;
case XDP_REDIRECT:
err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -908,9 +913,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
ret = NETSEC_XDP_REDIR;
} else {
ret = NETSEC_XDP_CONSUMED;
- page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(dring->page_pool, page, sync, true);
}
break;
default:
@@ -921,8 +925,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
/* fall through -- handle aborts by dropping packet */
case XDP_DROP:
ret = NETSEC_XDP_CONSUMED;
- page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data), len, true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(dring->page_pool, page, sync, true);
break;
}
@@ -936,10 +940,14 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_rx_pkt_info rx_info;
enum dma_data_direction dma_dir;
struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
u16 xdp_xmit = 0;
u32 xdp_act = 0;
int done = 0;
+ xdp.rxq = &dring->xdp_rxq;
+ xdp.frame_sz = PAGE_SIZE;
+
rcu_read_lock();
xdp_prog = READ_ONCE(priv->xdp_prog);
dma_dir = page_pool_get_dma_dir(dring->page_pool);
@@ -953,7 +961,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
- struct xdp_buff xdp;
void *buf_addr;
if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
@@ -1002,7 +1009,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + pkt_len;
- xdp.rxq = &dring->xdp_rxq;
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
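The sync length computed in netsec_run_xdp() matters because a BPF program may have grown the frame with bpf_xdp_adjust_tail(); the DMA sync size passed to page_pool_put_page() must cover everything the CPU might have written. A condensed sketch of that rule (names are illustrative, not the driver's):

static void example_recycle_xdp_page(struct page_pool *pool,
				     struct xdp_buff *xdp,
				     unsigned int headroom,
				     unsigned int orig_len)
{
	/* Largest region the CPU may have touched: either the original
	 * packet length or the possibly-grown region after the program.
	 */
	unsigned int sync = xdp->data_end - xdp->data_hard_start - headroom;

	sync = max_t(unsigned int, sync, orig_len);
	page_pool_put_page(pool, virt_to_head_page(xdp->data), sync, true);
}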
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 67ddf782d98a..f2638446b62e 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1394,7 +1394,7 @@ static int ave_stop(struct net_device *ndev)
return 0;
}
-static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ave_private *priv = netdev_priv(ndev);
u32 proc_idx, done_idx, ndesc, cmdsts;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 5a6f265bc540..f9d024d6b69b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -30,6 +30,6 @@ obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
-obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
-obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
+obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
+obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 6208a68a331d..127f75862962 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -473,6 +473,7 @@ struct mac_device_info {
unsigned int xlgmac;
unsigned int num_vlan;
u32 vlan_filter[32];
+ unsigned int promisc;
};
struct stmmac_rx_routing {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 2e4aaedb93f5..2ac9dfb3462c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -83,13 +83,9 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
serdes_phy_addr = intel_priv->mdio_adhoc_addr;
/* assert clk_req */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data |= SERDES_PLL_CLK;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for clk_ack assertion */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -103,13 +99,9 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
}
/* assert lane reset */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data |= SERDES_RST;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for assert lane reset reflection */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -123,14 +115,12 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
}
/* move power state to P0 */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_PWR_ST_MASK;
data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* Check for P0 state */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -159,14 +149,12 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
serdes_phy_addr = intel_priv->mdio_adhoc_addr;
/* move power state to P3 */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_PWR_ST_MASK;
data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* Check for P3 state */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -180,13 +168,9 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
/* de-assert clk_req */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_PLL_CLK;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for clk_ack de-assert */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -200,13 +184,9 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
/* de-assert lane reset */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_RST;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for de-assert lane reset reflection */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -252,6 +232,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
static int intel_mgbe_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ int ret;
int i;
plat->clk_csr = 5;
@@ -324,7 +305,12 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
plat->stmmac_clk = NULL;
}
- clk_prepare_enable(plat->stmmac_clk);
+
+ ret = clk_prepare_enable(plat->stmmac_clk);
+ if (ret) {
+ clk_unregister_fixed_rate(plat->stmmac_clk);
+ return ret;
+ }
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -341,16 +327,11 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
static int ehl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
- int ret;
-
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
plat->clk_ptp_rate = 200000000;
- ret = intel_mgbe_common_data(pdev, plat);
- if (ret)
- return ret;
- return 0;
+ return intel_mgbe_common_data(pdev, plat);
}
static int ehl_sgmii_data(struct pci_dev *pdev,
@@ -366,7 +347,7 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
return ehl_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_sgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_sgmii1g_info = {
.setup = ehl_sgmii_data,
};
@@ -380,7 +361,7 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
return ehl_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_rgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_rgmii1g_info = {
.setup = ehl_rgmii_data,
};
@@ -399,7 +380,7 @@ static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
return ehl_pse0_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse0_rgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
.setup = ehl_pse0_rgmii1g_data,
};
@@ -412,7 +393,7 @@ static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
return ehl_pse0_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse0_sgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
.setup = ehl_pse0_sgmii1g_data,
};
@@ -431,7 +412,7 @@ static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
return ehl_pse1_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse1_rgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
.setup = ehl_pse1_rgmii1g_data,
};
@@ -444,23 +425,18 @@ static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
return ehl_pse1_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse1_sgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
.setup = ehl_pse1_sgmii1g_data,
};
static int tgl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
- int ret;
-
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 200000000;
- ret = intel_mgbe_common_data(pdev, plat);
- if (ret)
- return ret;
- return 0;
+ return intel_mgbe_common_data(pdev, plat);
}
static int tgl_sgmii_data(struct pci_dev *pdev,
@@ -474,7 +450,7 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
return tgl_common_data(pdev, plat);
}
-static struct stmmac_pci_info tgl_sgmii1g_pci_info = {
+static struct stmmac_pci_info tgl_sgmii1g_info = {
.setup = tgl_sgmii_data,
};
@@ -577,7 +553,7 @@ static int quark_default_data(struct pci_dev *pdev,
return 0;
}
-static const struct stmmac_pci_info quark_pci_info = {
+static const struct stmmac_pci_info quark_info = {
.setup = quark_default_data,
};
@@ -600,11 +576,9 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
struct intel_priv_data *intel_priv;
struct plat_stmmacenet_data *plat;
struct stmmac_resources res;
- int i;
int ret;
- intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv),
- GFP_KERNEL);
+ intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
if (!intel_priv)
return -ENOMEM;
@@ -631,15 +605,9 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
return ret;
}
- /* Get the base address of device */
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
- if (ret)
- return ret;
- break;
- }
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
pci_set_master(pdev);
@@ -650,14 +618,23 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- pci_enable_msi(pdev);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[i];
- res.wol_irq = pdev->irq;
- res.irq = pdev->irq;
+ res.addr = pcim_iomap_table(pdev)[0];
+ res.wol_irq = pci_irq_vector(pdev, 0);
+ res.irq = pci_irq_vector(pdev, 0);
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+ if (ret) {
+ pci_free_irq_vectors(pdev);
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_unregister_fixed_rate(plat->stmmac_clk);
+ }
- return stmmac_dvr_probe(&pdev->dev, plat, &res);
+ return ret;
}
/**
@@ -671,19 +648,15 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
{
struct net_device *ndev = dev_get_drvdata(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
- int i;
stmmac_dvr_remove(&pdev->dev);
- if (priv->plat->stmmac_clk)
- clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+ pci_free_irq_vectors(pdev);
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+
+ pcim_iounmap_regions(pdev, BIT(0));
pci_disable_device(pdev);
}
@@ -742,26 +715,19 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
- { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID,
- &ehl_pse0_rgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID,
- &ehl_pse0_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID,
- &ehl_pse0_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID,
- &ehl_pse1_rgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID,
- &ehl_pse1_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID,
- &ehl_pse1_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID, &ehl_pse0_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID, &ehl_pse0_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID, &ehl_pse0_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID, &ehl_pse1_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
{}
};
-
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
static struct pci_driver intel_eth_pci_driver = {
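The probe/remove rework above follows the usual IRQ-vector pairing rule: every successful pci_alloc_irq_vectors() must be matched by pci_free_irq_vectors() on both the error path and the removal path, and the fixed-rate clock registered in intel_mgbe_common_data() is now torn down symmetrically as well. A stripped-down sketch of the pairing (the helper names here are hypothetical):

static int example_pci_probe(struct pci_dev *pdev)
{
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	ret = example_register_device(pdev, pci_irq_vector(pdev, 0));
	if (ret)
		pci_free_irq_vectors(pdev);	/* undo on error */

	return ret;
}

static void example_pci_remove(struct pci_dev *pdev)
{
	example_unregister_device(pdev);
	pci_free_irq_vectors(pdev);		/* undo on removal */
}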
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index a3934ca6a043..234e8b6816ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -32,7 +33,10 @@
#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
-#define PRG_ETH0_TXDLY_SHIFT 5
+/* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one
+ * cycle of the 125MHz RGMII TX clock):
+ * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
+ */
#define PRG_ETH0_TXDLY_MASK GENMASK(6, 5)
/* divider for the result of m250_sel */
@@ -44,6 +48,27 @@
#define PRG_ETH0_INVERTED_RMII_CLK BIT(11)
#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12)
+/* Bypass (= 0, the signal from the GPIO input directly connects to the
+ * internal sampling) or enable (= 1) the internal logic for RXEN and RXD[3:0]
+ * timing tuning.
+ */
+#define PRG_ETH0_ADJ_ENABLE BIT(13)
+/* Controls whether the RXEN and RXD[3:0] signals should be aligned with the
+ * input RX rising/falling edge and sent to the Ethernet internals. This
+ * sets the delay and skew automatically (internally).
+ */
+#define PRG_ETH0_ADJ_SETUP BIT(14)
+/* An internal counter based on the "timing-adjustment" clock. The counter is
+ * cleared on both the falling and rising edges of RX_CLK. This selects the
+ * delay (= the counter value) at which to start sampling RXEN and RXD[3:0].
+ */
+#define PRG_ETH0_ADJ_DELAY GENMASK(19, 15)
+/* Adjusts the skew between each bit of RXEN and RXD[3:0]. If a signal has a
+ * large input delay, the bit for that signal (RXEN = bit 0, RXD[3] = bit 1,
+ * ...) can be configured to be 1 to compensate for a delay of about 1ns.
+ */
+#define PRG_ETH0_ADJ_SKEW GENMASK(24, 20)
+
#define MUX_CLK_NUM_PARENTS 2
struct meson8b_dwmac;
@@ -60,6 +85,8 @@ struct meson8b_dwmac {
phy_interface_t phy_mode;
struct clk *rgmii_tx_clk;
u32 tx_delay_ns;
+ u32 rx_delay_ns;
+ struct clk *timing_adj_clk;
};
struct meson8b_dwmac_clk_configs {
@@ -240,30 +267,82 @@ static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac)
return 0;
}
+static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
+ struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ devm_add_action_or_reset(dwmac->dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+
+ return 0;
+}
+
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
+ u32 tx_dly_config, rx_dly_config, delay_config;
int ret;
- u8 tx_dly_val = 0;
+
+ tx_dly_config = FIELD_PREP(PRG_ETH0_TXDLY_MASK,
+ dwmac->tx_delay_ns >> 1);
+
+ if (dwmac->rx_delay_ns == 2)
+ rx_dly_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
+ else
+ rx_dly_config = 0;
switch (dwmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ delay_config = tx_dly_config | rx_dly_config;
+ break;
case PHY_INTERFACE_MODE_RGMII_RXID:
- /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where
- * 8ns are exactly one cycle of the 125MHz RGMII TX clock):
- * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
- */
- tx_dly_val = dwmac->tx_delay_ns >> 1;
- /* fall through */
-
- case PHY_INTERFACE_MODE_RGMII_ID:
+ delay_config = tx_dly_config;
+ break;
case PHY_INTERFACE_MODE_RGMII_TXID:
+ delay_config = rx_dly_config;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RMII:
+ delay_config = 0;
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+ }
+
+ if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
+ if (!dwmac->timing_adj_clk) {
+ dev_err(dwmac->dev,
+ "The timing-adjustment clock is mandatory for the RX delay re-timing\n");
+ return -EINVAL;
+ }
+
+ /* The timing adjustment logic is driven by a separate clock */
+ ret = meson8b_devm_clk_prepare_enable(dwmac,
+ dwmac->timing_adj_clk);
+ if (ret) {
+ dev_err(dwmac->dev,
+ "Failed to enable the timing-adjustment clock\n");
+ return ret;
+ }
+ }
+
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK |
+ PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP |
+ PRG_ETH0_ADJ_DELAY | PRG_ETH0_ADJ_SKEW,
+ delay_config);
+
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) {
/* only relevant for RMII mode -> disable in RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK, 0);
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
- tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
-
/* Configure the 125MHz RGMII TX clock, the IP block changes
* the output automatically (= without us having to configure
* a register) based on the line-speed (125MHz for Gbit speeds,
@@ -276,34 +355,18 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return ret;
}
- ret = clk_prepare_enable(dwmac->rgmii_tx_clk);
+ ret = meson8b_devm_clk_prepare_enable(dwmac,
+ dwmac->rgmii_tx_clk);
if (ret) {
dev_err(dwmac->dev,
"failed to enable the RGMII TX clock\n");
return ret;
}
-
- devm_add_action_or_reset(dwmac->dev,
- (void(*)(void *))clk_disable_unprepare,
- dwmac->rgmii_tx_clk);
- break;
-
- case PHY_INTERFACE_MODE_RMII:
+ } else {
/* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK,
PRG_ETH0_INVERTED_RMII_CLK);
-
- /* TX clock delay cannot be configured in RMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
- 0);
-
- break;
-
- default:
- dev_err(dwmac->dev, "unsupported phy-mode %s\n",
- phy_modes(dwmac->phy_mode));
- return -EINVAL;
}
/* enable TX_CLK and PHY_REF_CLK generator */
@@ -358,6 +421,25 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
+ /* use 0ns as fallback since this is what most boards actually use */
+ if (of_property_read_u32(pdev->dev.of_node, "amlogic,rx-delay-ns",
+ &dwmac->rx_delay_ns))
+ dwmac->rx_delay_ns = 0;
+
+ if (dwmac->rx_delay_ns != 0 && dwmac->rx_delay_ns != 2) {
+ dev_err(&pdev->dev,
+ "The only allowed RX delays values are: 0ns, 2ns");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev,
+ "timing-adjustment");
+ if (IS_ERR(dwmac->timing_adj_clk)) {
+ ret = PTR_ERR(dwmac->timing_adj_clk);
+ goto err_remove_config_dt;
+ }
+
ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
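To make the new delay encoding concrete: the TX delay is a 2-bit field in quarter-cycle (2 ns) steps of the 125 MHz RGMII clock, while the RX side only offers the fixed ~2 ns re-timing enabled via ADJ_ENABLE/ADJ_SETUP. A small illustrative helper (not part of the patch) that builds the same delay_config word:

static u32 example_meson8b_delay_config(u32 tx_delay_ns, u32 rx_delay_ns)
{
	/* 0/2/4/6 ns map to register values 0x0..0x3 */
	u32 cfg = FIELD_PREP(PRG_ETH0_TXDLY_MASK, tx_delay_ns >> 1);

	/* RX is either off or the fixed 2 ns input re-timing */
	if (rx_delay_ns == 2)
		cfg |= PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;

	return cfg;
}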
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index b2dc99289687..5d4df4c5254e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -29,6 +29,11 @@
#define SYSCFG_PMCR_ETH_CLK_SEL BIT(16)
#define SYSCFG_PMCR_ETH_REF_CLK_SEL BIT(17)
+/* CLOCK feed to PHY*/
+#define ETH_CK_F_25M 25000000
+#define ETH_CK_F_50M 50000000
+#define ETH_CK_F_125M 125000000
+
/* Ethernet PHY interface selection in register SYSCFG Configuration
*------------------------------------------
* src |BIT(23)| BIT(22)| BIT(21)|BIT(20)|
@@ -58,33 +63,20 @@
*| | | 25MHz | 50MHz | |
* ---------------------------------------------------------------------------
*| MII | - | eth-ck | n/a | n/a |
- *| | | | | |
+ *| | | st,ext-phyclk | | |
* ---------------------------------------------------------------------------
*| GMII | - | eth-ck | n/a | n/a |
- *| | | | | |
+ *| | | st,ext-phyclk | | |
* ---------------------------------------------------------------------------
- *| RGMII | - | eth-ck | n/a | eth-ck (no pin) |
- *| | | | | st,eth-clk-sel |
+ *| RGMII | - | eth-ck | n/a | eth-ck |
+ *| | | st,ext-phyclk | | st,eth-clk-sel or|
+ *| | | | | st,ext-phyclk |
* ---------------------------------------------------------------------------
*| RMII | - | eth-ck | eth-ck | n/a |
- *| | | | st,eth-ref-clk-sel | |
+ *| | | st,ext-phyclk | st,eth-ref-clk-sel | |
+ *| | | | or st,ext-phyclk | |
* ---------------------------------------------------------------------------
*
- * BIT(17) : set this bit in RMII mode when you have PHY without crystal 50MHz
- * BIT(16) : set this bit in GMII/RGMII PHY when you do not want use 125Mhz
- * from PHY
- *-----------------------------------------------------
- * src | BIT(17) | BIT(16) |
- *-----------------------------------------------------
- * MII | n/a | n/a |
- *-----------------------------------------------------
- * GMII | n/a | st,eth-clk-sel |
- *-----------------------------------------------------
- * RGMII | n/a | st,eth-clk-sel |
- *-----------------------------------------------------
- * RMII | st,eth-ref-clk-sel | n/a |
- *-----------------------------------------------------
- *
*/
struct stm32_dwmac {
@@ -93,6 +85,8 @@ struct stm32_dwmac {
struct clk *clk_eth_ck;
struct clk *clk_ethstp;
struct clk *syscfg_clk;
+ int ext_phyclk;
+ int enable_eth_ck;
int eth_clk_sel_reg;
int eth_ref_clk_sel_reg;
int irq_pwr_wakeup;
@@ -155,14 +149,17 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
ret = clk_prepare_enable(dwmac->syscfg_clk);
if (ret)
return ret;
- ret = clk_prepare_enable(dwmac->clk_eth_ck);
- if (ret) {
- clk_disable_unprepare(dwmac->syscfg_clk);
- return ret;
+ if (dwmac->enable_eth_ck) {
+ ret = clk_prepare_enable(dwmac->clk_eth_ck);
+ if (ret) {
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ return ret;
+ }
}
} else {
clk_disable_unprepare(dwmac->syscfg_clk);
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ if (dwmac->enable_eth_ck)
+ clk_disable_unprepare(dwmac->clk_eth_ck);
}
return ret;
}
@@ -170,24 +167,34 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
- u32 reg = dwmac->mode_reg;
+ u32 reg = dwmac->mode_reg, clk_rate;
int val;
+ clk_rate = clk_get_rate(dwmac->clk_eth_ck);
+ dwmac->enable_eth_ck = false;
switch (plat_dat->interface) {
case PHY_INTERFACE_MODE_MII:
+ if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk)
+ dwmac->enable_eth_ck = true;
val = SYSCFG_PMCR_ETH_SEL_MII;
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
break;
case PHY_INTERFACE_MODE_GMII:
val = SYSCFG_PMCR_ETH_SEL_GMII;
- if (dwmac->eth_clk_sel_reg)
+ if (clk_rate == ETH_CK_F_25M &&
+ (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ }
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n");
break;
case PHY_INTERFACE_MODE_RMII:
val = SYSCFG_PMCR_ETH_SEL_RMII;
- if (dwmac->eth_ref_clk_sel_reg)
+ if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) &&
+ (dwmac->eth_ref_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
+ }
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
break;
case PHY_INTERFACE_MODE_RGMII:
@@ -195,8 +202,11 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
val = SYSCFG_PMCR_ETH_SEL_RGMII;
- if (dwmac->eth_clk_sel_reg)
+ if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M) &&
+ (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ }
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n");
break;
default:
@@ -294,6 +304,9 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
struct device_node *np = dev->of_node;
int err = 0;
+ /* Ethernet PHY has no crystal */
+ dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
+
/* Gigabit Ethernet 125MHz clock selection. */
dwmac->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel");
@@ -431,7 +444,8 @@ static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_tx);
clk_disable_unprepare(dwmac->syscfg_clk);
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ if (dwmac->enable_eth_ck)
+ clk_disable_unprepare(dwmac->clk_eth_ck);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 28cac28253b8..61f3249bd724 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -90,6 +90,7 @@
#define GMAC_VLAN_CSVL BIT(19)
#define GMAC_VLAN_VLC GENMASK(17, 16)
#define GMAC_VLAN_VLC_SHIFT 16
+#define GMAC_VLAN_VLHT GENMASK(15, 0)
/* MAC VLAN Tag */
#define GMAC_VLAN_TAG_VID GENMASK(15, 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 39692d15d80c..ecd834e0e121 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -450,6 +450,12 @@ static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
if (vid > 4095)
return -EINVAL;
+ if (hw->promisc) {
+ netdev_err(dev,
+ "Adding VLAN in promisc mode not supported\n");
+ return -EPERM;
+ }
+
/* Single Rx VLAN Filter */
if (hw->num_vlan == 1) {
/* For single VLAN filter, VID 0 means VLAN promiscuous */
@@ -499,6 +505,12 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
{
int i, ret = 0;
+ if (hw->promisc) {
+ netdev_err(dev,
+ "Deleting VLAN in promisc mode not supported\n");
+ return -EPERM;
+ }
+
/* Single Rx VLAN Filter */
if (hw->num_vlan == 1) {
if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
@@ -523,9 +535,45 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
return ret;
}
+static void dwmac4_vlan_promisc_enable(struct net_device *dev,
+ struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
+ u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ dwmac4_write_single_vlan(dev, 0);
+ return;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
+ val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
+ dwmac4_write_vlan_filter(dev, hw, i, val);
+ }
+ }
+
+ hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
+ if (hash & GMAC_VLAN_VLHT) {
+ value = readl(ioaddr + GMAC_VLAN_TAG);
+ if (value & GMAC_VLAN_VTHM) {
+ value &= ~GMAC_VLAN_VTHM;
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+ }
+ }
+}
+
static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
struct mac_device_info *hw)
{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
u32 val;
int i;
@@ -542,6 +590,13 @@ static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
dwmac4_write_vlan_filter(dev, hw, i, val);
}
}
+
+ hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
+ if (hash & GMAC_VLAN_VLHT) {
+ value = readl(ioaddr + GMAC_VLAN_TAG);
+ value |= GMAC_VLAN_VTHM;
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+ }
}
static void dwmac4_set_filter(struct mac_device_info *hw,
@@ -624,6 +679,18 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
value |= GMAC_PACKET_FILTER_VTFE;
writel(value, ioaddr + GMAC_PACKET_FILTER);
+
+ if (dev->flags & IFF_PROMISC) {
+ if (!hw->promisc) {
+ hw->promisc = 1;
+ dwmac4_vlan_promisc_enable(dev, hw);
+ }
+ } else {
+ if (hw->promisc) {
+ hw->promisc = 0;
+ dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
+ }
+ }
}
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a999d6b33a64..8f08393f25dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3543,15 +3543,6 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
}
}
-
-static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
-{
- if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
- return 0;
-
- return 1;
-}
-
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 3fb21f7ac9fb..272cb47af9f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -217,15 +217,10 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- struct net_device *ndev = dev_get_drvdata(&pdev->dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
int i;
stmmac_dvr_remove(&pdev->dev);
- if (priv->plat->stmmac_clk)
- clk_unregister_fixed_rate(priv->plat->stmmac_clk);
-
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index e6d1aa882fa5..e6e25960da4f 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -237,12 +237,6 @@ static inline void cas_lock_tx(struct cas *cp)
spin_lock_nested(&cp->tx_lock[i], i);
}
-static inline void cas_lock_all(struct cas *cp)
-{
- spin_lock_irq(&cp->lock);
- cas_lock_tx(cp);
-}
-
/* WTZ: QA was finding deadlock problems with the previous
* versions after long test runs with multiple cards per machine.
* See if replacing cas_lock_all with safer versions helps. The
@@ -266,12 +260,6 @@ static inline void cas_unlock_tx(struct cas *cp)
spin_unlock(&cp->tx_lock[i - 1]);
}
-static inline void cas_unlock_all(struct cas *cp)
-{
- cas_unlock_tx(cp);
- spin_unlock_irq(&cp->lock);
-}
-
#define cas_unlock_all_restore(cp, flags) \
do { \
struct cas *xxxcp = (cp); \
@@ -5059,7 +5047,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cp->cas_flags & CAS_FLAG_SATURN)
cas_saturn_firmware_init(cp);
- cp->init_block = (struct cas_init_block *)
+ cp->init_block =
pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
&cp->block_dvma);
if (!cp->init_block) {
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 40a2ce0ca808..e28727297563 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1362,18 +1362,6 @@ static void print_rxfd(struct rxf_desc *rxfd)
* As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
*/
-/*************************************************************************
- * Tx DB *
- *************************************************************************/
-static inline int bdx_tx_db_size(struct txdb *db)
-{
- int taken = db->wptr - db->rptr;
- if (taken < 0)
- taken = db->size + 1 + taken; /* (size + 1) equals memsz */
-
- return db->size - taken;
-}
-
/**
* __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
* @db: tx data base
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 62f809b67469..182d10f171f6 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -94,6 +94,7 @@ config TI_K3_AM65_CPSW_NUSS
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
select TI_DAVINCI_MDIO
imply PHY_TI_GMII_SEL
+ depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
help
This driver supports TI K3 AM654/J721E CPSW2G Ethernet SubSystem.
The two-port Gigabit Ethernet MAC (MCU_CPSW0) subsystem provides
@@ -104,6 +105,28 @@ config TI_K3_AM65_CPSW_NUSS
To compile this driver as a module, choose M here: the module
will be called ti-am65-cpsw-nuss.
+config TI_K3_AM65_CPTS
+ tristate "TI K3 AM65x CPTS"
+ depends on ARCH_K3 && OF
+ depends on PTP_1588_CLOCK
+ help
+ Say y here to support the TI K3 AM65x CPTS with 1588 features such as
+ PTP hardware clock for each CPTS device and network packet
+ timestamping where applicable.
+ Depending on the integration, CPTS blocks enable compliance with
+ the IEEE 1588-2008 standard for a precision clock synchronization
+ protocol, Ethernet Enhanced Scheduled Traffic Operations (CPTS_ESTFn)
+ and PCIe Subsystem Precision Time Measurement (PTM).
+
+config TI_AM65_CPSW_TAS
+ bool "Enable TAS offload in AM65 CPSW"
+ depends on TI_K3_AM65_CPSW_NUSS && NET_SCH_TAPRIO && TI_K3_AM65_CPTS
+ help
+ Say y here to support Time Aware Shaper (TAS) offload in AM65 CPSW.
+ AM65 CPSW hardware supports Enhanced Scheduled Traffic (EST)
+ defined in IEEE 802.1Q 2018. The EST scheduler runs on CPTS and the
+ TAS/EST schedule is updated in the Fetch RAM memory of the CPSW.
+
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
select TI_DAVINCI_MDIO
@@ -133,7 +156,7 @@ config TLAN
Devices currently supported by this driver are Compaq Netelligent,
Compaq NetFlex and Olicom cards. Please read the file
- <file:Documentation/networking/device_drivers/ti/tlan.txt>
+ <file:Documentation/networking/device_drivers/ti/tlan.rst>
for more details.
To compile this driver as a module, choose M here. The module
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index cb26a9d21869..6e779292545d 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
-ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
+obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index c3502aa15ea0..8c4690f3ebcb 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -12,6 +12,7 @@
#include "am65-cpsw-nuss.h"
#include "cpsw_ale.h"
+#include "am65-cpts.h"
#define AM65_CPSW_REGDUMP_VER 0x1
@@ -694,6 +695,27 @@ static void am65_cpsw_get_ethtool_stats(struct net_device *ndev,
hw_stats[i].offset);
}
+static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return ethtool_op_get_ts_info(ndev, info);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = am65_cpts_phc_index(common->cpts);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
@@ -708,9 +730,17 @@ static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int rrobin;
+
+ rrobin = !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+
+ if (common->est_enabled && rrobin) {
+ netdev_err(ndev,
+ "p0-rx-ptype-rrobin flag conflicts with QOS\n");
+ return -EINVAL;
+ }
- common->pf_p0_rx_ptype_rrobin =
- !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+ common->pf_p0_rx_ptype_rrobin = rrobin;
am65_cpsw_nuss_set_p0_ptype(common);
return 0;
@@ -730,7 +760,7 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.get_sset_count = am65_cpsw_get_sset_count,
.get_strings = am65_cpsw_get_strings,
.get_ethtool_stats = am65_cpsw_get_ethtool_stats,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = am65_cpsw_get_ethtool_ts_info,
.get_priv_flags = am65_cpsw_get_ethtool_priv_flags,
.set_priv_flags = am65_cpsw_set_ethtool_priv_flags,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2517ffba8178..4a8229864ae4 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -30,18 +30,21 @@
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
#include "k3-cppi-desc-pool.h"
+#include "am65-cpts.h"
#define AM65_CPSW_SS_BASE 0x0
#define AM65_CPSW_SGMII_BASE 0x100
#define AM65_CPSW_XGMII_BASE 0x2100
#define AM65_CPSW_CPSW_NU_BASE 0x20000
#define AM65_CPSW_NU_PORTS_BASE 0x1000
+#define AM65_CPSW_NU_FRAM_BASE 0x12000
#define AM65_CPSW_NU_STATS_BASE 0x1a000
#define AM65_CPSW_NU_ALE_BASE 0x1e000
#define AM65_CPSW_NU_CPTS_BASE 0x1d000
#define AM65_CPSW_NU_PORTS_OFFSET 0x1000
#define AM65_CPSW_NU_STATS_PORT_OFFSET 0x200
+#define AM65_CPSW_NU_FRAM_PORT_OFFSET 0x200
#define AM65_CPSW_MAX_PORTS 8
@@ -187,9 +190,11 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+ am65_cpsw_qos_link_up(ndev, phy->speed);
netif_tx_wake_all_queues(ndev);
} else {
int tmo;
+
/* disable forwarding */
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
@@ -203,6 +208,7 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
cpsw_sl_ctl_reset(port->slave.mac_sl);
+ am65_cpsw_qos_link_down(ndev);
netif_tx_stop_all_queues(ndev);
}
@@ -668,6 +674,18 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ ns = ((u64)psdata[1] << 32) | psdata[0];
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
/* RX psdata[2] word format - checksum information */
#define AM65_CPSW_RX_PSD_CSUM_ADD GENMASK(15, 0)
#define AM65_CPSW_RX_PSD_CSUM_ERR BIT(16)
@@ -745,6 +763,9 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
skb->dev = ndev;
psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* add RX timestamp */
+ if (port->rx_ts_enabled)
+ am65_cpsw_nuss_rx_ts(skb, psdata);
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
@@ -904,6 +925,8 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
ndev = skb->dev;
+ am65_cpts_tx_timestamp(common->cpts, skb);
+
ndev_priv = netdev_priv(ndev);
stats = this_cpu_ptr(ndev_priv->stats);
u64_stats_update_begin(&stats->syncp);
@@ -995,6 +1018,10 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
/* padding enabled in hw */
pkt_len = skb_headlen(skb);
+ /* SKB TX timestamp */
+ if (port->tx_ts_enabled)
+ am65_cpts_prep_tx_timestamp(common->cpts, skb);
+
q_idx = skb_get_queue_mapping(skb);
dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
@@ -1158,6 +1185,111 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
return 0;
}
+static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
+ struct ifreq *ifr)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
+ struct hwtstamp_config cfg;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* TX HW timestamp */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ port->rx_ts_enabled = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ port->rx_ts_enabled = true;
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
+
+ /* cfg TX timestamp */
+ seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
+ AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;
+
+ ts_vlan_ltype = ETH_P_8021Q;
+
+ ts_ctrl_ltype2 = ETH_P_1588 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;
+
+ ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
+ AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;
+
+ if (port->tx_ts_enabled)
+ ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
+ AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
+
+ writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
+ writel(ts_vlan_ltype, port->port_base +
+ AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
+ writel(ts_ctrl_ltype2, port->port_base +
+ AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
+ writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
+
+ /* en/dis RX timestamp */
+ am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
+ struct ifreq *ifr)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct hwtstamp_config cfg;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = port->tx_ts_enabled ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = port->rx_ts_enabled ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
struct ifreq *req, int cmd)
{
@@ -1166,6 +1298,13 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
if (!netif_running(ndev))
return -EINVAL;
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return am65_cpsw_nuss_hwtstamp_set(ndev, req);
+ case SIOCGHWTSTAMP:
+ return am65_cpsw_nuss_hwtstamp_get(ndev, req);
+ }
+
if (!port->slave.phy)
return -EOPNOTSUPP;
@@ -1244,6 +1383,7 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
.ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features,
+ .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
};
static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
@@ -1531,6 +1671,40 @@ static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
return 0;
}
+static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ struct device_node *node;
+ struct am65_cpts *cpts;
+ void __iomem *reg_base;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return 0;
+
+ node = of_get_child_by_name(dev->of_node, "cpts");
+ if (!node) {
+ dev_err(dev, "%s cpts not found\n", __func__);
+ return -ENOENT;
+ }
+
+ reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
+ cpts = am65_cpts_create(dev, reg_base, node);
+ if (IS_ERR(cpts)) {
+ int ret = PTR_ERR(cpts);
+
+ if (ret == -EOPNOTSUPP) {
+ dev_info(dev, "cpts disabled\n");
+ return 0;
+ }
+
+ dev_err(dev, "cpts create err %d\n", ret);
+ return ret;
+ }
+ common->cpts = cpts;
+
+ return 0;
+}
+
static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
{
struct device_node *node, *port_np;
@@ -1571,6 +1745,9 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
(AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
port->name = of_get_property(port_np, "label", NULL);
+ port->fetch_ram_base =
+ common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
+ (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
port->disabled = !of_device_is_available(port_np);
if (port->disabled)
@@ -1863,10 +2040,21 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return ret;
}
- ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
- /* We do not want to force this, as in some cases may not have child */
- if (ret)
- dev_warn(dev, "populating child nodes err:%d\n", ret);
+ node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!node) {
+ dev_warn(dev, "MDIO node not found\n");
+ } else if (of_device_is_available(node)) {
+ struct platform_device *mdio_pdev;
+
+ mdio_pdev = of_platform_device_create(node, NULL, dev);
+ if (!mdio_pdev) {
+ ret = -ENODEV;
+ goto err_pm_clear;
+ }
+
+ common->mdio_dev = &mdio_pdev->dev;
+ }
+ of_node_put(node);
am65_cpsw_nuss_get_ver(common);
@@ -1900,6 +2088,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
goto err_of_clear;
}
+ ret = am65_cpsw_init_cpts(common);
+ if (ret)
+ goto err_of_clear;
+
/* init ports */
for (i = 0; i < common->port_num; i++)
am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
@@ -1918,7 +2110,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return 0;
err_of_clear:
- of_platform_depopulate(dev);
+ of_platform_device_destroy(common->mdio_dev, NULL);
+err_pm_clear:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
@@ -1943,7 +2136,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
- of_platform_depopulate(dev);
+ of_platform_device_destroy(common->mdio_dev, NULL);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 41ae5b4c7931..9faf4fb1409b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -9,6 +9,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include "am65-cpsw-qos.h"
+
+struct am65_cpts;
#define HOST_PORT_NUM 0
@@ -35,8 +40,12 @@ struct am65_cpsw_port {
u32 port_id;
void __iomem *port_base;
void __iomem *stat_base;
+ void __iomem *fetch_ram_base;
bool disabled;
struct am65_cpsw_slave_data slave;
+ bool tx_ts_enabled;
+ bool rx_ts_enabled;
+ struct am65_cpsw_qos qos;
};
struct am65_cpsw_host {
@@ -72,6 +81,7 @@ struct am65_cpsw_pdata {
struct am65_cpsw_common {
struct device *dev;
+ struct device *mdio_dev;
const struct am65_cpsw_pdata *pdata;
void __iomem *ss_base;
@@ -96,8 +106,9 @@ struct am65_cpsw_common {
u32 nuss_ver;
u32 cpsw_ver;
-
bool pf_p0_rx_ptype_rrobin;
+ struct am65_cpts *cpts;
+ int est_enabled;
};
struct am65_cpsw_ndev_stats {
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
new file mode 100644
index 000000000000..32eac04468bb
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet QoS submodule
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * quality of service module includes:
+ * Enhanced Scheduled Traffic (EST - P802.1Qbv/D2.2)
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/time.h>
+
+#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-qos.h"
+#include "am65-cpts.h"
+
+#define AM65_CPSW_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_FIFO_STATUS 0x050
+#define AM65_CPSW_PN_REG_EST_CTL 0x060
+
+/* AM65_CPSW_REG_CTL register fields */
+#define AM65_CPSW_CTL_EST_EN BIT(18)
+
+/* AM65_CPSW_PN_REG_CTL register fields */
+#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
+
+/* AM65_CPSW_PN_REG_EST_CTL register fields */
+#define AM65_CPSW_PN_EST_ONEBUF BIT(0)
+#define AM65_CPSW_PN_EST_BUFSEL BIT(1)
+#define AM65_CPSW_PN_EST_TS_EN BIT(2)
+#define AM65_CPSW_PN_EST_TS_FIRST BIT(3)
+#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
+#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
+
+/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
+#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
+#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
+#define AM65_CPSW_PN_FST_EST_CNT_ERR BIT(16)
+#define AM65_CPSW_PN_FST_EST_ADD_ERR BIT(17)
+#define AM65_CPSW_PN_FST_EST_BUFACT BIT(18)
+
+/* EST FETCH COMMAND RAM */
+#define AM65_CPSW_FETCH_RAM_CMD_NUM 0x80
+#define AM65_CPSW_FETCH_CNT_MSK GENMASK(21, 8)
+#define AM65_CPSW_FETCH_CNT_MAX (AM65_CPSW_FETCH_CNT_MSK >> 8)
+#define AM65_CPSW_FETCH_CNT_OFFSET 8
+#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
+#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
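+
+/* Illustrative sketch of a fetch command word, derived from the masks above:
+ * bits 21:8 hold the fetch count and bits 7:0 the priority allow mask, so a
+ * count of 0x40 with gates open for queues 0-2 would be encoded as
+ * (0x40 << AM65_CPSW_FETCH_CNT_OFFSET) | 0x07 = 0x4007.
+ */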
+
+enum timer_act {
+ TACT_PROG, /* need program timer */
+ TACT_NEED_STOP, /* need stop first */
+ TACT_SKIP_PROG, /* just buffer can be updated */
+};
+
+static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
+{
+ return port->qos.est_oper || port->qos.est_admin;
+}
+
+static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
+{
+ u32 val;
+
+ val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+
+ if (enable)
+ val |= AM65_CPSW_CTL_EST_EN;
+ else
+ val &= ~AM65_CPSW_CTL_EST_EN;
+
+ writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
+ common->est_enabled = enable;
+}
+
+static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ if (enable)
+ val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
+ else
+ val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
+}
+
+/* target new EST RAM buffer, actual toggle happens after cycle completion */
+static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
+ int buf_num)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ if (buf_num)
+ val |= AM65_CPSW_PN_EST_BUFSEL;
+ else
+ val &= ~AM65_CPSW_PN_EST_BUFSEL;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+}
+
+/* am65_cpsw_port_est_is_swapped() - Indicate whether the h/w has
+ * transitioned admin -> oper
+ *
+ * Return true if the transition has already happened, i.e. oper is equal to
+ * admin and the buffer numbers match (est_oper->buf matches est_admin->buf).
+ * Return false if the transition is still pending, i.e. oper is not equal to
+ * admin (a previous admin command is still waiting to be transitioned to the
+ * oper state and est_oper->buf does not match est_admin->buf).
+ */
+static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
+ int *admin)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
+ *oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ *admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);
+
+ return *admin == *oper;
+}
+
+/* am65_cpsw_port_est_get_free_buf_num() - Get free buffer number for
+ * Admin to program the new schedule.
+ *
+ * The logic is as follows:
+ * If oper is the same as admin, return the other buffer (!oper) as the admin
+ * buffer. If they are not the same, the driver lets the current oper continue,
+ * as it is in the process of transitioning from admin -> oper, and keeps it by
+ * selecting the same oper buffer via the EST_BUFSEL bit in the EST CTL
+ * register. On the second iteration they will match and the code returns.
+ * The actual buffer to write commands to is selected later, just before the
+ * schedule is updated.
+ */
+static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
+{
+ int oper, admin;
+ int roll = 2;
+
+ while (roll--) {
+ if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
+ return !oper;
+
+ /* admin is not set, so hinder transition as it's not allowed
+ * to touch memory in-flight, by targeting same oper buf.
+ */
+ am65_cpsw_port_est_assign_buf_num(ndev, oper);
+
+ dev_info(&ndev->dev,
+ "Prev. EST admin cycle is in transit %d -> %d\n",
+ oper, admin);
+ }
+
+ return admin;
+}
+
+static void am65_cpsw_admin_to_oper(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (port->qos.est_oper)
+ devm_kfree(&ndev->dev, port->qos.est_oper);
+
+ port->qos.est_oper = port->qos.est_admin;
+ port->qos.est_admin = NULL;
+}
+
+static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ val &= ~AM65_CPSW_PN_EST_ONEBUF;
+ writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+
+ est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);
+
+ /* rolled buf num means changed buf while configuring */
+ if (port->qos.est_oper && port->qos.est_admin &&
+ est_new->buf == port->qos.est_oper->buf)
+ am65_cpsw_admin_to_oper(ndev);
+}
+
+static void am65_cpsw_est_set(struct net_device *ndev, int enable)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ int common_enable = 0;
+ int i;
+
+ am65_cpsw_port_est_enable(port, enable);
+
+ for (i = 0; i < common->port_num; i++)
+ common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);
+
+ common_enable |= enable;
+ am65_cpsw_est_enable(common, common_enable);
+}
+
+/* This update is meant to be called from any routine before reading the real
+ * state of the admin -> oper transition, in particular from a generic routine
+ * that reports the real state to the Taprio Qdisc.
+ */
+static void am65_cpsw_est_update_state(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int oper, admin;
+
+ if (!port->qos.est_admin)
+ return;
+
+ if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
+ return;
+
+ am65_cpsw_admin_to_oper(ndev);
+}
+
+/* The fetch command count is a number of bytes in Gigabit mode or nibbles in
+ * 10/100Mb mode. So, given the link speed and a time in ns, convert ns to the
+ * number of bytes/nibbles that can be sent during transmission at that speed.
+ */
+static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
+{
+ u64 temp;
+
+ temp = ns * link_speed;
+ if (link_speed < SPEED_1000)
+ temp <<= 1;
+
+ return DIV_ROUND_UP(temp, 8 * 1000);
+}
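+
+/* Worked example for the conversion above (illustrative numbers): at
+ * 1000 Mbps a 10000 ns gate interval corresponds to
+ * 10000 * 1000 / (8 * 1000) = 1250 bytes, while at 100 Mbps the same
+ * interval is counted in nibbles: 10000 * 100 * 2 / (8 * 1000) = 250.
+ */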
+
+static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
+ int fetch_cnt,
+ int fetch_allow)
+{
+ u32 prio_mask, cmd_fetch_cnt, cmd;
+
+ do {
+ if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
+ fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
+ cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
+ } else {
+ cmd_fetch_cnt = fetch_cnt;
+ /* fetch count can't be less than 16? */
+ if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
+ cmd_fetch_cnt = 16;
+
+ fetch_cnt = 0;
+ }
+
+ prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
+ cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;
+
+ writel(cmd, addr);
+ addr += 4;
+ } while (fetch_cnt);
+
+ return addr;
+}
+
+static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio,
+ int link_speed)
+{
+ int i, cmd_cnt, cmd_sum = 0;
+ u32 fetch_cnt;
+
+ for (i = 0; i < taprio->num_entries; i++) {
+ if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
+ dev_err(&ndev->dev, "Only SET command is supported");
+ return -EINVAL;
+ }
+
+ fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
+ link_speed);
+
+ cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
+ if (!cmd_cnt)
+ cmd_cnt++;
+
+ cmd_sum += cmd_cnt;
+
+ if (!fetch_cnt)
+ break;
+ }
+
+ return cmd_sum;
+}
+
+static int am65_cpsw_est_check_scheds(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int cmd_num;
+
+ cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
+ port->qos.link_speed);
+ if (cmd_num < 0)
+ return cmd_num;
+
+ if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
+ dev_err(&ndev->dev, "No fetch RAM");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
+ void __iomem *ram_addr, *max_ram_addr;
+ struct tc_taprio_sched_entry *entry;
+ int i, ram_size;
+
+ ram_addr = port->fetch_ram_base;
+ ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
+ ram_addr += est_new->buf * ram_size;
+
+ max_ram_addr = ram_size + ram_addr;
+ for (i = 0; i < est_new->taprio.num_entries; i++) {
+ entry = &est_new->taprio.entries[i];
+
+ fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
+ port->qos.link_speed);
+ fetch_allow = entry->gate_mask;
+ if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
+ dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
+ fetch_allow);
+
+ ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
+ fetch_allow);
+
+ if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
+ dev_info(&ndev->dev,
+ "next scheds after %d have no impact", i + 1);
+ break;
+ }
+
+ all_fetch_allow |= fetch_allow;
+ }
+
+ /* end cmd, enabling non-timed queues in case the cycle time is exceeded */
+ if (ram_addr < max_ram_addr)
+ writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
+}
+
+/* Enable ESTf periodic output, set cycle start time and interval. */
+static int am65_cpsw_timer_set(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ struct am65_cpts *cpts = common->cpts;
+ struct am65_cpts_estf_cfg cfg;
+
+ cfg.ns_period = est_new->taprio.cycle_time;
+ cfg.ns_start = est_new->taprio.base_time;
+
+ return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
+}
+
+static void am65_cpsw_timer_stop(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpts *cpts = port->common->cpts;
+
+ am65_cpts_estf_disable(cpts, port->port_id - 1);
+}
+
+static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpts *cpts = port->common->cpts;
+ u64 cur_time;
+ s64 diff;
+
+ if (!port->qos.est_oper)
+ return TACT_PROG;
+
+ taprio_new = &est_new->taprio;
+ taprio_oper = &port->qos.est_oper->taprio;
+
+ if (taprio_new->cycle_time != taprio_oper->cycle_time)
+ return TACT_NEED_STOP;
+
+ /* in order to avoid a timer reset, get base_time from the oper taprio */
+ if (!taprio_new->base_time && taprio_oper)
+ taprio_new->base_time = taprio_oper->base_time;
+
+ if (taprio_new->base_time == taprio_oper->base_time)
+ return TACT_SKIP_PROG;
+
+ /* base times are cycle synchronized */
+ diff = taprio_new->base_time - taprio_oper->base_time;
+ diff = diff < 0 ? -diff : diff;
+ if (diff % taprio_new->cycle_time)
+ return TACT_NEED_STOP;
+
+ cur_time = am65_cpts_ns_gettime(cpts);
+ if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
+ return TACT_SKIP_PROG;
+
+ /* TODO: Admin schedule at future time is not currently supported */
+ return TACT_NEED_STOP;
+}
+
+static void am65_cpsw_stop_est(struct net_device *ndev)
+{
+ am65_cpsw_est_set(ndev, 0);
+ am65_cpsw_timer_stop(ndev);
+}
+
+static void am65_cpsw_purge_est(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ am65_cpsw_stop_est(ndev);
+
+ if (port->qos.est_admin)
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+
+ if (port->qos.est_oper)
+ devm_kfree(&ndev->dev, port->qos.est_oper);
+
+ port->qos.est_oper = NULL;
+ port->qos.est_admin = NULL;
+}
+
+static int am65_cpsw_configure_taprio(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpts *cpts = common->cpts;
+ int ret = 0, tact = TACT_PROG;
+
+ am65_cpsw_est_update_state(ndev);
+
+ if (!est_new->taprio.enable) {
+ am65_cpsw_stop_est(ndev);
+ return ret;
+ }
+
+ ret = am65_cpsw_est_check_scheds(ndev, est_new);
+ if (ret < 0)
+ return ret;
+
+ tact = am65_cpsw_timer_act(ndev, est_new);
+ if (tact == TACT_NEED_STOP) {
+ dev_err(&ndev->dev,
+ "Can't toggle estf timer, stop taprio first");
+ return -EINVAL;
+ }
+
+ if (tact == TACT_PROG)
+ am65_cpsw_timer_stop(ndev);
+
+ if (!est_new->taprio.base_time)
+ est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);
+
+ am65_cpsw_port_est_get_buf_num(ndev, est_new);
+ am65_cpsw_est_set_sched_list(ndev, est_new);
+ am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
+
+ am65_cpsw_est_set(ndev, est_new->taprio.enable);
+
+ if (tact == TACT_PROG) {
+ ret = am65_cpsw_timer_set(ndev, est_new);
+ if (ret) {
+ dev_err(&ndev->dev, "Failed to set cycle time");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
+ struct tc_taprio_qopt_offload *to)
+{
+ int i;
+
+ *to = *from;
+ for (i = 0; i < from->num_entries; i++)
+ to->entries[i] = from->entries[i];
+}
+
+static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct am65_cpsw_est *est_new;
+ size_t size;
+ int ret = 0;
+
+ if (taprio->cycle_time_extension) {
+ dev_err(&ndev->dev, "Failed to set cycle time extension");
+ return -EOPNOTSUPP;
+ }
+
+ size = sizeof(struct tc_taprio_sched_entry) * taprio->num_entries +
+ sizeof(struct am65_cpsw_est);
+
+ est_new = devm_kzalloc(&ndev->dev, size, GFP_KERNEL);
+ if (!est_new)
+ return -ENOMEM;
+
+ am65_cpsw_cp_taprio(taprio, &est_new->taprio);
+ ret = am65_cpsw_configure_taprio(ndev, est_new);
+ if (!ret) {
+ if (taprio->enable) {
+ if (port->qos.est_admin)
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+
+ port->qos.est_admin = est_new;
+ } else {
+ devm_kfree(&ndev->dev, est_new);
+ am65_cpsw_purge_est(ndev);
+ }
+ } else {
+ devm_kfree(&ndev->dev, est_new);
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ ktime_t cur_time;
+ s64 delta;
+
+ port->qos.link_speed = link_speed;
+ if (!am65_cpsw_port_est_enabled(port))
+ return;
+
+ if (port->qos.link_down_time) {
+ cur_time = ktime_get();
+ delta = ktime_us_delta(cur_time, port->qos.link_down_time);
+ if (delta > USEC_PER_SEC) {
+ dev_err(&ndev->dev,
+ "Link has been lost too long, stopping TAS");
+ goto purge_est;
+ }
+ }
+
+ return;
+
+purge_est:
+ am65_cpsw_purge_est(ndev);
+}
+
+static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return -ENODEV;
+
+ if (!netif_running(ndev)) {
+ dev_err(&ndev->dev, "interface is down, link speed unknown\n");
+ return -ENETDOWN;
+ }
+
+ if (common->pf_p0_rx_ptype_rrobin) {
+ dev_err(&ndev->dev,
+ "p0-rx-ptype-rrobin flag conflicts with taprio qdisc\n");
+ return -EINVAL;
+ }
+
+ if (port->qos.link_speed == SPEED_UNKNOWN)
+ return -ENOLINK;
+
+ return am65_cpsw_set_taprio(ndev, type_data);
+}
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return am65_cpsw_setup_taprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return;
+
+ am65_cpsw_est_link_up(ndev, link_speed);
+ port->qos.link_down_time = 0;
+}
+
+void am65_cpsw_qos_link_down(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return;
+
+ if (!port->qos.link_down_time)
+ port->qos.link_down_time = ktime_get();
+
+ port->qos.link_speed = SPEED_UNKNOWN;
+}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
new file mode 100644
index 000000000000..e8f1b6b59e93
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef AM65_CPSW_QOS_H_
+#define AM65_CPSW_QOS_H_
+
+#include <linux/netdevice.h>
+#include <net/pkt_sched.h>
+
+struct am65_cpsw_est {
+ int buf;
+ /* has to be the last one */
+ struct tc_taprio_qopt_offload taprio;
+};
+
+struct am65_cpsw_qos {
+ struct am65_cpsw_est *est_admin;
+ struct am65_cpsw_est *est_oper;
+ ktime_t link_down_time;
+ int link_speed;
+};
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed);
+void am65_cpsw_qos_link_down(struct net_device *ndev);
+
+#endif /* AM65_CPSW_QOS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
new file mode 100644
index 000000000000..c59a289e428c
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-2.0
+/* TI K3 AM65x Common Platform Time Sync
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "am65-cpts.h"
+
+struct am65_genf_regs {
+ u32 comp_lo; /* Comparison Low Value 0:31 */
+ u32 comp_hi; /* Comparison High Value 32:63 */
+ u32 control; /* control */
+ u32 length; /* Length */
+ u32 ppm_low; /* PPM Load Low Value 0:31 */
+ u32 ppm_hi; /* PPM Load High Value 32:63 */
+ u32 ts_nudge; /* Nudge value */
+} __aligned(32) __packed;
+
+#define AM65_CPTS_GENF_MAX_NUM 9
+#define AM65_CPTS_ESTF_MAX_NUM 8
+
+struct am65_cpts_regs {
+ u32 idver; /* Identification and version */
+ u32 control; /* Time sync control */
+ u32 rftclk_sel; /* Reference Clock Select Register */
+ u32 ts_push; /* Time stamp event push */
+ u32 ts_load_val_lo; /* Time Stamp Load Low Value 0:31 */
+ u32 ts_load_en; /* Time stamp load enable */
+ u32 ts_comp_lo; /* Time Stamp Comparison Low Value 0:31 */
+ u32 ts_comp_length; /* Time Stamp Comparison Length */
+ u32 intstat_raw; /* Time sync interrupt status raw */
+ u32 intstat_masked; /* Time sync interrupt status masked */
+ u32 int_enable; /* Time sync interrupt enable */
+ u32 ts_comp_nudge; /* Time Stamp Comparison Nudge Value */
+ u32 event_pop; /* Event interrupt pop */
+ u32 event_0; /* Event Time Stamp lo 0:31 */
+ u32 event_1; /* Event Type Fields */
+ u32 event_2; /* Event Type Fields domain */
+ u32 event_3; /* Event Time Stamp hi 32:63 */
+ u32 ts_load_val_hi; /* Time Stamp Load High Value 32:63 */
+ u32 ts_comp_hi; /* Time Stamp Comparison High Value 32:63 */
+ u32 ts_add_val; /* Time Stamp Add value */
+ u32 ts_ppm_low; /* Time Stamp PPM Load Low Value 0:31 */
+ u32 ts_ppm_hi; /* Time Stamp PPM Load High Value 32:63 */
+ u32 ts_nudge; /* Time Stamp Nudge value */
+ u32 reserv[33];
+ struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM];
+ struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM];
+};
+
+/* CONTROL_REG */
+#define AM65_CPTS_CONTROL_EN BIT(0)
+#define AM65_CPTS_CONTROL_INT_TEST BIT(1)
+#define AM65_CPTS_CONTROL_TS_COMP_POLARITY BIT(2)
+#define AM65_CPTS_CONTROL_TSTAMP_EN BIT(3)
+#define AM65_CPTS_CONTROL_SEQUENCE_EN BIT(4)
+#define AM65_CPTS_CONTROL_64MODE BIT(5)
+#define AM65_CPTS_CONTROL_TS_COMP_TOG BIT(6)
+#define AM65_CPTS_CONTROL_TS_PPM_DIR BIT(7)
+#define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN BIT(8)
+#define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN BIT(9)
+#define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN BIT(10)
+#define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN BIT(11)
+#define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN BIT(12)
+#define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN BIT(13)
+#define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN BIT(14)
+#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15)
+#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8)
+
+#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF)
+#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28)
+
+/* RFTCLK_SEL_REG */
+#define AM65_CPTS_RFTCLK_SEL_MASK (0x1F)
+
+/* TS_PUSH_REG */
+#define AM65_CPTS_TS_PUSH BIT(0)
+
+/* TS_LOAD_EN_REG */
+#define AM65_CPTS_TS_LOAD_EN BIT(0)
+
+/* INTSTAT_RAW_REG */
+#define AM65_CPTS_INTSTAT_RAW_TS_PEND BIT(0)
+
+/* INTSTAT_MASKED_REG */
+#define AM65_CPTS_INTSTAT_MASKED_TS_PEND BIT(0)
+
+/* INT_ENABLE_REG */
+#define AM65_CPTS_INT_ENABLE_TS_PEND_EN BIT(0)
+
+/* TS_COMP_NUDGE_REG */
+#define AM65_CPTS_TS_COMP_NUDGE_MASK (0xFF)
+
+/* EVENT_POP_REG */
+#define AM65_CPTS_EVENT_POP BIT(0)
+
+/* EVENT_1_REG */
+#define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK GENMASK(15, 0)
+
+#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK GENMASK(19, 16)
+#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT (16)
+
+#define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK GENMASK(23, 20)
+#define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT (20)
+
+#define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK GENMASK(28, 24)
+#define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT (24)
+
+/* EVENT_2_REG */
+#define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK (0xFF)
+#define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT (0)
+
+enum {
+ AM65_CPTS_EV_PUSH, /* Time Stamp Push Event */
+ AM65_CPTS_EV_ROLL, /* Time Stamp Rollover Event */
+ AM65_CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
+ AM65_CPTS_EV_HW, /* Hardware Time Stamp Push Event */
+ AM65_CPTS_EV_RX, /* Ethernet Receive Event */
+ AM65_CPTS_EV_TX, /* Ethernet Transmit Event */
+ AM65_CPTS_EV_TS_COMP, /* Time Stamp Compare Event */
+ AM65_CPTS_EV_HOST, /* Host Transmit Event */
+};
+
+struct am65_cpts_event {
+ struct list_head list;
+ unsigned long tmo;
+ u32 event1;
+ u32 event2;
+ u64 timestamp;
+};
+
+#define AM65_CPTS_FIFO_DEPTH (16)
+#define AM65_CPTS_MAX_EVENTS (32)
+#define AM65_CPTS_EVENT_RX_TX_TIMEOUT (20) /* ms */
+#define AM65_CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+#define AM65_CPTS_MIN_PPM 0x400
+
+struct am65_cpts {
+ struct device *dev;
+ struct am65_cpts_regs __iomem *reg;
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ int phc_index;
+ struct clk_hw *clk_mux_hw;
+ struct device_node *clk_mux_np;
+ struct clk *refclk;
+ u32 refclk_freq;
+ struct list_head events;
+ struct list_head pool;
+ struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
+ spinlock_t lock; /* protects the event lists */
+ u32 ext_ts_inputs;
+ u32 genf_num;
+ u32 ts_add_val;
+ int irq;
+ struct mutex ptp_clk_lock; /* PHC access sync */
+ u64 timestamp;
+ u32 genf_enable;
+ u32 hw_ts_enable;
+ struct sk_buff_head txq;
+};
+
+struct am65_cpts_skb_cb_data {
+ unsigned long tmo;
+ u32 skb_mtype_seqid;
+};
+
+#define am65_cpts_write32(c, v, r) writel(v, &(c)->reg->r)
+#define am65_cpts_read32(c, r) readl(&(c)->reg->r)
+
+static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
+{
+ u32 val;
+
+ val = upper_32_bits(start_tstamp);
+ am65_cpts_write32(cpts, val, ts_load_val_hi);
+ val = lower_32_bits(start_tstamp);
+ am65_cpts_write32(cpts, val, ts_load_val_lo);
+
+ am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
+}
+
+static void am65_cpts_set_add_val(struct am65_cpts *cpts)
+{
+ /* select coefficient according to the rate */
+ cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;
+
+ am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
+}
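+
+/* Illustrative example (assumed clock rate): with a 500 MHz refclk,
+ * NSEC_PER_SEC / refclk_freq - 1 = 2 - 1 = 1, so ts_add_val is written as 1,
+ * which suggests the counter advances by ts_add_val + 1 = 2 ns per refclk
+ * cycle.
+ */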
+
+static void am65_cpts_disable(struct am65_cpts *cpts)
+{
+ am65_cpts_write32(cpts, 0, control);
+ am65_cpts_write32(cpts, 0, int_enable);
+}
+
+static int am65_cpts_event_get_port(struct am65_cpts_event *event)
+{
+ return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
+ AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
+}
+
+static int am65_cpts_event_get_type(struct am65_cpts_event *event)
+{
+ return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >>
+ AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
+}
+
+static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
+ return removed ? 0 : -1;
+}
+
+static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
+ struct am65_cpts_event *event)
+{
+ u32 r = am65_cpts_read32(cpts, intstat_raw);
+
+ if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) {
+ event->timestamp = am65_cpts_read32(cpts, event_0);
+ event->event1 = am65_cpts_read32(cpts, event_1);
+ event->event2 = am65_cpts_read32(cpts, event_2);
+ event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
+ am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
+ return false;
+ }
+ return true;
+}
+
+static int am65_cpts_fifo_read(struct am65_cpts *cpts)
+{
+ struct ptp_clock_event pevent;
+ struct am65_cpts_event *event;
+ bool schedule = false;
+ int i, type, ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
+ event = list_first_entry_or_null(&cpts->pool,
+ struct am65_cpts_event, list);
+
+ if (!event) {
+ if (am65_cpts_cpts_purge_events(cpts)) {
+ dev_err(cpts->dev, "cpts: event pool empty\n");
+ ret = -1;
+ goto out;
+ }
+ continue;
+ }
+
+ if (am65_cpts_fifo_pop_event(cpts, event))
+ break;
+
+ type = am65_cpts_event_get_type(event);
+ switch (type) {
+ case AM65_CPTS_EV_PUSH:
+ cpts->timestamp = event->timestamp;
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
+ cpts->timestamp);
+ break;
+ case AM65_CPTS_EV_RX:
+ case AM65_CPTS_EV_TX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_del_init(&event->list);
+ list_add_tail(&event->list, &cpts->events);
+
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
+ event->event1, event->event2,
+ event->timestamp);
+ schedule = true;
+ break;
+ case AM65_CPTS_EV_HW:
+ pevent.index = am65_cpts_event_get_port(event) - 1;
+ pevent.timestamp = event->timestamp;
+ pevent.type = PTP_CLOCK_EXTTS;
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
+ pevent.index, event->timestamp);
+
+ ptp_clock_event(cpts->ptp_clock, &pevent);
+ break;
+ case AM65_CPTS_EV_HOST:
+ break;
+ case AM65_CPTS_EV_ROLL:
+ case AM65_CPTS_EV_HALF:
+ case AM65_CPTS_EV_TS_COMP:
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n",
+ type,
+ event->event1, event->event2,
+ event->timestamp);
+ break;
+ default:
+ dev_err(cpts->dev, "cpts: unknown event type\n");
+ ret = -1;
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ if (schedule)
+ ptp_schedule_worker(cpts->ptp_clock, 0);
+
+ return ret;
+}
+
+static u64 am65_cpts_gettime(struct am65_cpts *cpts,
+ struct ptp_system_timestamp *sts)
+{
+ unsigned long flags;
+ u64 val = 0;
+
+ /* temporarily disable the cpts interrupt to avoid a doubled read of the
+ * pushed event. An in-flight interrupt is OK.
+ */
+ am65_cpts_write32(cpts, 0, int_enable);
+
+ /* use spin_lock_irqsave() here as it has to run very fast */
+ spin_lock_irqsave(&cpts->lock, flags);
+ ptp_read_system_prets(sts);
+ am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
+ am65_cpts_read32(cpts, ts_push);
+ ptp_read_system_postts(sts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ am65_cpts_fifo_read(cpts);
+
+ am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
+
+ val = cpts->timestamp;
+
+ return val;
+}
+
+static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
+{
+ struct am65_cpts *cpts = dev_id;
+
+ if (am65_cpts_fifo_read(cpts))
+ dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");
+
+ return IRQ_HANDLED;
+}
+
+/* PTP clock operations */
+static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ int neg_adj = 0;
+ u64 adj_period;
+ u32 val;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ /* base freq = 1GHz = 1 000 000 000
+ * ppb_norm = ppb * base_freq / clock_freq;
+ * ppm_norm = ppb_norm / 1000
+ * adj_period = 1 000 000 / ppm_norm
+ * adj_period = 1 000 000 000 / ppb_norm
+ * adj_period = 1 000 000 000 / (ppb * base_freq / clock_freq)
+ * adj_period = (1 000 000 000 * clock_freq) / (ppb * base_freq)
+ * adj_period = clock_freq / ppb
+ */
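+ /* Illustrative numbers for the derivation above (assumed refclk):
+ * with refclk_freq = 500 MHz and ppb = 100,
+ * adj_period = 500000000 / 100 = 5000000.
+ */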
+ adj_period = div_u64(cpts->refclk_freq, ppb);
+
+ mutex_lock(&cpts->ptp_clk_lock);
+
+ val = am65_cpts_read32(cpts, control);
+ if (neg_adj)
+ val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
+ else
+ val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
+ am65_cpts_write32(cpts, val, control);
+
+ val = upper_32_bits(adj_period) & 0x3FF;
+ am65_cpts_write32(cpts, val, ts_ppm_hi);
+ val = lower_32_bits(adj_period);
+ am65_cpts_write32(cpts, val, ts_ppm_low);
+
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ s64 ns;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, NULL);
+ ns += delta;
+ am65_cpts_settime(cpts, ns);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u64 ns;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, sts);
+ mutex_unlock(&cpts->ptp_clk_lock);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
+{
+ u64 ns;
+
+ /* reuse ptp_clk_lock as it serializes ts push */
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, NULL);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return ns;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime);
+
+static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_settime(cpts, ns);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
+{
+ u32 v;
+
+ v = am65_cpts_read32(cpts, control);
+ if (on) {
+ v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
+ cpts->hw_ts_enable |= BIT(index);
+ } else {
+ v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
+ cpts->hw_ts_enable &= ~BIT(index);
+ }
+ am65_cpts_write32(cpts, v, control);
+}
+
+static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
+{
+ if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_extts_enable_hw(cpts, index, on);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
+ __func__, index, on ? "enabled" : "disabled");
+
+ return 0;
+}
+
+int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg)
+{
+ u64 cycles;
+ u32 val;
+
+ cycles = cfg->ns_period * cpts->refclk_freq;
+ cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC);
+ if (cycles > U32_MAX)
+ return -EINVAL;
+
+ /* according to TRM should be zeroed */
+ am65_cpts_write32(cpts, 0, estf[idx].length);
+
+ val = upper_32_bits(cfg->ns_start);
+ am65_cpts_write32(cpts, val, estf[idx].comp_hi);
+ val = lower_32_bits(cfg->ns_start);
+ am65_cpts_write32(cpts, val, estf[idx].comp_lo);
+ val = lower_32_bits(cycles);
+ am65_cpts_write32(cpts, val, estf[idx].length);
+
+ dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);
+
+void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
+{
+ am65_cpts_write32(cpts, 0, estf[idx].length);
+
+ dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_estf_disable);
+
+static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
+ struct ptp_perout_request *req, int on)
+{
+ u64 ns_period, ns_start, cycles;
+ struct timespec64 ts;
+ u32 val;
+
+ if (on) {
+ ts.tv_sec = req->period.sec;
+ ts.tv_nsec = req->period.nsec;
+ ns_period = timespec64_to_ns(&ts);
+
+ cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;
+
+ ts.tv_sec = req->start.sec;
+ ts.tv_nsec = req->start.nsec;
+ ns_start = timespec64_to_ns(&ts);
+
+ val = upper_32_bits(ns_start);
+ am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
+ val = lower_32_bits(ns_start);
+ am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
+ val = lower_32_bits(cycles);
+ am65_cpts_write32(cpts, val, genf[req->index].length);
+
+ cpts->genf_enable |= BIT(req->index);
+ } else {
+ am65_cpts_write32(cpts, 0, genf[req->index].length);
+
+ cpts->genf_enable &= ~BIT(req->index);
+ }
+}
+
+static int am65_cpts_perout_enable(struct am65_cpts *cpts,
+ struct ptp_perout_request *req, int on)
+{
+ if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_perout_enable_hw(cpts, req, on);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
+ __func__, req->index, on ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return am65_cpts_extts_enable(cpts, rq->extts.index, on);
+ case PTP_CLK_REQ_PEROUT:
+ return am65_cpts_perout_enable(cpts, &rq->perout, on);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static long am65_cpts_ts_work(struct ptp_clock_info *ptp);
+
+static struct ptp_clock_info am65_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "CTPS timer",
+ .adjfreq = am65_cpts_ptp_adjfreq,
+ .adjtime = am65_cpts_ptp_adjtime,
+ .gettimex64 = am65_cpts_ptp_gettimex,
+ .settime64 = am65_cpts_ptp_settime,
+ .enable = am65_cpts_ptp_enable,
+ .do_aux_work = am65_cpts_ts_work,
+};
+
+static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
+ struct am65_cpts_event *event)
+{
+ struct sk_buff_head txq_list;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ bool found = false;
+ u32 mtype_seqid;
+
+ mtype_seqid = event->event1 &
+ (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
+ AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
+ AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+
+ __skb_queue_head_init(&txq_list);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice_init(&cpts->txq, &txq_list);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ /* no need to grab txq.lock as access is always done under cpts->lock */
+ skb_queue_walk_safe(&txq_list, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ struct am65_cpts_skb_cb_data *skb_cb =
+ (struct am65_cpts_skb_cb_data *)skb->cb;
+
+ if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ u64 ns = event->timestamp;
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev,
+ "match tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
+ /* timeout any expired skbs over 100 ms */
+ dev_dbg(cpts->dev,
+ "expiring tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice(&txq_list, &cpts->txq);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return found;
+}
+
+static void am65_cpts_find_ts(struct am65_cpts *cpts)
+{
+ struct am65_cpts_event *event;
+ struct list_head *this, *next;
+ LIST_HEAD(events_free);
+ unsigned long flags;
+ LIST_HEAD(events);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_init(&cpts->events, &events);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ list_for_each_safe(this, next, &events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (am65_cpts_match_tx_ts(cpts, event) ||
+ time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &events_free);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events_free, &cpts->pool);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+}
+
+static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ unsigned long flags;
+ long delay = -1;
+
+ am65_cpts_find_ts(cpts);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ if (!skb_queue_empty(&cpts->txq))
+ delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT;
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return delay;
+}
+
+/**
+ * am65_cpts_rx_enable - enable rx timestamping
+ * @cpts: cpts handle
+ * @en: enable
+ *
+ * This function enables RX packet timestamping. The CPTS can timestamp all
+ * RX packets.
+ */
+void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+{
+ u32 val;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ val = am65_cpts_read32(cpts, control);
+ if (en)
+ val |= AM65_CPTS_CONTROL_TSTAMP_EN;
+ else
+ val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
+ am65_cpts_write32(cpts, val, control);
+ mutex_unlock(&cpts->ptp_clk_lock);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
+
+static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
+{
+ unsigned int ptp_class = ptp_classify_raw(skb);
+ u8 *msgtype, *data = skb->data;
+ unsigned int offset = 0;
+ __be16 *seqid;
+
+ if (ptp_class == PTP_CLASS_NONE)
+ return 0;
+
+ if (ptp_class & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (ptp_class & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+ return 0;
+ }
+
+ if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ return 0;
+
+ if (unlikely(ptp_class & PTP_CLASS_V1))
+ msgtype = data + offset + OFF_PTP_CONTROL;
+ else
+ msgtype = data + offset;
+
+ seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+ *mtype_seqid = (*msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
+ AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
+ *mtype_seqid |= (ntohs(*seqid) & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+
+ return 1;
+}
+
+/**
+ * am65_cpts_tx_timestamp - save tx packet for timestamping
+ * @cpts: cpts handle
+ * @skb: packet
+ *
+ * This function saves the TX packet for timestamping if the packet can be
+ * timestamped. Further processing is done from the PTP auxiliary worker.
+ */
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ return;
+
+ /* add frame to queue for processing later.
+ * The periodic FIFO check will handle this.
+ */
+ skb_get(skb);
+ /* get the timestamp for timeouts */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+ skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->ptp_clock, 0);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);
+
+/**
+ * am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
+ * @cpts: cpts handle
+ * @skb: packet
+ *
+ * This function should be called from .xmit().
+ * It checks whether the packet can be timestamped, fills the internal cpts
+ * data in skb->cb and marks the packet as SKBTX_IN_PROGRESS.
+ */
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
+ int ret;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return;
+
+ ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return;
+ skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
+ AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp);
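+
+/* Usage sketch (illustrative, mirroring the am65-cpsw-nuss callers in this
+ * patch): the MAC driver calls am65_cpts_prep_tx_timestamp() from its
+ * .ndo_start_xmit path before queueing the descriptor, then calls
+ * am65_cpts_tx_timestamp() from its TX completion path; the PTP auxiliary
+ * worker matches the queued skb against the CPTS TX event and delivers the
+ * timestamp via skb_tstamp_tx().
+ */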
+
+int am65_cpts_phc_index(struct am65_cpts *cpts)
+{
+ return cpts->phc_index;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_phc_index);
+
+static void cpts_free_clk_mux(void *data)
+{
+ struct am65_cpts *cpts = data;
+
+ of_clk_del_provider(cpts->clk_mux_np);
+ clk_hw_unregister_mux(cpts->clk_mux_hw);
+ of_node_put(cpts->clk_mux_np);
+}
+
+static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
+ struct device_node *node)
+{
+ unsigned int num_parents;
+ const char **parent_names;
+ char *clk_mux_name;
+ void __iomem *reg;
+ int ret = -EINVAL;
+
+ cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
+ if (!cpts->clk_mux_np)
+ return 0;
+
+ num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
+ if (num_parents < 1) {
+ dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
+ cpts->clk_mux_np);
+ goto mux_fail;
+ }
+
+ parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents,
+ GFP_KERNEL);
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);
+
+ clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
+ dev_name(cpts->dev), cpts->clk_mux_np);
+ if (!clk_mux_name) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ reg = &cpts->reg->rftclk_sel;
+ /* dev must be NULL to avoid recursive incrementing
+ * of module refcnt
+ */
+ cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
+ parent_names, num_parents,
+ 0, reg, 0, 5, 0, NULL);
+ if (IS_ERR(cpts->clk_mux_hw)) {
+ ret = PTR_ERR(cpts->clk_mux_hw);
+ goto mux_fail;
+ }
+
+ ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
+ cpts->clk_mux_hw);
+ if (ret)
+ goto clk_hw_register;
+
+ ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
+ if (ret)
+ dev_err(cpts->dev, "failed to add clkmux reset action %d", ret);
+
+ return ret;
+
+clk_hw_register:
+ clk_hw_unregister_mux(cpts->clk_mux_hw);
+mux_fail:
+ of_node_put(cpts->clk_mux_np);
+ return ret;
+}
+
+static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
+{
+ u32 prop[2];
+
+ if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
+ cpts->ext_ts_inputs = prop[0];
+
+ if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
+ cpts->genf_num = prop[0];
+
+ return cpts_of_mux_clk_setup(cpts, node);
+}
+
+static void am65_cpts_release(void *data)
+{
+ struct am65_cpts *cpts = data;
+
+ ptp_clock_unregister(cpts->ptp_clock);
+ am65_cpts_disable(cpts);
+ clk_disable_unprepare(cpts->refclk);
+}
+
+struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node)
+{
+ struct am65_cpts *cpts;
+ int ret, i;
+
+ cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
+ if (!cpts)
+ return ERR_PTR(-ENOMEM);
+
+ cpts->dev = dev;
+ cpts->reg = (struct am65_cpts_regs __iomem *)regs;
+
+ cpts->irq = of_irq_get_byname(node, "cpts");
+ if (cpts->irq <= 0) {
+ ret = cpts->irq ?: -ENXIO;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get IRQ number (err = %d)\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = am65_cpts_of_parse(cpts, node);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mutex_init(&cpts->ptp_clk_lock);
+ INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->pool);
+ spin_lock_init(&cpts->lock);
+ skb_queue_head_init(&cpts->txq);
+
+ for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++)
+ list_add(&cpts->pool_data[i].list, &cpts->pool);
+
+ cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
+ if (IS_ERR(cpts->refclk)) {
+ ret = PTR_ERR(cpts->refclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get refclk %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(cpts->refclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable refclk %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ cpts->refclk_freq = clk_get_rate(cpts->refclk);
+
+ am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
+ cpts->ptp_info = am65_ptp_info;
+
+ if (cpts->ext_ts_inputs)
+ cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
+ if (cpts->genf_num)
+ cpts->ptp_info.n_per_out = cpts->genf_num;
+
+ am65_cpts_set_add_val(cpts);
+
+ am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN | AM65_CPTS_CONTROL_64MODE,
+ control);
+ am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
+
+ /* set time to the current system time */
+ am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));
+
+ cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
+ if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
+ dev_err(dev, "Failed to register ptp clk %ld\n",
+ PTR_ERR(cpts->ptp_clock));
+ ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
+ goto refclk_disable;
+ }
+ cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
+
+ ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
+ if (ret) {
+ dev_err(dev, "failed to add ptpclk reset action %d", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
+ am65_cpts_interrupt,
+ IRQF_ONESHOT, dev_name(dev), cpts);
+ if (ret < 0) {
+ dev_err(cpts->dev, "error attaching irq %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
+ am65_cpts_read32(cpts, idver),
+ cpts->refclk_freq, cpts->ts_add_val);
+
+ return cpts;
+
+refclk_disable:
+ clk_disable_unprepare(cpts->refclk);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_create);
+
+static int am65_cpts_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct am65_cpts *cpts;
+ struct resource *res;
+ void __iomem *base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpts");
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ cpts = am65_cpts_create(dev, base, node);
+ return PTR_ERR_OR_ZERO(cpts);
+}
+
+static const struct of_device_id am65_cpts_of_match[] = {
+ { .compatible = "ti,am65-cpts", },
+ { .compatible = "ti,j721e-cpts", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, am65_cpts_of_match);
+
+static struct platform_driver am65_cpts_driver = {
+ .probe = am65_cpts_probe,
+ .driver = {
+ .name = "am65-cpts",
+ .of_match_table = am65_cpts_of_match,
+ },
+};
+module_platform_driver(am65_cpts_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
+MODULE_DESCRIPTION("TI K3 AM65 CPTS driver");
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
new file mode 100644
index 000000000000..98c1960b20b9
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* TI K3 AM65 CPTS driver interface
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_CPTS_H_
+#define K3_CPTS_H_
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+struct am65_cpts;
+
+struct am65_cpts_estf_cfg {
+ u64 ns_period;
+ u64 ns_start;
+};
+
+#if IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)
+struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node);
+int am65_cpts_phc_index(struct am65_cpts *cpts);
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en);
+u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
+int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg);
+void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
+#else
+static inline struct am65_cpts *am65_cpts_create(struct device *dev,
+ void __iomem *regs,
+ struct device_node *node)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int am65_cpts_phc_index(struct am65_cpts *cpts)
+{
+ return -1;
+}
+
+static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
+{
+}
+
+static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
+{
+}
+
+static inline void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+{
+}
+
+static inline u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
+{
+ return 0;
+}
+
+static inline int am65_cpts_estf_enable(struct am65_cpts *cpts,
+ int idx, struct am65_cpts_estf_cfg *cfg)
+{
+ return 0;
+}
+
+static inline void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
+{
+}
+#endif
+
+#endif /* K3_CPTS_H_ */
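For context only, not part of the patch: the kernel-doc for am65_cpts_prep_tx_timestamp() and am65_cpts_tx_timestamp() in am65-cpts.c above describes a two-step TX timestamping flow, so a consumer of the header just added would look roughly like the sketch below. Only the am65_cpts_*() calls and the SKBTX flag semantics come from this series; the struct and function names (my_port, my_ndo_start_xmit, my_tx_complete) are illustrative assumptions.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "am65-cpts.h"

struct my_port {                        /* hypothetical driver private data */
	struct am65_cpts *cpts;         /* handle from am65_cpts_create() */
};

static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct my_port *port = netdev_priv(ndev);

	/* Builds the CPTS match key in skb->cb and sets SKBTX_IN_PROGRESS;
	 * it is a no-op unless SKBTX_HW_TSTAMP was requested for this skb.
	 */
	am65_cpts_prep_tx_timestamp(port->cpts, skb);

	/* ... map and queue the skb to the hardware TX channel here ... */
	return NETDEV_TX_OK;
}

/* Called from the TX completion path, once the frame has left the MAC. */
static void my_tx_complete(struct my_port *port, struct sk_buff *skb)
{
	/* Takes its own reference and queues the skb; the PTP auxiliary
	 * worker matches it against CPTS events and calls skb_tstamp_tx().
	 */
	am65_cpts_tx_timestamp(port->cpts, skb);
	dev_consume_skb_any(skb);
}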
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index a530afe3ce12..c20715107075 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -532,7 +532,7 @@ fatal_error:
}
-static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
int queue;
unsigned int len;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c2c5bf87da01..ce0645ada6e7 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -406,6 +406,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.data_hard_start = pa;
xdp.rxq = &priv->xdp_rxq[ch];
+ xdp.frame_sz = PAGE_SIZE;
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
@@ -1569,6 +1570,12 @@ static int cpsw_probe(struct platform_device *pdev)
return irq;
cpsw->irqs_table[1] = irq;
+ /* get misc irq */
+ irq = platform_get_irq(pdev, 3);
+ if (irq <= 0)
+ return irq;
+ cpsw->misc_irq = irq;
+
/*
* This may be required here for child devices.
*/
@@ -1703,6 +1710,21 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_unregister_netdev_ret;
}
+ if (!cpsw->cpts)
+ goto skip_cpts;
+
+ ret = devm_request_irq(&pdev->dev, cpsw->misc_irq, cpsw_misc_interrupt,
+ 0, dev_name(&pdev->dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching misc irq (%d)\n", ret);
+ goto clean_unregister_netdev_ret;
+ }
+
+ /* Enable misc CPTS evnt_pend IRQ */
+ cpts_set_irqpoll(cpsw->cpts, false);
+ writel(0x10, &cpsw->wr_regs->misc_en);
+
+skip_cpts:
cpsw_notice(priv, probe,
"initialized device (regs %pa, irq %d, pool size %d)\n",
&ss_res->start, cpsw->irqs_table[0], descs_pool_size);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 9209e613257d..1247d35d42ef 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -348,6 +348,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.data_hard_start = pa;
xdp.rxq = &priv->xdp_rxq[ch];
+ xdp.frame_sz = PAGE_SIZE;
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
if (ret != CPSW_XDP_PASS)
@@ -1228,7 +1229,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
data->active_slave = 0;
data->channels = CPSW_MAX_QUEUES;
data->ale_entries = CPSW_ALE_NUM_ENTRIES;
- data->dual_emac = 1;
+ data->dual_emac = true;
data->bd_ram_size = CPSW_BD_RAM_SIZE;
data->mac_control = 0;
@@ -1896,6 +1897,11 @@ static int cpsw_probe(struct platform_device *pdev)
return irq;
cpsw->irqs_table[1] = irq;
+ irq = platform_get_irq_byname(pdev, "misc");
+ if (irq <= 0)
+ return irq;
+ cpsw->misc_irq = irq;
+
platform_set_drvdata(pdev, cpsw);
/* This may be required here for child devices. */
pm_runtime_enable(dev);
@@ -1916,7 +1922,7 @@ static int cpsw_probe(struct platform_device *pdev)
soc = soc_device_match(cpsw_soc_devices);
if (soc)
- cpsw->quirk_irq = 1;
+ cpsw->quirk_irq = true;
cpsw->rx_packet_max = rx_packet_max;
cpsw->descs_pool_size = descs_pool_size;
@@ -1975,6 +1981,21 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_unregister_netdev;
}
+ if (!cpsw->cpts)
+ goto skip_cpts;
+
+ ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt,
+ 0, dev_name(&pdev->dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching misc irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ /* Enable misc CPTS evnt_pend IRQ */
+ cpts_set_irqpoll(cpsw->cpts, false);
+ writel(0x10, &cpsw->wr_regs->misc_en);
+
+skip_cpts:
ret = cpsw_register_notifiers(cpsw);
if (ret)
goto clean_unregister_netdev;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 97a058ca60ac..9d098c802c6d 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -28,6 +28,8 @@
#include "cpsw_sl.h"
#include "davinci_cpdma.h"
+#define CPTS_N_ETX_TS 4
+
int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
void cpsw_intr_enable(struct cpsw_common *cpsw)
@@ -112,6 +114,18 @@ irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ writel(0, &cpsw->wr_regs->misc_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
+ cpts_misc_interrupt(cpsw->cpts);
+ writel(0x10, &cpsw->wr_regs->misc_en);
+
+ return IRQ_HANDLED;
+}
+
int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
@@ -522,7 +536,8 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
if (!cpts_node)
cpts_node = cpsw->dev->of_node;
- cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node);
+ cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
+ CPTS_N_ETX_TS);
if (IS_ERR(cpsw->cpts)) {
ret = PTR_ERR(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index b8d7b924ee3d..bf4e179b4ca4 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -350,6 +350,7 @@ struct cpsw_common {
bool rx_irq_disabled;
bool tx_irq_disabled;
u32 irqs_table[IRQ_NUM];
+ int misc_irq;
struct cpts *cpts;
struct devlink *devlink;
int rx_ch_num, tx_ch_num;
@@ -442,6 +443,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
struct page *page, int port);
irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id);
irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id);
+irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id);
int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget);
int cpsw_tx_poll(struct napi_struct *napi_tx, int budget);
int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 729ce09dded9..7c55d395de2c 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -21,16 +21,21 @@
#include "cpts.h"
#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+#define CPTS_SKB_RX_TX_TMO 100 /* ms */
+#define CPTS_EVENT_RX_TX_TIMEOUT (100) /* ms */
struct cpts_skb_cb_data {
+ u32 skb_mtype_seqid;
unsigned long tmo;
};
#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
-static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
- u16 ts_seqid, u8 ts_msgtype);
+static int cpts_event_port(struct cpts_event *event)
+{
+ return (event->high >> PORT_NUMBER_SHIFT) & PORT_NUMBER_MASK;
+}
static int event_expired(struct cpts_event *event)
{
@@ -71,7 +76,7 @@ static int cpts_purge_events(struct cpts *cpts)
}
if (removed)
- pr_debug("cpts: event pool cleaned up %d\n", removed);
+ dev_dbg(cpts->dev, "cpts: event pool cleaned up %d\n", removed);
return removed ? 0 : -1;
}
@@ -94,132 +99,126 @@ static void cpts_purge_txq(struct cpts *cpts)
dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
}
-static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
-{
- struct sk_buff *skb, *tmp;
- u16 seqid;
- u8 mtype;
- bool found = false;
-
- mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
- seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
-
- /* no need to grab txq.lock as access is always done under cpts->lock */
- skb_queue_walk_safe(&cpts->txq, skb, tmp) {
- struct skb_shared_hwtstamps ssh;
- unsigned int class = ptp_classify_raw(skb);
- struct cpts_skb_cb_data *skb_cb =
- (struct cpts_skb_cb_data *)skb->cb;
-
- if (cpts_match(skb, class, seqid, mtype)) {
- u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
-
- memset(&ssh, 0, sizeof(ssh));
- ssh.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &ssh);
- found = true;
- __skb_unlink(skb, &cpts->txq);
- dev_consume_skb_any(skb);
- dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
- mtype, seqid);
- break;
- }
-
- if (time_after(jiffies, skb_cb->tmo)) {
- /* timeout any expired skbs over 1s */
- dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
- __skb_unlink(skb, &cpts->txq);
- dev_consume_skb_any(skb);
- }
- }
-
- return found;
-}
-
/*
* Returns zero if matching event type was found.
*/
static int cpts_fifo_read(struct cpts *cpts, int match)
{
+ struct ptp_clock_event pevent;
+ bool need_schedule = false;
+ struct cpts_event *event;
+ unsigned long flags;
int i, type = -1;
u32 hi, lo;
- struct cpts_event *event;
+
+ spin_lock_irqsave(&cpts->lock, flags);
for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
if (cpts_fifo_pop(cpts, &hi, &lo))
break;
if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
- pr_err("cpts: event pool empty\n");
- return -1;
+ dev_warn(cpts->dev, "cpts: event pool empty\n");
+ break;
}
event = list_first_entry(&cpts->pool, struct cpts_event, list);
- event->tmo = jiffies + 2;
event->high = hi;
event->low = lo;
+ event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
type = event_type(event);
+
+ dev_dbg(cpts->dev, "CPTS_EV: %d high:%08X low:%08x\n",
+ type, event->high, event->low);
switch (type) {
- case CPTS_EV_TX:
- if (cpts_match_tx_ts(cpts, event)) {
- /* if the new event matches an existing skb,
- * then don't queue it
- */
- break;
- }
- /* fall through */
case CPTS_EV_PUSH:
+ WRITE_ONCE(cpts->cur_timestamp, lo);
+ timecounter_read(&cpts->tc);
+ if (cpts->mult_new) {
+ cpts->cc.mult = cpts->mult_new;
+ cpts->mult_new = 0;
+ }
+ if (!cpts->irq_poll)
+ complete(&cpts->ts_push_complete);
+ break;
+ case CPTS_EV_TX:
case CPTS_EV_RX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(CPTS_EVENT_RX_TX_TIMEOUT);
+
list_del_init(&event->list);
list_add_tail(&event->list, &cpts->events);
+ need_schedule = true;
break;
case CPTS_EV_ROLL:
case CPTS_EV_HALF:
+ break;
case CPTS_EV_HW:
+ pevent.timestamp = event->timestamp;
+ pevent.type = PTP_CLOCK_EXTTS;
+ pevent.index = cpts_event_port(event) - 1;
+ ptp_clock_event(cpts->clock, &pevent);
break;
default:
- pr_err("cpts: unknown event type\n");
+ dev_err(cpts->dev, "cpts: unknown event type\n");
break;
}
if (type == match)
break;
}
+
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ if (!cpts->irq_poll && need_schedule)
+ ptp_schedule_worker(cpts->clock, 0);
+
return type == match ? 0 : -1;
}
+void cpts_misc_interrupt(struct cpts *cpts)
+{
+ cpts_fifo_read(cpts, -1);
+}
+EXPORT_SYMBOL_GPL(cpts_misc_interrupt);
+
static u64 cpts_systim_read(const struct cyclecounter *cc)
{
- u64 val = 0;
- struct cpts_event *event;
- struct list_head *this, *next;
struct cpts *cpts = container_of(cc, struct cpts, cc);
+ return READ_ONCE(cpts->cur_timestamp);
+}
+
+static void cpts_update_cur_time(struct cpts *cpts, int match,
+ struct ptp_system_timestamp *sts)
+{
+ unsigned long flags;
+
+ reinit_completion(&cpts->ts_push_complete);
+
+ /* use spin_lock_irqsave() here as it has to run very fast */
+ spin_lock_irqsave(&cpts->lock, flags);
+ ptp_read_system_prets(sts);
cpts_write32(cpts, TS_PUSH, ts_push);
- if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
- pr_err("cpts: unable to obtain a time stamp\n");
+ cpts_read32(cpts, ts_push);
+ ptp_read_system_postts(sts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
- list_for_each_safe(this, next, &cpts->events) {
- event = list_entry(this, struct cpts_event, list);
- if (event_type(event) == CPTS_EV_PUSH) {
- list_del_init(&event->list);
- list_add(&event->list, &cpts->pool);
- val = event->low;
- break;
- }
- }
+ if (cpts->irq_poll && cpts_fifo_read(cpts, match) && match != -1)
+ dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");
- return val;
+ if (!cpts->irq_poll &&
+ !wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
+ dev_err(cpts->dev, "cpts: obtain a time stamp timeout\n");
}
/* PTP clock operations */
static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
- u64 adj;
- u32 diff, mult;
- int neg_adj = 0;
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
+ int neg_adj = 0;
+ u32 diff, mult;
+ u64 adj;
if (ppb < 0) {
neg_adj = 1;
@@ -230,38 +229,40 @@ static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
adj *= ppb;
diff = div_u64(adj, 1000000000ULL);
- spin_lock_irqsave(&cpts->lock, flags);
+ mutex_lock(&cpts->ptp_clk_mutex);
- timecounter_read(&cpts->tc);
+ cpts->mult_new = neg_adj ? mult - diff : mult + diff;
- cpts->cc.mult = neg_adj ? mult - diff : mult + diff;
-
- spin_unlock_irqrestore(&cpts->lock, flags);
+ cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);
+ mutex_unlock(&cpts->ptp_clk_mutex);
return 0;
}
static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
- spin_lock_irqsave(&cpts->lock, flags);
+ mutex_lock(&cpts->ptp_clk_mutex);
timecounter_adjtime(&cpts->tc, delta);
- spin_unlock_irqrestore(&cpts->lock, flags);
+ mutex_unlock(&cpts->ptp_clk_mutex);
return 0;
}
-static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int cpts_ptp_gettimeex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
- u64 ns;
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
+ u64 ns;
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ cpts_update_cur_time(cpts, CPTS_EV_PUSH, sts);
- spin_lock_irqsave(&cpts->lock, flags);
ns = timecounter_read(&cpts->tc);
- spin_unlock_irqrestore(&cpts->lock, flags);
+ mutex_unlock(&cpts->ptp_clk_mutex);
*ts = ns_to_timespec64(ns);
@@ -271,15 +272,38 @@ static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
static int cpts_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- u64 ns;
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
+ u64 ns;
ns = timespec64_to_ns(ts);
- spin_lock_irqsave(&cpts->lock, flags);
+ mutex_lock(&cpts->ptp_clk_mutex);
timecounter_init(&cpts->tc, &cpts->cc, ns);
- spin_unlock_irqrestore(&cpts->lock, flags);
+ mutex_unlock(&cpts->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int cpts_extts_enable(struct cpts *cpts, u32 index, int on)
+{
+ u32 v;
+
+ if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ v = cpts_read32(cpts, control);
+ if (on) {
+ v |= BIT(8 + index);
+ cpts->hw_ts_enable |= BIT(index);
+ } else {
+ v &= ~BIT(8 + index);
+ cpts->hw_ts_enable &= ~BIT(index);
+ }
+ cpts_write32(cpts, v, control);
+
+ mutex_unlock(&cpts->ptp_clk_mutex);
return 0;
}
@@ -287,28 +311,120 @@ static int cpts_ptp_settime(struct ptp_clock_info *ptp,
static int cpts_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return cpts_extts_enable(cpts, rq->extts.index, on);
+ default:
+ break;
+ }
+
return -EOPNOTSUPP;
}
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+ struct sk_buff_head txq_list;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ bool found = false;
+ u32 mtype_seqid;
+
+ mtype_seqid = event->high &
+ ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
+ (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
+ (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));
+
+ __skb_queue_head_init(&txq_list);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice_init(&cpts->txq, &txq_list);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ skb_queue_walk_safe(&txq_list, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ struct cpts_skb_cb_data *skb_cb =
+ (struct cpts_skb_cb_data *)skb->cb;
+
+ if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(event->timestamp);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
+ /* timeout any expired skbs */
+ dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice(&txq_list, &cpts->txq);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return found;
+}
+
+static void cpts_process_events(struct cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct cpts_event *event;
+ LIST_HEAD(events_free);
+ unsigned long flags;
+ LIST_HEAD(events);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_init(&cpts->events, &events);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ list_for_each_safe(this, next, &events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (cpts_match_tx_ts(cpts, event) ||
+ time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &events_free);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events_free, &cpts->pool);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+}
+
static long cpts_overflow_check(struct ptp_clock_info *ptp)
{
struct cpts *cpts = container_of(ptp, struct cpts, info);
unsigned long delay = cpts->ov_check_period;
- struct timespec64 ts;
unsigned long flags;
+ u64 ns;
- spin_lock_irqsave(&cpts->lock, flags);
- ts = ns_to_timespec64(timecounter_read(&cpts->tc));
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ cpts_update_cur_time(cpts, -1, NULL);
+ ns = timecounter_read(&cpts->tc);
+
+ cpts_process_events(cpts);
+ spin_lock_irqsave(&cpts->txq.lock, flags);
if (!skb_queue_empty(&cpts->txq)) {
cpts_purge_txq(cpts);
if (!skb_queue_empty(&cpts->txq))
delay = CPTS_SKB_TX_WORK_TIMEOUT;
}
- spin_unlock_irqrestore(&cpts->lock, flags);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
- pr_debug("cpts overflow check at %lld.%09ld\n",
- (long long)ts.tv_sec, ts.tv_nsec);
+ dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
+ mutex_unlock(&cpts->ptp_clk_mutex);
return (long)delay;
}
@@ -321,18 +437,21 @@ static const struct ptp_clock_info cpts_info = {
.pps = 0,
.adjfreq = cpts_ptp_adjfreq,
.adjtime = cpts_ptp_adjtime,
- .gettime64 = cpts_ptp_gettime,
+ .gettimex64 = cpts_ptp_gettimeex,
.settime64 = cpts_ptp_settime,
.enable = cpts_ptp_enable,
.do_aux_work = cpts_overflow_check,
};
-static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
- u16 ts_seqid, u8 ts_msgtype)
+static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
- u16 *seqid;
- unsigned int offset = 0;
+ unsigned int ptp_class = ptp_classify_raw(skb);
u8 *msgtype, *data = skb->data;
+ unsigned int offset = 0;
+ u16 *seqid;
+
+ if (ptp_class == PTP_CLASS_NONE)
+ return 0;
if (ptp_class & PTP_CLASS_VLAN)
offset += VLAN_HLEN;
@@ -360,25 +479,23 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
msgtype = data + offset;
seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+ *mtype_seqid = (*msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
+ *mtype_seqid |= (ntohs(*seqid) & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;
- return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
+ return 1;
}
-static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
+static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
+ int ev_type, u32 skb_mtype_seqid)
{
- u64 ns = 0;
- struct cpts_event *event;
struct list_head *this, *next;
- unsigned int class = ptp_classify_raw(skb);
+ struct cpts_event *event;
unsigned long flags;
- u16 seqid;
- u8 mtype;
-
- if (class == PTP_CLASS_NONE)
- return 0;
+ u32 mtype_seqid;
+ u64 ns = 0;
- spin_lock_irqsave(&cpts->lock, flags);
cpts_fifo_read(cpts, -1);
+ spin_lock_irqsave(&cpts->lock, flags);
list_for_each_safe(this, next, &cpts->events) {
event = list_entry(this, struct cpts_event, list);
if (event_expired(event)) {
@@ -386,29 +503,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
list_add(&event->list, &cpts->pool);
continue;
}
- mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
- seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
- if (ev_type == event_type(event) &&
- cpts_match(skb, class, seqid, mtype)) {
- ns = timecounter_cyc2time(&cpts->tc, event->low);
+
+ mtype_seqid = event->high &
+ ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
+ (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
+ (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));
+
+ if (mtype_seqid == skb_mtype_seqid) {
+ ns = event->timestamp;
list_del_init(&event->list);
list_add(&event->list, &cpts->pool);
break;
}
}
-
- if (ev_type == CPTS_EV_TX && !ns) {
- struct cpts_skb_cb_data *skb_cb =
- (struct cpts_skb_cb_data *)skb->cb;
- /* Not found, add frame to queue for processing later.
- * The periodic FIFO check will handle this.
- */
- skb_get(skb);
- /* get the timestamp for timeouts */
- skb_cb->tmo = jiffies + msecs_to_jiffies(100);
- __skb_queue_tail(&cpts->txq, skb);
- ptp_schedule_worker(cpts->clock, 0);
- }
spin_unlock_irqrestore(&cpts->lock, flags);
return ns;
@@ -416,10 +523,21 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
- u64 ns;
+ struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
struct skb_shared_hwtstamps *ssh;
+ int ret;
+ u64 ns;
- ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
+ ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return;
+
+ skb_cb->skb_mtype_seqid |= (CPTS_EV_RX << EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
+ __func__, skb_cb->skb_mtype_seqid);
+
+ ns = cpts_find_ts(cpts, skb, CPTS_EV_RX, skb_cb->skb_mtype_seqid);
if (!ns)
return;
ssh = skb_hwtstamps(skb);
@@ -430,17 +548,27 @@ EXPORT_SYMBOL_GPL(cpts_rx_timestamp);
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
- u64 ns;
- struct skb_shared_hwtstamps ssh;
+ struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+ int ret;
if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
return;
- ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
- if (!ns)
+
+ ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
return;
- memset(&ssh, 0, sizeof(ssh));
- ssh.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &ssh);
+
+ skb_cb->skb_mtype_seqid |= (CPTS_EV_TX << EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
+ __func__, skb_cb->skb_mtype_seqid);
+
+ /* Always defer TX TS processing to PTP worker */
+ skb_get(skb);
+ /* get the timestamp for timeouts */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
+ skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->clock, 0);
}
EXPORT_SYMBOL_GPL(cpts_tx_timestamp);
@@ -632,7 +760,7 @@ of_error:
}
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
- struct device_node *node)
+ struct device_node *node, u32 n_ext_ts)
{
struct cpts *cpts;
int ret;
@@ -643,7 +771,10 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->dev = dev;
cpts->reg = (struct cpsw_cpts __iomem *)regs;
+ cpts->irq_poll = true;
spin_lock_init(&cpts->lock);
+ mutex_init(&cpts->ptp_clk_mutex);
+ init_completion(&cpts->ts_push_complete);
ret = cpts_of_parse(cpts, node);
if (ret)
@@ -668,6 +799,9 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->cc.mask = CLOCKSOURCE_MASK(32);
cpts->info = cpts_info;
+ if (n_ext_ts)
+ cpts->info.n_ext_ts = n_ext_ts;
+
cpts_calc_mult_shift(cpts);
/* save cc.mult original value as it can be modified
* by cpts_ptp_adjfreq().
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index bb997c11ee15..07222f651d2e 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -94,6 +94,7 @@ struct cpts_event {
unsigned long tmo;
u32 high;
u32 low;
+ u64 timestamp;
};
struct cpts {
@@ -103,7 +104,7 @@ struct cpts {
int rx_enable;
struct ptp_clock_info info;
struct ptp_clock *clock;
- spinlock_t lock; /* protects time registers */
+ spinlock_t lock; /* protects fifo/events */
u32 cc_mult; /* for the nominal frequency */
struct cyclecounter cc;
struct timecounter tc;
@@ -114,6 +115,12 @@ struct cpts {
struct cpts_event pool_data[CPTS_MAX_EVENTS];
unsigned long ov_check_period;
struct sk_buff_head txq;
+ u64 cur_timestamp;
+ u32 mult_new;
+ struct mutex ptp_clk_mutex; /* sync PTP interface and worker */
+ bool irq_poll;
+ struct completion ts_push_complete;
+ u32 hw_ts_enable;
};
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
@@ -121,8 +128,9 @@ void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
int cpts_register(struct cpts *cpts);
void cpts_unregister(struct cpts *cpts);
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
- struct device_node *node);
+ struct device_node *node, u32 n_ext_ts);
void cpts_release(struct cpts *cpts);
+void cpts_misc_interrupt(struct cpts *cpts);
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
@@ -134,6 +142,11 @@ static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
return true;
}
+static inline void cpts_set_irqpoll(struct cpts *cpts, bool en)
+{
+ cpts->irq_poll = en;
+}
+
#else
struct cpts;
@@ -146,7 +159,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
static inline
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
- struct device_node *node)
+ struct device_node *node, u32 n_ext_ts)
{
return NULL;
}
@@ -169,6 +182,14 @@ static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
return false;
}
+
+static inline void cpts_misc_interrupt(struct cpts *cpts)
+{
+}
+
+static inline void cpts_set_irqpoll(struct cpts *cpts, bool en)
+{
+}
#endif
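Not part of the patch: the cpts.c rework above replaces per-event skb re-classification with a single 32-bit match key that is computed once per skb (stored in cpts_skb_cb_data.skb_mtype_seqid) and compared against the same bits of the raw CPTS event word. A minimal sketch of how the two sides line up, using the MESSAGE_TYPE_*, SEQUENCE_ID_* and EVENT_TYPE_* shift/mask macros from cpts.h; the function names here are illustrative only.

/* Key built at RX/TX time from the classified PTP header fields */
static u32 example_skb_key(u8 msgtype, u16 seqid, int ev_type)
{
	u32 key;

	key  = (msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
	key |= (seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;
	key |= ev_type << EVENT_TYPE_SHIFT;
	return key;
}

/* The same fields masked out of the event word popped from the FIFO */
static bool example_event_matches(const struct cpts_event *event, u32 skb_key)
{
	u32 ev_key = event->high &
		     ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
		      (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
		      (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

	return ev_key == skb_key;
}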
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 38b7f6d35759..702fdc393da0 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -397,6 +397,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
data->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!data->regs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
index ad7cfc1316ce..38cc12f9f133 100644
--- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
@@ -64,8 +64,8 @@ k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
return ERR_PTR(-ENOMEM);
pool->gen_pool = gen_pool_create(ilog2(pool->desc_size), -1);
- if (IS_ERR(pool->gen_pool)) {
- ret = PTR_ERR(pool->gen_pool);
+ if (!pool->gen_pool) {
+ ret = -ENOMEM;
dev_err(pool->dev, "pool create failed %d\n", ret);
kfree_const(pool_name);
goto gen_pool_create_fail;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index fb36115e9c51..9d6e27fb710e 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3716,7 +3716,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
if (!cpts_node)
cpts_node = of_node_get(node);
- gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, cpts_node);
+ gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg,
+ cpts_node, 0);
of_node_put(cpts_node);
if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
ret = PTR_ERR(gbe_dev->cpts);
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index ad465202980a..857709828058 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -70,7 +70,7 @@ MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");
/* Turn on debugging.
- * See Documentation/networking/device_drivers/ti/tlan.txt for details
+ * See Documentation/networking/device_drivers/ti/tlan.rst for details
*/
static int debug;
module_param(debug, int, 0);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 070dd6fa9401..310e6839c6e5 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1150,7 +1150,7 @@ static irqreturn_t gelic_card_interrupt(int irq, void *ptr)
* gelic_net_poll_controller - artificial interrupt for netconsole etc.
* @netdev: interface device structure
*
- * see Documentation/networking/netconsole.txt
+ * see Documentation/networking/netconsole.rst
*/
void gelic_net_poll_controller(struct net_device *netdev)
{
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 6576271642c1..3902b3aeb0c2 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -1615,7 +1615,7 @@ spider_net_interrupt(int irq, void *ptr)
* spider_net_poll_controller - artificial interrupt for netconsole etc.
* @netdev: interface device structure
*
- * see Documentation/networking/netconsole.txt
+ * see Documentation/networking/netconsole.rst
*/
static void
spider_net_poll_controller(struct net_device *netdev)
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index a962097b58c6..6cff5f7d57c4 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
+ depends on PCI || ARCH_VT8500 || COMPILE_TEST
depends on HAS_DMA
select CRC32
select MII
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 3e313e71ae36..929244064abd 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1410,9 +1410,9 @@ static int temac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(lp->regs)) {
+ if (!lp->regs) {
dev_err(&pdev->dev, "could not map TEMAC registers\n");
- return PTR_ERR(lp->regs);
+ return -ENOMEM;
}
/* Select register access functions with the specified
@@ -1505,10 +1505,10 @@ static int temac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(lp->sdma_regs)) {
+ if (!lp->sdma_regs) {
dev_err(&pdev->dev,
"could not map DMA registers\n");
- return PTR_ERR(lp->sdma_regs);
+ return -ENOMEM;
}
if (pdata->dma_little_endian) {
lp->dma_in = temac_dma_in32_le;
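Not part of the patch, but the reason for the two ll_temac hunks above (and the similar davinci_mdio and k3-cppi-desc-pool fixes earlier in this series): plain devm_ioremap() and gen_pool_create() report failure with a NULL return rather than an ERR_PTR, so the old IS_ERR()/PTR_ERR() checks could never trigger. A minimal sketch contrasting the two error-return conventions (function and variable names are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_map_regs(struct platform_device *pdev)
{
	void __iomem *main_regs, *dma_regs;
	struct resource *res;

	/* devm_ioremap_resource() encodes failure as an ERR_PTR ... */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	main_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(main_regs))
		return PTR_ERR(main_regs);

	/* ... while plain devm_ioremap() simply returns NULL on failure */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -EINVAL;
	dma_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!dma_regs)
		return -ENOMEM;

	return 0;
}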
diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
index 3b412a56f2cb..da4f58eed08f 100644
--- a/drivers/net/fddi/Kconfig
+++ b/drivers/net/fddi/Kconfig
@@ -77,7 +77,7 @@ config SKFP
- Netelligent 100 FDDI SAS UTP
- Netelligent 100 FDDI SAS Fibre MIC
- Read <file:Documentation/networking/skfp.txt> for information about
+ Read <file:Documentation/networking/skfp.rst> for information about
the driver.
Questions concerning this driver can be addressed to:
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 8e05b5c31a77..f4500f04147d 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -30,7 +30,7 @@ config 6PACK
Note that this driver is still experimental and might cause
problems. For details about the features and the usage of the
- driver, read <file:Documentation/networking/6pack.txt>.
+ driver, read <file:Documentation/networking/6pack.rst>.
To compile this driver as a module, choose M here: the module
will be called 6pack.
@@ -84,7 +84,7 @@ config SCC
---help---
These cards are used to connect your Linux box to an amateur radio
in order to communicate with other computers. If you want to use
- this, read <file:Documentation/networking/z8530drv.txt> and the
+ this, read <file:Documentation/networking/z8530drv.rst> and the
AX25-HOWTO, available from
<http://www.tldp.org/docs.html#howto>. Also make sure to say Y
to "Amateur Radio AX.25 Level 2" support.
@@ -98,7 +98,7 @@ config SCC_DELAY
help
Say Y here if you experience problems with the SCC driver not
working properly; please read
- <file:Documentation/networking/z8530drv.txt> for details.
+ <file:Documentation/networking/z8530drv.rst> for details.
If unsure, say N.
@@ -127,7 +127,7 @@ config BAYCOM_SER_FDX
your serial interface chip. To configure the driver, use the sethdlc
utility available in the standard ax25 utilities package. For
information on the modems, see <http://www.baycom.de/> and
- <file:Documentation/networking/baycom.txt>.
+ <file:Documentation/networking/baycom.rst>.
To compile this driver as a module, choose M here: the module
will be called baycom_ser_fdx. This is recommended.
@@ -145,7 +145,7 @@ config BAYCOM_SER_HDX
the driver, use the sethdlc utility available in the standard ax25
utilities package. For information on the modems, see
<http://www.baycom.de/> and
- <file:Documentation/networking/baycom.txt>.
+ <file:Documentation/networking/baycom.rst>.
To compile this driver as a module, choose M here: the module
will be called baycom_ser_hdx. This is recommended.
@@ -160,7 +160,7 @@ config BAYCOM_PAR
par96 designs. To configure the driver, use the sethdlc utility
available in the standard ax25 utilities package. For information on
the modems, see <http://www.baycom.de/> and the file
- <file:Documentation/networking/baycom.txt>.
+ <file:Documentation/networking/baycom.rst>.
To compile this driver as a module, choose M here: the module
will be called baycom_par. This is recommended.
@@ -175,7 +175,7 @@ config BAYCOM_EPP
designs. To configure the driver, use the sethdlc utility available
in the standard ax25 utilities package. For information on the
modems, see <http://www.baycom.de/> and the file
- <file:Documentation/networking/baycom.txt>.
+ <file:Documentation/networking/baycom.rst>.
To compile this driver as a module, choose M here: the module
will be called baycom_epp. This is recommended.
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index e2ad3c2e8df5..60dcaf2a04a9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -107,6 +107,25 @@ struct bpqdev {
static LIST_HEAD(bpq_devices);
+/*
+ * bpqether network devices are paired with ethernet devices below them, so
+ * form a special "super class" of normal ethernet devices; split their locks
+ * off into a separate class since they always nest.
+ */
+static struct lock_class_key bpq_netdev_xmit_lock_key;
+
+static void bpq_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
+}
+
+static void bpq_set_lockdep_class(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
+}
+
/* ------------------------------------------------------------------------ */
@@ -478,6 +497,7 @@ static int bpq_new_device(struct net_device *edev)
err = register_netdevice(ndev);
if (err)
goto error;
+ bpq_set_lockdep_class(ndev);
/* List protected by RTNL */
list_add_rcu(&bpq->bpq_list, &bpq_devices);
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 6c03932d8a6b..33fdd55c6122 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -7,7 +7,7 @@
* ------------------
*
* You can find a subset of the documentation in
- * Documentation/networking/z8530drv.txt.
+ * Documentation/networking/z8530drv.rst.
*/
/*
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index b86611041db6..1e0c024b0a93 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -49,6 +49,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
xdp_set_data_meta_invalid(xdp);
xdp->data_end = xdp->data + len;
xdp->rxq = &nvchan->xdp_rxq;
+ xdp->frame_sz = PAGE_SIZE;
xdp->handle = 0;
memcpy(xdp->data, data, len);
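Not part of the patch: the xdp.frame_sz assignments added to cpsw, cpsw_new and netvsc in this series follow the xdp_buff API change where a driver tells the XDP core the total size of the buffer backing the frame (headroom + data + tailroom), so that bpf_xdp_adjust_tail() and xdp_frame/skb conversion (as in the netvsc_drv.c hunk below, which now uses xdp->frame_sz for build_skb()) know how much tailroom is really available. A hedged sketch of a typical single-page RX setup; the helper and variable names are illustrative.

#include <linux/mm.h>
#include <net/xdp.h>

static void example_prepare_xdp(struct xdp_buff *xdp, struct page *page,
				unsigned int headroom, unsigned int len,
				struct xdp_rxq_info *rxq)
{
	void *hard_start = page_address(page);

	xdp->data_hard_start = hard_start;
	xdp->data = hard_start + headroom;
	xdp_set_data_meta_invalid(xdp);
	xdp->data_end = xdp->data + len;
	xdp->rxq = rxq;
	/* New field: full size of the buffer the frame lives in, here one
	 * page per frame, so tailroom can be computed correctly later.
	 */
	xdp->frame_sz = PAGE_SIZE;
}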
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index ebcfbae05690..6267f706e8ee 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -795,7 +795,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
if (xbuf) {
unsigned int hdroom = xdp->data - xdp->data_hard_start;
unsigned int xlen = xdp->data_end - xdp->data;
- unsigned int frag_size = netvsc_xdp_fraglen(hdroom + xlen);
+ unsigned int frag_size = xdp->frame_sz;
skb = build_skb(xbuf, frag_size);
@@ -2457,6 +2457,8 @@ static int netvsc_probe(struct hv_device *dev,
NETIF_F_HW_VLAN_CTAG_RX;
net->vlan_features = net->features;
+ netdev_lockdep_set_classes(net);
+
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index b671bea0aa7c..66570609c845 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -415,13 +415,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
evt_ring->state);
}
-/* Return the hardware's notion of the current state of a channel */
-static enum gsi_channel_state
-gsi_channel_state(struct gsi *gsi, u32 channel_id)
+/* Fetch the current state of a channel from hardware */
+static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
+ u32 channel_id = gsi_channel_id(channel);
+ void *virt = channel->gsi->virt;
u32 val;
- val = ioread32(gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+ val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
return u32_get_bits(val, CHSTATE_FMASK);
}
@@ -432,16 +433,18 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
struct completion *completion = &channel->completion;
u32 channel_id = gsi_channel_id(channel);
+ struct gsi *gsi = channel->gsi;
u32 val;
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- if (gsi_command(channel->gsi, GSI_CH_CMD_OFFSET, val, completion))
+ if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
return 0; /* Success! */
- dev_err(channel->gsi->dev, "GSI command %u to channel %u timed out "
- "(state is %u)\n", opcode, channel_id, channel->state);
+ dev_err(gsi->dev,
+ "GSI command %u to channel %u timed out (state is %u)\n",
+ opcode, channel_id, gsi_channel_state(channel));
return -ETIMEDOUT;
}
@@ -450,18 +453,21 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ enum gsi_channel_state state;
int ret;
/* Get initial channel state */
- channel->state = gsi_channel_state(gsi, channel_id);
-
- if (channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ state = gsi_channel_state(channel);
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
return -EINVAL;
ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
- if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
+
+ /* Channel state will normally have been updated */
+ state = gsi_channel_state(channel);
+ if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
- channel->state);
+ state);
ret = -EIO;
}
@@ -471,18 +477,21 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
- enum gsi_channel_state state = channel->state;
+ enum gsi_channel_state state;
int ret;
+ state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
state != GSI_CHANNEL_STATE_STOPPED)
return -EINVAL;
ret = gsi_channel_command(channel, GSI_CH_START);
- if (!ret && channel->state != GSI_CHANNEL_STATE_STARTED) {
+
+ /* Channel state will normally have been updated */
+ state = gsi_channel_state(channel);
+ if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
dev_err(channel->gsi->dev,
- "bad channel state (%u) after start\n",
- channel->state);
+ "bad channel state (%u) after start\n", state);
ret = -EIO;
}
@@ -492,23 +501,27 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
- enum gsi_channel_state state = channel->state;
+ enum gsi_channel_state state;
int ret;
+ state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STARTED &&
state != GSI_CHANNEL_STATE_STOP_IN_PROC)
return -EINVAL;
ret = gsi_channel_command(channel, GSI_CH_STOP);
- if (ret || channel->state == GSI_CHANNEL_STATE_STOPPED)
+
+ /* Channel state will normally have been updated */
+ state = gsi_channel_state(channel);
+ if (ret || state == GSI_CHANNEL_STATE_STOPPED)
return ret;
/* We may have to try again if stop is in progress */
- if (channel->state == GSI_CHANNEL_STATE_STOP_IN_PROC)
+ if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
return -EAGAIN;
- dev_err(channel->gsi->dev, "bad channel state (%u) after stop\n",
- channel->state);
+ dev_err(channel->gsi->dev,
+ "bad channel state (%u) after stop\n", state);
return -EIO;
}
@@ -516,41 +529,49 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
+ enum gsi_channel_state state;
int ret;
msleep(1); /* A short delay is required before a RESET command */
- if (channel->state != GSI_CHANNEL_STATE_STOPPED &&
- channel->state != GSI_CHANNEL_STATE_ERROR) {
+ state = gsi_channel_state(channel);
+ if (state != GSI_CHANNEL_STATE_STOPPED &&
+ state != GSI_CHANNEL_STATE_ERROR) {
dev_err(channel->gsi->dev,
- "bad channel state (%u) before reset\n",
- channel->state);
+ "bad channel state (%u) before reset\n", state);
return;
}
ret = gsi_channel_command(channel, GSI_CH_RESET);
- if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED)
+
+ /* Channel state will normally have been updated */
+ state = gsi_channel_state(channel);
+ if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
dev_err(channel->gsi->dev,
- "bad channel state (%u) after reset\n",
- channel->state);
+ "bad channel state (%u) after reset\n", state);
}
/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ enum gsi_channel_state state;
int ret;
- if (channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad channel state (%u) before dealloc\n",
- channel->state);
+ state = gsi_channel_state(channel);
+ if (state != GSI_CHANNEL_STATE_ALLOCATED) {
+ dev_err(gsi->dev,
+ "bad channel state (%u) before dealloc\n", state);
return;
}
ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
- if (!ret && channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "bad channel state (%u) after dealloc\n",
- channel->state);
+
+ /* Channel state will normally have been updated */
+ state = gsi_channel_state(channel);
+ if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ dev_err(gsi->dev,
+ "bad channel state (%u) after dealloc\n", state);
}
/* Ring an event ring doorbell, reporting the last entry processed by the AP.
@@ -777,6 +798,7 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ enum gsi_channel_state state;
u32 retries;
int ret;
@@ -786,7 +808,8 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
* STOP command timed out. We won't stop a channel if stopping it
* was successful previously (so we still want the freeze above).
*/
- if (channel->state == GSI_CHANNEL_STATE_STOPPED)
+ state = gsi_channel_state(channel);
+ if (state == GSI_CHANNEL_STATE_STOPPED)
return 0;
/* RX channels might require a little time to enter STOPPED state */
@@ -811,18 +834,18 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
}
/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable)
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
mutex_lock(&gsi->mutex);
- /* Due to a hardware quirk we need to reset RX channels twice. */
gsi_channel_reset_command(channel);
- if (!channel->toward_ipa)
+ /* Due to a hardware quirk we may need to reset RX channels twice. */
+ if (legacy && !channel->toward_ipa)
gsi_channel_reset_command(channel);
- gsi_channel_program(channel, db_enable);
+ gsi_channel_program(channel, legacy);
gsi_channel_trans_cancel_pending(channel);
mutex_unlock(&gsi->mutex);
@@ -940,7 +963,6 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
channel_mask ^= BIT(channel_id);
channel = &gsi->channel[channel_id];
- channel->state = gsi_channel_state(gsi, channel_id);
complete(&channel->completion);
}
@@ -1434,7 +1456,7 @@ static void gsi_evt_ring_teardown(struct gsi *gsi)
/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
- bool db_enable)
+ bool legacy)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
u32 evt_ring_id = channel->evt_ring_id;
@@ -1453,7 +1475,7 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
if (ret)
goto err_evt_ring_de_alloc;
- gsi_channel_program(channel, db_enable);
+ gsi_channel_program(channel, legacy);
if (channel->toward_ipa)
netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
@@ -1530,7 +1552,7 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
}
/* Setup function for channels */
-static int gsi_channel_setup(struct gsi *gsi, bool db_enable)
+static int gsi_channel_setup(struct gsi *gsi, bool legacy)
{
u32 channel_id = 0;
u32 mask;
@@ -1542,7 +1564,7 @@ static int gsi_channel_setup(struct gsi *gsi, bool db_enable)
mutex_lock(&gsi->mutex);
do {
- ret = gsi_channel_setup_one(gsi, channel_id, db_enable);
+ ret = gsi_channel_setup_one(gsi, channel_id, legacy);
if (ret)
goto err_unwind;
} while (++channel_id < gsi->channel_count);
@@ -1628,7 +1650,7 @@ static void gsi_channel_teardown(struct gsi *gsi)
}
/* Setup function for GSI. GSI firmware must be loaded and initialized */
-int gsi_setup(struct gsi *gsi, bool db_enable)
+int gsi_setup(struct gsi *gsi, bool legacy)
{
u32 val;
@@ -1671,7 +1693,7 @@ int gsi_setup(struct gsi *gsi, bool db_enable)
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
- return gsi_channel_setup(gsi, db_enable);
+ return gsi_channel_setup(gsi, legacy);
}
/* Inverse of gsi_setup() */
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 0698ff1ae7a6..90a02194e7ad 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -113,8 +113,7 @@ struct gsi_channel {
u16 tre_count;
u16 event_count;
- struct completion completion; /* signals channel state changes */
- enum gsi_channel_state state;
+ struct completion completion; /* signals channel command completion */
struct gsi_ring tre_ring;
u32 evt_ring_id;
@@ -166,14 +165,14 @@ struct gsi {
/**
* gsi_setup() - Set up the GSI subsystem
* @gsi: Address of GSI structure embedded in an IPA structure
- * @db_enable: Whether to use the GSI doorbell engine
+ * @legacy: Set up for legacy hardware
*
* @Return: 0 if successful, or a negative error code
*
* Performs initialization that must wait until the GSI hardware is
* ready (including firmware loaded).
*/
-int gsi_setup(struct gsi *gsi, bool db_enable);
+int gsi_setup(struct gsi *gsi, bool legacy);
/**
* gsi_teardown() - Tear down GSI subsystem
@@ -221,15 +220,15 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
* gsi_channel_reset() - Reset an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to be reset
- * @db_enable: Whether doorbell engine should be enabled
+ * @legacy: Legacy behavior
*
- * Reset a channel and reconfigure it. The @db_enable flag indicates
- * whether the doorbell engine will be enabled following reconfiguration.
+ * Reset a channel and reconfigure it. The @legacy flag indicates
+ * that some steps should be done differently for legacy hardware.
*
* GSI hardware relinquishes ownership of all pending receive buffer
* transactions and they will complete with their cancelled flag set.
*/
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable);
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy);
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 23fb29889e5a..b10a85392952 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -47,6 +47,10 @@ struct ipa_interrupt;
* @mem_offset: Offset from @mem_virt used for access to IPA memory
* @mem_size: Total size (bytes) of memory at @mem_virt
* @mem: Array of IPA-local memory region descriptors
+ * @imem_iova: I/O virtual address of IPA region in IMEM
+ * @imem_size: Size of IMEM region
+ * @smem_iova: I/O virtual address of IPA region in SMEM
+ * @smem_size: Size of SMEM region
* @zero_addr: DMA address of preallocated zero-filled memory
* @zero_virt: Virtual address of preallocated zero-filled memory
* @zero_size: Size (bytes) of preallocated zero-filled memory
@@ -88,6 +92,12 @@ struct ipa {
u32 mem_size;
const struct ipa_mem *mem;
+ unsigned long imem_iova;
+ size_t imem_size;
+
+ unsigned long smem_iova;
+ size_t smem_size;
+
dma_addr_t zero_addr;
void *zero_virt;
size_t zero_size;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index cee417181f98..c9ab865e7290 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -103,28 +103,6 @@ struct ipa_cmd_ip_packet_init {
/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
-/* IPA_CMD_DMA_TASK_32B_ADDR */
-
-/* This opcode gets modified with a DMA operation count */
-
-#define DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK GENMASK(15, 8)
-
-struct ipa_cmd_hw_dma_task_32b_addr {
- __le16 flags;
- __le16 size;
- __le32 addr;
- __le16 packet_size;
- u8 reserved[6];
-};
-
-/* Field masks for ipa_cmd_hw_dma_task_32b_addr flags field */
-#define DMA_TASK_32B_ADDR_FLAGS_SW_RSVD_FMASK GENMASK(10, 0)
-#define DMA_TASK_32B_ADDR_FLAGS_CMPLT_FMASK GENMASK(11, 11)
-#define DMA_TASK_32B_ADDR_FLAGS_EOF_FMASK GENMASK(12, 12)
-#define DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK GENMASK(13, 13)
-#define DMA_TASK_32B_ADDR_FLAGS_LOCK_FMASK GENMASK(14, 14)
-#define DMA_TASK_32B_ADDR_FLAGS_UNLOCK_FMASK GENMASK(15, 15)
-
/* IPA_CMD_DMA_SHARED_MEM */
/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
@@ -163,7 +141,6 @@ union ipa_cmd_payload {
struct ipa_cmd_hw_hdr_init_local hdr_init_local;
struct ipa_cmd_register_write register_write;
struct ipa_cmd_ip_packet_init ip_packet_init;
- struct ipa_cmd_hw_dma_task_32b_addr dma_task_32b_addr;
struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};
@@ -508,42 +485,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
direction, opcode);
}
-/* Use a 32-bit DMA command to zero a block of memory */
-void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
- dma_addr_t addr, bool toward_ipa)
-{
- struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- enum ipa_cmd_opcode opcode = IPA_CMD_DMA_TASK_32B_ADDR;
- struct ipa_cmd_hw_dma_task_32b_addr *payload;
- union ipa_cmd_payload *cmd_payload;
- enum dma_data_direction direction;
- dma_addr_t payload_addr;
- u16 flags;
-
- /* assert(addr <= U32_MAX); */
- addr &= GENMASK_ULL(31, 0);
-
- /* The opcode encodes the number of DMA operations in the high byte */
- opcode |= u16_encode_bits(1, DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK);
-
- direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- /* complete: 0 = don't interrupt; eof: 0 = don't assert eot */
- flags = DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK;
- /* lock: 0 = don't lock endpoint; unlock: 0 = don't unlock */
-
- cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
- payload = &cmd_payload->dma_task_32b_addr;
-
- payload->flags = cpu_to_le16(flags);
- payload->size = cpu_to_le16(size);
- payload->addr = cpu_to_le32((u32)addr);
- payload->packet_size = cpu_to_le16(size);
-
- gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
-}
-
/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
dma_addr_t addr, bool toward_ipa)
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 4917525b3a47..e440aa69c8b5 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -35,7 +35,6 @@ enum ipa_cmd_opcode {
IPA_CMD_HDR_INIT_LOCAL = 9,
IPA_CMD_REGISTER_WRITE = 12,
IPA_CMD_IP_PACKET_INIT = 16,
- IPA_CMD_DMA_TASK_32B_ADDR = 17,
IPA_CMD_DMA_SHARED_MEM = 19,
IPA_CMD_IP_PACKET_TAG_STATUS = 20,
};
@@ -148,16 +147,6 @@ void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
u32 mask, bool clear_full);
/**
- * ipa_cmd_dma_task_32b_addr_add() - Add a 32-bit DMA command to a transaction
- * @trans: GSi transaction
- * @size: Number of bytes to be memory to be transferred
- * @addr: DMA address of buffer to be read into or written from
- * @toward_ipa: true means write to IPA memory; false means read
- */
-void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
- dma_addr_t addr, bool toward_ipa);
-
-/**
* ipa_cmd_dma_shared_mem_add() - Add a DMA memory command to a transaction
* @trans: GSI transaction
* @offset: Offset of IPA memory to be read or written
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
index 042b5fc3c135..43faa35ae726 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -193,7 +193,7 @@ static const struct ipa_resource_data ipa_resource_data = {
};
/* IPA-resident memory region configuration for the SC7180 SoC. */
-static const struct ipa_mem ipa_mem_data[] = {
+static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_SHARED] = {
.offset = 0x0000,
.size = 0x0080,
@@ -296,12 +296,20 @@ static const struct ipa_mem ipa_mem_data[] = {
},
};
+static struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146a8000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00002000,
+};
+
/* Configuration data for the SC7180 SoC. */
const struct ipa_data ipa_data_sc7180 = {
.version = IPA_VERSION_4_2,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
- .mem_count = ARRAY_SIZE(ipa_mem_data),
- .mem_data = ipa_mem_data,
+ .mem_data = &ipa_mem_data,
};
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index 0d9c36e1e806..52d4b84e0dac 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -74,7 +74,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.tx = {
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
- .delay = true,
},
},
},
@@ -235,7 +234,7 @@ static const struct ipa_resource_data ipa_resource_data = {
};
/* IPA-resident memory region configuration for the SDM845 SoC. */
-static const struct ipa_mem ipa_mem_data[] = {
+static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_SHARED] = {
.offset = 0x0000,
.size = 0x0080,
@@ -318,12 +317,20 @@ static const struct ipa_mem ipa_mem_data[] = {
},
};
+static struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146bd000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00002000,
+};
+
/* Configuration data for the SDM845 SoC. */
const struct ipa_data ipa_data_sdm845 = {
.version = IPA_VERSION_3_5_1,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
- .mem_count = ARRAY_SIZE(ipa_mem_data),
- .mem_data = ipa_mem_data,
+ .mem_data = &ipa_mem_data,
};
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 7110de2de817..7fc1058a5ca9 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -80,18 +80,12 @@ struct gsi_channel_data {
/**
* struct ipa_endpoint_tx_data - configuration data for TX endpoints
* @status_endpoint: endpoint to which status elements are sent
- * @delay: whether endpoint starts in delay mode
- *
- * Delay mode prevents a TX endpoint from transmitting anything, even if
- * commands have been presented to the hardware. Once the endpoint exits
- * delay mode, queued transfer commands are sent.
*
* The @status_endpoint is only valid if the endpoint's @status_enable
* flag is set.
*/
struct ipa_endpoint_tx_data {
enum ipa_endpoint_name status_endpoint;
- bool delay;
};
/**
@@ -245,15 +239,21 @@ struct ipa_resource_data {
};
/**
- * struct ipa_mem - IPA-local memory region description
- * @offset: offset in IPA memory space to base of the region
- * @size: size in bytes base of the region
- * @canary_count: number of 32-bit "canary" values that precede region
+ * struct ipa_mem_data - description of IPA memory regions
+ * @local_count: number of regions defined in the local[] array
+ * @local: array of IPA-local memory region descriptors
+ * @imem_addr: physical address of IPA region within IMEM
+ * @imem_size: size in bytes of IPA IMEM region
+ * @smem_id: item identifier for IPA region within SMEM memory
+ * @smem_size: size in bytes of the IPA SMEM region
*/
struct ipa_mem_data {
- u32 offset;
- u16 size;
- u16 canary_count;
+ u32 local_count;
+ const struct ipa_mem *local;
+ u32 imem_addr;
+ u32 imem_size;
+ u32 smem_id;
+ u32 smem_size;
};
/**
@@ -270,8 +270,7 @@ struct ipa_data {
u32 endpoint_count; /* # entries in endpoint_data[] */
const struct ipa_gsi_endpoint_data *endpoint_data;
const struct ipa_resource_data *resource_data;
- u32 mem_count; /* # entries in mem_data[] */
- const struct ipa_mem *mem_data;
+ const struct ipa_mem_data *mem_data;
};
extern const struct ipa_data ipa_data_sdm845;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index a21534f1462f..82066a223a67 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -32,14 +32,9 @@
/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
-#define IPA_ENDPOINT_STOP_RX_RETRIES 10
-#define IPA_ENDPOINT_STOP_RX_SIZE 1 /* bytes */
-
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
#define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */
-#define ENDPOINT_STOP_DMA_TIMEOUT 15 /* milliseconds */
-
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
IPA_STATUS_OPCODE_PACKET = 0x01,
@@ -284,25 +279,52 @@ static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
/* suspend_delay represents suspend for RX, delay for TX endpoints.
* Note that suspend is not supported starting with IPA v4.0.
*/
-static int
+static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ bool state;
u32 mask;
u32 val;
- /* assert(ipa->version == IPA_VERSION_3_5_1 */
+ /* Suspend is not supported for IPA v4.0+. Delay doesn't work
+ * correctly on IPA v4.2.
+ *
+ * if (endpoint->toward_ipa)
+ * assert(ipa->version != IPA_VERSION_4_2);
+ * else
+ * assert(ipa->version == IPA_VERSION_3_5_1);
+ */
mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
val = ioread32(ipa->reg_virt + offset);
- if (suspend_delay == !!(val & mask))
- return -EALREADY; /* Already set to desired state */
+ /* Don't bother if it's already in the requested state */
+ state = !!(val & mask);
+ if (suspend_delay != state) {
+ val ^= mask;
+ iowrite32(val, ipa->reg_virt + offset);
+ }
- val ^= mask;
- iowrite32(val, ipa->reg_virt + offset);
+ return state;
+}
- return 0;
+/* We currently don't care what the previous state was for delay mode */
+static void
+ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
+{
+ /* assert(endpoint->toward_ipa); */
+
+ (void)ipa_endpoint_init_ctrl(endpoint, enable);
+}
+
+/* Returns previous suspend state (true means it was enabled) */
+static bool
+ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
+{
+ /* assert(!endpoint->toward_ipa); */
+
+ return ipa_endpoint_init_ctrl(endpoint, enable);
}
/* Enable or disable delay or suspend mode on all modem endpoints */
@@ -311,7 +333,7 @@ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
bool support_suspend;
u32 endpoint_id;
- /* DELAY mode doesn't work right on IPA v4.2 */
+ /* DELAY mode doesn't work correctly on IPA v4.2 */
if (ipa->version == IPA_VERSION_4_2)
return;
@@ -325,8 +347,10 @@ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
continue;
/* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
- if (endpoint->toward_ipa || support_suspend)
- (void)ipa_endpoint_init_ctrl(endpoint, enable);
+ if (endpoint->toward_ipa)
+ ipa_endpoint_program_delay(endpoint, enable);
+ else if (support_suspend)
+ (void)ipa_endpoint_program_suspend(endpoint, enable);
}
}
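Why ipa_endpoint_program_suspend() returns the previous state becomes clear in the RX aggregation reset path further down: suspend is cleared for the duration of the workaround and restored only if it had been enabled. A minimal sketch of that save/restore pattern, using only names introduced by this patch:

	bool suspended;

	/* Clear suspend mode, remembering whether it was enabled */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* ... perform the channel reset work ... */

	/* Restore suspend mode only if it was enabled to begin with */
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);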
@@ -1133,10 +1157,10 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
struct device *dev = &endpoint->ipa->pdev->dev;
struct ipa *ipa = endpoint->ipa;
- bool endpoint_suspended = false;
struct gsi *gsi = &ipa->gsi;
+ bool suspended = false;
dma_addr_t addr;
- bool db_enable;
+ bool legacy;
u32 retries;
u32 len = 1;
void *virt;
@@ -1164,8 +1188,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
/* Make sure the channel isn't suspended */
if (endpoint->ipa->version == IPA_VERSION_3_5_1)
- if (!ipa_endpoint_init_ctrl(endpoint, false))
- endpoint_suspended = true;
+ suspended = ipa_endpoint_program_suspend(endpoint, false);
/* Start channel and do a 1 byte read */
ret = gsi_channel_start(gsi, endpoint->channel_id);
@@ -1191,7 +1214,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
gsi_trans_read_byte_done(gsi, endpoint->channel_id);
- ret = ipa_endpoint_stop(endpoint);
+ ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret)
goto out_suspend_again;
@@ -1200,18 +1223,18 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
* complete the channel reset sequence. Finish by suspending the
* channel again (if necessary).
*/
- db_enable = ipa->version == IPA_VERSION_3_5_1;
- gsi_channel_reset(gsi, endpoint->channel_id, db_enable);
+ legacy = ipa->version == IPA_VERSION_3_5_1;
+ gsi_channel_reset(gsi, endpoint->channel_id, legacy);
msleep(1);
goto out_suspend_again;
err_endpoint_stop:
- ipa_endpoint_stop(endpoint);
+ (void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
- if (endpoint_suspended)
- (void)ipa_endpoint_init_ctrl(endpoint, true);
+ if (suspended)
+ (void)ipa_endpoint_program_suspend(endpoint, true);
dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
kfree(virt);
@@ -1223,8 +1246,8 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
u32 channel_id = endpoint->channel_id;
struct ipa *ipa = endpoint->ipa;
- bool db_enable;
bool special;
+ bool legacy;
int ret = 0;
/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
@@ -1233,12 +1256,12 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
*
* IPA v3.5.1 enables the doorbell engine. Newer versions do not.
*/
- db_enable = ipa->version == IPA_VERSION_3_5_1;
+ legacy = ipa->version == IPA_VERSION_3_5_1;
special = !endpoint->toward_ipa && endpoint->data->aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
- gsi_channel_reset(&ipa->gsi, channel_id, db_enable);
+ gsi_channel_reset(&ipa->gsi, channel_id, legacy);
if (ret)
dev_err(&ipa->pdev->dev,
@@ -1246,94 +1269,18 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
ret, endpoint->channel_id, endpoint->endpoint_id);
}
-static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
-{
- u16 size = IPA_ENDPOINT_STOP_RX_SIZE;
- struct gsi_trans *trans;
- dma_addr_t addr;
- int ret;
-
- trans = ipa_cmd_trans_alloc(ipa, 1);
- if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction for RX endpoint STOP workaround\n");
- return -EBUSY;
- }
-
- /* Read into the highest part of the zero memory area */
- addr = ipa->zero_addr + ipa->zero_size - size;
-
- ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false);
-
- ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT);
- if (ret)
- gsi_trans_free(trans);
-
- return ret;
-}
-
-/**
- * ipa_endpoint_stop() - Stops a GSI channel in IPA
- * @client: Client whose endpoint should be stopped
- *
- * This function implements the sequence to stop a GSI channel
- * in IPA. This function returns when the channel is is STOP state.
- *
- * Return value: 0 on success, negative otherwise
- */
-int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
-{
- u32 retries = IPA_ENDPOINT_STOP_RX_RETRIES;
- int ret;
-
- do {
- struct ipa *ipa = endpoint->ipa;
- struct gsi *gsi = &ipa->gsi;
-
- ret = gsi_channel_stop(gsi, endpoint->channel_id);
- if (ret != -EAGAIN || endpoint->toward_ipa)
- break;
-
- /* For IPA v3.5.1, send a DMA read task and check again */
- if (ipa->version == IPA_VERSION_3_5_1) {
- ret = ipa_endpoint_stop_rx_dma(ipa);
- if (ret)
- break;
- }
-
- msleep(1);
- } while (retries--);
-
- return retries ? ret : -EIO;
-}
-
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
- int ret;
-
if (endpoint->toward_ipa) {
- bool delay_mode = endpoint->data->tx.delay;
-
- ret = ipa_endpoint_init_ctrl(endpoint, delay_mode);
- /* Endpoint is expected to not be in delay mode */
- if (!ret != delay_mode) {
- dev_warn(dev,
- "TX endpoint %u was %sin delay mode\n",
- endpoint->endpoint_id,
- delay_mode ? "already " : "");
- }
+ if (endpoint->ipa->version != IPA_VERSION_4_2)
+ ipa_endpoint_program_delay(endpoint, false);
ipa_endpoint_init_hdr_ext(endpoint);
ipa_endpoint_init_aggr(endpoint);
ipa_endpoint_init_deaggr(endpoint);
ipa_endpoint_init_seq(endpoint);
} else {
- if (endpoint->ipa->version == IPA_VERSION_3_5_1) {
- if (!ipa_endpoint_init_ctrl(endpoint, false))
- dev_warn(dev,
- "RX endpoint %u was suspended\n",
- endpoint->endpoint_id);
- }
+ if (endpoint->ipa->version == IPA_VERSION_3_5_1)
+ (void)ipa_endpoint_program_suspend(endpoint, false);
ipa_endpoint_init_hdr_ext(endpoint);
ipa_endpoint_init_aggr(endpoint);
}
@@ -1374,12 +1321,13 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ struct gsi *gsi = &ipa->gsi;
int ret;
- if (!(endpoint->ipa->enabled & mask))
+ if (!(ipa->enabled & mask))
return;
- endpoint->ipa->enabled ^= mask;
+ ipa->enabled ^= mask;
if (!endpoint->toward_ipa) {
ipa_endpoint_replenish_disable(endpoint);
@@ -1388,7 +1336,7 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
}
/* Note that if stop fails, the channel's state is not well-defined */
- ret = ipa_endpoint_stop(endpoint);
+ ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret)
dev_err(&ipa->pdev->dev,
"error %d attempting to stop endpoint %u\n", ret,
@@ -1445,7 +1393,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
* aggregation frame, then simulating the arrival of such
* an interrupt.
*/
- WARN_ON(ipa_endpoint_init_ctrl(endpoint, true));
+ (void)ipa_endpoint_program_suspend(endpoint, true);
ipa_endpoint_suspend_aggr(endpoint);
}
@@ -1468,7 +1416,7 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
/* IPA v3.5.1 doesn't use channel start for resume */
start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
if (!endpoint->toward_ipa && !start_channel)
- WARN_ON(ipa_endpoint_init_ctrl(endpoint, false));
+ (void)ipa_endpoint_program_suspend(endpoint, false);
ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
if (ret)
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 4b336a1f759d..3b297d65828e 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -76,8 +76,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb);
-int ipa_endpoint_stop(struct ipa_endpoint *endpoint);
-
void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint);
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint);
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 28998dcce3d2..e0b1fe3c34f9 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -108,7 +108,7 @@ int ipa_setup(struct ipa *ipa)
struct ipa_endpoint *command_endpoint;
int ret;
- /* IPA v4.0 and above don't use the doorbell engine. */
+ /* Setup for IPA v3.5.1 has some slight differences */
ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
if (ret)
return ret;
@@ -778,7 +778,7 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_kfree_ipa;
- ret = ipa_mem_init(ipa, data->mem_count, data->mem_data);
+ ret = ipa_mem_init(ipa, data->mem_data);
if (ret)
goto err_reg_exit;
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 42d2c29d9f0c..3ef814119aab 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -8,19 +8,24 @@
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
#include <linux/io.h>
+#include <linux/soc/qcom/smem.h>
#include "ipa.h"
#include "ipa_reg.h"
+#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
-#include "ipa_data.h"
#include "ipa_table.h"
#include "gsi_trans.h"
/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef)
+/* SMEM host id representing the modem. */
+#define QCOM_SMEM_HOST_MODEM 1
+
/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
@@ -265,16 +270,194 @@ int ipa_mem_zero_modem(struct ipa *ipa)
return 0;
}
+/**
+ * ipa_imem_init() - Initialize IMEM memory used by the IPA
+ * @ipa: IPA pointer
+ * @addr: Physical address of the IPA region in IMEM
+ * @size: Size (bytes) of the IPA region in IMEM
+ *
+ * IMEM is a block of shared memory separate from system DRAM, and
+ * a portion of this memory is available for the IPA to use. The
+ * modem accesses this memory directly, but the IPA accesses it
+ * via the IOMMU, using the AP's credentials.
+ *
+ * If this region exists (size > 0) we map it for read/write access
+ * through the IOMMU using the IPA device.
+ *
+ * Note: @addr and @size are not guaranteed to be page-aligned.
+ */
+static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct iommu_domain *domain;
+ unsigned long iova;
+ phys_addr_t phys;
+ int ret;
+
+ if (!size)
+ return 0; /* IMEM memory not used */
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain) {
+ dev_err(dev, "no IOMMU domain found for IMEM\n");
+ return -EINVAL;
+ }
+
+ /* Align the address down and the size up to page boundaries */
+ phys = addr & PAGE_MASK;
+ size = PAGE_ALIGN(size + addr - phys);
+ iova = phys; /* We just want a direct mapping */
+
+ ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ return ret;
+
+ ipa->imem_iova = iova;
+ ipa->imem_size = size;
+
+ return 0;
+}
+
+static void ipa_imem_exit(struct ipa *ipa)
+{
+ struct iommu_domain *domain;
+ struct device *dev;
+
+ if (!ipa->imem_size)
+ return;
+
+ dev = &ipa->pdev->dev;
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain) {
+ size_t size;
+
+ size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
+ if (size != ipa->imem_size)
+ dev_warn(dev, "unmapped %zu IMEM bytes, expected %lu\n",
+ size, ipa->imem_size);
+ } else {
+ dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
+ }
+
+ ipa->imem_size = 0;
+ ipa->imem_iova = 0;
+}
+
+/**
+ * ipa_smem_init() - Initialize SMEM memory used by the IPA
+ * @ipa: IPA pointer
+ * @item: Item ID of SMEM memory
+ * @size: Size (bytes) of SMEM memory region
+ *
+ * SMEM is a managed block of shared DRAM, from which numbered "items"
+ * can be allocated. One item is designated for use by the IPA.
+ *
+ * The modem accesses SMEM memory directly, but the IPA accesses it
+ * via the IOMMU, using the AP's credentials.
+ *
+ * If the size provided is non-zero, we allocate it and map it for
+ * access through the IOMMU.
+ *
+ * Note: @size and the item address are not guaranteed to be page-aligned.
+ */
+static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct iommu_domain *domain;
+ unsigned long iova;
+ phys_addr_t phys;
+ phys_addr_t addr;
+ size_t actual;
+ void *virt;
+ int ret;
+
+ if (!size)
+ return 0; /* SMEM memory not used */
+
+ /* SMEM is memory shared between the AP and another system entity
+ * (in this case, the modem). An allocation from SMEM is persistent
+ * until the AP reboots; there is no way to free an allocated SMEM
+ * region. Allocation only reserves the space; to use it you need
+ * to "get" a pointer it (this implies no reference counting).
+ * The item might have already been allocated, in which case we
+ * use it unless the size isn't what we expect.
+ */
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
+ if (ret && ret != -EEXIST) {
+ dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
+ ret, size, item);
+ return ret;
+ }
+
+ /* Now get the address of the SMEM memory region */
+ virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
+ if (IS_ERR(virt)) {
+ ret = PTR_ERR(virt);
+ dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
+ return ret;
+ }
+
+ /* In case the region was already allocated, verify the size */
+ if (ret && actual != size) {
+ dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
+ item, actual, size);
+ return -EINVAL;
+ }
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain) {
+ dev_err(dev, "no IOMMU domain found for SMEM\n");
+ return -EINVAL;
+ }
+
+ /* Align the address down and the size up to a page boundary */
+ addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
+ phys = addr & PAGE_MASK;
+ size = PAGE_ALIGN(size + addr - phys);
+ iova = phys; /* We just want a direct mapping */
+
+ ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ return ret;
+
+ ipa->smem_iova = iova;
+ ipa->smem_size = size;
+
+ return 0;
+}
+
+static void ipa_smem_exit(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain) {
+ size_t size;
+
+ size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
+ if (size != ipa->smem_size)
+ dev_warn(dev, "unmapped %zu SMEM bytes, expected %lu\n",
+ size, ipa->smem_size);
+
+ } else {
+ dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
+ }
+
+ ipa->smem_size = 0;
+ ipa->smem_iova = 0;
+}
+
/* Perform memory region-related initialization */
-int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem)
+int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
struct device *dev = &ipa->pdev->dev;
struct resource *res;
int ret;
- if (count > IPA_MEM_COUNT) {
+ if (mem_data->local_count > IPA_MEM_COUNT) {
dev_err(dev, "to many memory regions (%u > %u)\n",
- count, IPA_MEM_COUNT);
+ mem_data->local_count, IPA_MEM_COUNT);
return -EINVAL;
}
@@ -302,13 +485,30 @@ int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem)
ipa->mem_size = resource_size(res);
/* The ipa->mem[] array is indexed by enum ipa_mem_id values */
- ipa->mem = mem;
+ ipa->mem = mem_data->local;
+
+ ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
+ if (ret)
+ goto err_unmap;
+
+ ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
+ if (ret)
+ goto err_imem_exit;
return 0;
+
+err_imem_exit:
+ ipa_imem_exit(ipa);
+err_unmap:
+ memunmap(ipa->mem_virt);
+
+ return ret;
}
/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
+ ipa_smem_exit(ipa);
+ ipa_imem_exit(ipa);
memunmap(ipa->mem_virt);
}
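To make the page-alignment arithmetic in ipa_imem_init() and ipa_smem_init() concrete, here is a short sketch with a hypothetical, non-page-aligned address (4 KiB pages assumed):

	/* Hypothetical values, for illustration only */
	unsigned long addr = 0x146a8100;	/* not page aligned */
	size_t size = 0x2000;
	phys_addr_t phys = addr & PAGE_MASK;	/* 0x146a8000 */

	size = PAGE_ALIGN(size + addr - phys);	/* 0x2100 rounds up to 0x3000 */

Aligning the base down and padding the size up ensures the subsequent iommu_map() covers the whole region even when the platform-provided address is not page aligned.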
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 065cb499ebe5..f99180f84f0d 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -7,6 +7,7 @@
#define _IPA_MEM_H_
struct ipa;
+struct ipa_mem_data;
/**
* DOC: IPA Local Memory
@@ -84,7 +85,7 @@ void ipa_mem_teardown(struct ipa *ipa);
int ipa_mem_zero_modem(struct ipa *ipa);
-int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem);
+int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data);
void ipa_mem_exit(struct ipa *ipa);
#endif /* _IPA_MEM_H_ */
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f195f278a83a..15e87c097b0b 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -131,6 +131,8 @@ static int ipvlan_init(struct net_device *dev)
dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
+ netdev_lockdep_set_classes(dev);
+
ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats)
return -ENOMEM;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index d0d31cb99180..20b53e255f68 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -4049,6 +4049,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err < 0)
return err;
+ netdev_lockdep_set_classes(dev);
+
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
goto unregister;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0482adc9916b..9a419d5102ce 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -123,7 +123,8 @@ static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
struct macvlan_dev *vlan;
u32 idx = macvlan_eth_hash(addr);
- hlist_for_each_entry_rcu(vlan, &port->vlan_hash[idx], hlist) {
+ hlist_for_each_entry_rcu(vlan, &port->vlan_hash[idx], hlist,
+ lockdep_rtnl_is_held()) {
if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
return vlan;
}
@@ -541,12 +542,11 @@ xmit_world:
static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
- if (vlan->netpoll)
- netpoll_send_skb(vlan->netpoll, skb);
+ return netpoll_send_skb(vlan->netpoll, skb);
#else
BUG();
-#endif
return NETDEV_TX_OK;
+#endif
}
static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
@@ -889,6 +889,8 @@ static int macvlan_init(struct net_device *dev)
dev->gso_max_segs = lowerdev->gso_max_segs;
dev->hard_header_len = lowerdev->hard_header_len;
+ netdev_lockdep_set_classes(dev);
+
vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->pcpu_stats)
return -ENOMEM;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 3fa33d27eeba..2a32f26ead0b 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -157,6 +157,13 @@ config MDIO_I2C
This is library mode.
+config MDIO_IPQ4019
+ tristate "Qualcomm IPQ4019 MDIO interface support"
+ depends on HAS_IOMEM && OF_MDIO
+ help
+ This driver supports the MDIO interface found in Qualcomm
IPQ40xx series SoCs.
+
config MDIO_IPQ8064
tristate "Qualcomm IPQ8064 MDIO interface support"
depends on HAS_IOMEM && OF_MDIO
@@ -346,6 +353,17 @@ config BROADCOM_PHY
Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
BCM5481, BCM54810 and BCM5482 PHYs.
+config BCM54140_PHY
+ tristate "Broadcom BCM54140 PHY"
+ depends on PHYLIB
+ depends on HWMON || HWMON=n
+ select BCM_NET_PHYLIB
+ help
+ Support the Broadcom BCM54140 Quad SGMII/QSGMII PHY.
+
+ This driver also supports the hardware monitoring of this PHY and
+ exposes voltage and temperature sensors.
+
config BCM84881_PHY
tristate "Broadcom BCM84881 PHY"
depends on PHYLIB
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 2f5c7093a65b..dc9e53b511d6 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
+obj-$(CONFIG_MDIO_IPQ4019) += mdio-ipq4019.o
obj-$(CONFIG_MDIO_IPQ8064) += mdio-ipq8064.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
@@ -68,6 +69,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+obj-$(CONFIG_BCM54140_PHY) += bcm54140.o
obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 31f731e6df72..acd51b29a476 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
#include <linux/of_gpio.h>
#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
@@ -43,6 +44,19 @@
#define AT803X_INTR_STATUS 0x13
#define AT803X_SMART_SPEED 0x14
+#define AT803X_SMART_SPEED_ENABLE BIT(5)
+#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK GENMASK(4, 2)
+#define AT803X_SMART_SPEED_BYPASS_TIMER BIT(1)
+#define AT803X_CDT 0x16
+#define AT803X_CDT_MDI_PAIR_MASK GENMASK(9, 8)
+#define AT803X_CDT_ENABLE_TEST BIT(0)
+#define AT803X_CDT_STATUS 0x1c
+#define AT803X_CDT_STATUS_STAT_NORMAL 0
+#define AT803X_CDT_STATUS_STAT_SHORT 1
+#define AT803X_CDT_STATUS_STAT_OPEN 2
+#define AT803X_CDT_STATUS_STAT_FAIL 3
+#define AT803X_CDT_STATUS_STAT_MASK GENMASK(9, 8)
+#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
#define AT803X_LED_CONTROL 0x18
#define AT803X_DEVICE_ADDR 0x03
@@ -103,9 +117,14 @@
#define AT803X_CLK_OUT_STRENGTH_HALF 1
#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
+#define AT803X_DEFAULT_DOWNSHIFT 5
+#define AT803X_MIN_DOWNSHIFT 2
+#define AT803X_MAX_DOWNSHIFT 9
+
#define ATH9331_PHY_ID 0x004dd041
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
+#define ATH8032_PHY_ID 0x004dd023
#define ATH8035_PHY_ID 0x004dd072
#define AT803X_PHY_ID_MASK 0xffffffef
@@ -712,15 +731,250 @@ static int at803x_read_status(struct phy_device *phydev)
return 0;
}
+static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
+{
+ int val;
+
+ val = phy_read(phydev, AT803X_SMART_SPEED);
+ if (val < 0)
+ return val;
+
+ if (val & AT803X_SMART_SPEED_ENABLE)
+ *d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
+ else
+ *d = DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 mask, set;
+ int ret;
+
+ switch (cnt) {
+ case DOWNSHIFT_DEV_DEFAULT_COUNT:
+ cnt = AT803X_DEFAULT_DOWNSHIFT;
+ fallthrough;
+ case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
+ set = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER |
+ FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
+ mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
+ break;
+ case DOWNSHIFT_DEV_DISABLE:
+ set = 0;
+ mask = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
+
+ /* After changing the smart speed settings, we need to perform a
+ * software reset; use phy_init_hw() to make sure any values that
+ * might have been lost during the reset are reapplied.
+ */
+ if (ret == 1)
+ ret = phy_init_hw(phydev);
+
+ return ret;
+}
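To make the encoding concrete, the values follow directly from the masks defined above; for the default count of 5 the register is modified with:

	set = AT803X_SMART_SPEED_ENABLE			/* BIT(5) = 0x20 */
	    | AT803X_SMART_SPEED_BYPASS_TIMER		/* BIT(1) = 0x02 */
	    | FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, 5 - 2);	/* 0x0c */
	/* i.e. set = 0x2e, the retry-limit field holds cnt - 2 = 3 */

From userspace the tunable is normally driven through ethtool's PHY-tunable interface, roughly `ethtool --set-phy-tunable eth0 downshift on count 5` (assuming a reasonably recent ethtool; exact syntax may vary).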
+
+static int at803x_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int at803x_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int at803x_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case AT803X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool at803x_cdt_test_failed(u16 status)
+{
+ return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
+ AT803X_CDT_STATUS_STAT_FAIL;
+}
+
+static bool at803x_cdt_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int at803x_cdt_fault_length(u16 status)
+{
+ int dt;
+
+ /* According to the datasheet the distance to the fault is
+ * DELTA_TIME * 0.824 meters.
+ *
+ * The author suspects the correct formula is:
+ *
+ * fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
+ *
+ * where c is the speed of light, VF is the velocity factor of
+ * the twisted-pair cable, 125 MHz is the counter frequency, and
+ * we divide by 2 because the hardware measures the round-trip
+ * time from the PHY to the fault and back.
+ *
+ * With a VF of 0.69 we get the factor 0.824 mentioned in the
+ * datasheet.
+ */
+ dt = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, status);
+
+ return (dt * 824) / 10;
+}
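A quick sanity check of the 0.824 factor, assuming VF = 0.69 as the comment suggests:

	(c * VF) / f_counter / 2
	  = (299792458 m/s * 0.69) / 125000000 Hz / 2
	  ≈ 0.83 m per DELTA_TIME count

This is consistent with the datasheet's 0.824 m/count, and (dt * 824) / 10 then yields the fault distance in centimeters, the unit the ethtool netlink interface expects.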
+
+static int at803x_cdt_start(struct phy_device *phydev, int pair)
+{
+ u16 cdt;
+
+ cdt = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
+ AT803X_CDT_ENABLE_TEST;
+
+ return phy_write(phydev, AT803X_CDT, cdt);
+}
+
+static int at803x_cdt_wait_for_completion(struct phy_device *phydev)
+{
+ int val, ret;
+
+ /* One test run takes about 25ms */
+ ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
+ !(val & AT803X_CDT_ENABLE_TEST),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+ int ret, val;
+
+ ret = at803x_cdt_start(phydev, pair);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, AT803X_CDT_STATUS);
+ if (val < 0)
+ return val;
+
+ if (at803x_cdt_test_failed(val))
+ return 0;
+
+ ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ at803x_cable_test_result_trans(val));
+
+ if (at803x_cdt_fault_length_valid(val))
+ ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
+ at803x_cdt_fault_length(val));
+
+ return 1;
+}
+
+static int at803x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ unsigned long pair_mask = 0xf;
+ int retries = 20;
+ int pair, ret;
+
+ *finished = false;
+
+ /* According to the datasheet the CDT can be performed when
+ * there is no link partner or when the link partner is
+ * auto-negotiating. Starting the test will restart the AN
+ * automatically. It seems that by repeating the test we will
+ * eventually get a slot where our link partner won't disturb our
+ * measurement.
+ */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = at803x_cable_test_one_pair(phydev, pair);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ clear_bit(pair, &pair_mask);
+ }
+ if (pair_mask)
+ msleep(250);
+ }
+
+ *finished = true;
+
+ return 0;
+}
+
+static int at803x_cable_test_start(struct phy_device *phydev)
+{
+ /* Enable auto-negotiation, but advertise no capabilities, so no link
+ * will be established. A restart of the auto-negotiation is not
+ * required, because the cable test will automatically break the link.
+ */
+ phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+ phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
+ phy_write(phydev, MII_CTRL1000, 0);
+
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
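For reference (not part of this patch, and assuming a recent enough ethtool build): the cable_test_start/cable_test_get_status callbacks added here are driven from userspace via `ethtool --cable-test eth0`, with results reported over the ethtool netlink interface.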
static struct phy_driver at803x_driver[] = {
{
/* Qualcomm Atheros AR8035 */
.phy_id = ATH8035_PHY_ID,
.name = "Qualcomm Atheros AR8035",
.phy_id_mask = AT803X_PHY_ID_MASK,
+ .flags = PHY_POLL_CABLE_TEST,
.probe = at803x_probe,
.remove = at803x_remove,
.config_init = at803x_config_init,
+ .soft_reset = genphy_soft_reset,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
@@ -729,6 +983,10 @@ static struct phy_driver at803x_driver[] = {
.read_status = at803x_read_status,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at803x_cable_test_get_status,
}, {
/* Qualcomm Atheros AR8030 */
.phy_id = ATH8030_PHY_ID,
@@ -750,9 +1008,11 @@ static struct phy_driver at803x_driver[] = {
.phy_id = ATH8031_PHY_ID,
.name = "Qualcomm Atheros AR8031/AR8033",
.phy_id_mask = AT803X_PHY_ID_MASK,
+ .flags = PHY_POLL_CABLE_TEST,
.probe = at803x_probe,
.remove = at803x_remove,
.config_init = at803x_config_init,
+ .soft_reset = genphy_soft_reset,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
@@ -762,6 +1022,25 @@ static struct phy_driver at803x_driver[] = {
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at803x_cable_test_get_status,
+}, {
+ /* Qualcomm Atheros AR8032 */
+ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
+ .name = "Qualcomm Atheros AR8032",
+ .probe = at803x_probe,
+ .remove = at803x_remove,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .ack_interrupt = at803x_ack_interrupt,
+ .config_intr = at803x_config_intr,
}, {
/* ATHEROS AR9331 */
PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
@@ -778,6 +1057,7 @@ module_phy_driver(at803x_driver);
static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ ATH8030_PHY_ID, AT803X_PHY_ID_MASK },
{ ATH8031_PHY_ID, AT803X_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
{ ATH8035_PHY_ID, AT803X_PHY_ID_MASK },
{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
{ }
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index e77b274a09fd..cb92786e3ded 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -4,45 +4,103 @@
*/
#include "bcm-phy-lib.h"
+#include <linux/bitfield.h>
#include <linux/brcmphy.h>
#include <linux/export.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#define MII_BCM_CHANNEL_WIDTH 0x2000
#define BCM_CL45VEN_EEE_ADV 0x3c
-int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
+int __bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
{
int rc;
- rc = phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
+ rc = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
if (rc < 0)
return rc;
- return phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
+ return __phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_write_exp);
+
+int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
+{
+ int rc;
+
+ phy_lock_mdio_bus(phydev);
+ rc = __bcm_phy_write_exp(phydev, reg, val);
+ phy_unlock_mdio_bus(phydev);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(bcm_phy_write_exp);
-int bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
+int __bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
{
int val;
- val = phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
+ val = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
if (val < 0)
return val;
- val = phy_read(phydev, MII_BCM54XX_EXP_DATA);
+ val = __phy_read(phydev, MII_BCM54XX_EXP_DATA);
/* Restore default value. It's O.K. if this write fails. */
- phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
+ __phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
return val;
}
+EXPORT_SYMBOL_GPL(__bcm_phy_read_exp);
+
+int bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
+{
+ int rc;
+
+ phy_lock_mdio_bus(phydev);
+ rc = __bcm_phy_read_exp(phydev, reg);
+ phy_unlock_mdio_bus(phydev);
+
+ return rc;
+}
EXPORT_SYMBOL_GPL(bcm_phy_read_exp);
+int __bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set)
+{
+ int new, ret;
+
+ ret = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_read(phydev, MII_BCM54XX_EXP_DATA);
+ if (ret < 0)
+ return ret;
+
+ new = (ret & ~mask) | set;
+ if (new == ret)
+ return 0;
+
+ return __phy_write(phydev, MII_BCM54XX_EXP_DATA, new);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_modify_exp);
+
+int bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __bcm_phy_modify_exp(phydev, reg, mask, set);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_modify_exp);
+
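As elsewhere in phylib, the double-underscore variants assume the caller already holds the MDIO bus lock, while the plain variants take and release it internally. A minimal sketch of grouping several expansion-register accesses under one lock, mirroring what _bcm_phy_cable_test_start() below does:

	phy_lock_mdio_bus(phydev);
	ret = __bcm_phy_modify_exp(phydev, BCM54XX_EXP_ECD_CTRL, mask, set);
	if (!ret)
		ret = __bcm_phy_read_exp(phydev, BCM54XX_EXP_ECD_FAULT_TYPE);
	phy_unlock_mdio_bus(phydev);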
int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
{
/* The register must be written to both the Shadow Register Select and
@@ -155,6 +213,86 @@ int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
}
EXPORT_SYMBOL_GPL(bcm_phy_write_shadow);
+int __bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb)
+{
+ int val;
+
+ val = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ if (val < 0)
+ return val;
+
+ return __phy_read(phydev, MII_BCM54XX_RDB_DATA);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_read_rdb);
+
+int bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __bcm_phy_read_rdb(phydev, rdb);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_read_rdb);
+
+int __bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val)
+{
+ int ret;
+
+ ret = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ if (ret < 0)
+ return ret;
+
+ return __phy_write(phydev, MII_BCM54XX_RDB_DATA, val);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_write_rdb);
+
+int bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __bcm_phy_write_rdb(phydev, rdb, val);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_write_rdb);
+
+int __bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask, u16 set)
+{
+ int new, ret;
+
+ ret = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_read(phydev, MII_BCM54XX_RDB_DATA);
+ if (ret < 0)
+ return ret;
+
+ new = (ret & ~mask) | set;
+ if (new == ret)
+ return 0;
+
+ return __phy_write(phydev, MII_BCM54XX_RDB_DATA, new);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_modify_rdb);
+
+int bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask, u16 set)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __bcm_phy_modify_rdb(phydev, rdb, mask, set);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_modify_rdb);
+
int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down)
{
int val;
@@ -445,6 +583,193 @@ int bcm_phy_enable_jumbo(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(bcm_phy_enable_jumbo);
+int __bcm_phy_enable_rdb_access(struct phy_device *phydev)
+{
+ return __bcm_phy_write_exp(phydev, BCM54XX_EXP_REG7E, 0);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_enable_rdb_access);
+
+int __bcm_phy_enable_legacy_access(struct phy_device *phydev)
+{
+ return __bcm_phy_write_rdb(phydev, BCM54XX_RDB_REG0087,
+ BCM54XX_ACCESS_MODE_LEGACY_EN);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_enable_legacy_access);
+
+static int _bcm_phy_cable_test_start(struct phy_device *phydev, bool is_rdb)
+{
+ u16 mask, set;
+ int ret;
+
+ /* Auto-negotiation must be enabled for cable diagnostics to work, but
+ * don't advertise any capabilities.
+ */
+ phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+ phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
+ phy_write(phydev, MII_CTRL1000, 0);
+
+ phy_lock_mdio_bus(phydev);
+ if (is_rdb) {
+ ret = __bcm_phy_enable_legacy_access(phydev);
+ if (ret)
+ goto out;
+ }
+
+ mask = BCM54XX_ECD_CTRL_CROSS_SHORT_DIS | BCM54XX_ECD_CTRL_UNIT_MASK;
+ set = BCM54XX_ECD_CTRL_RUN | BCM54XX_ECD_CTRL_BREAK_LINK |
+ FIELD_PREP(BCM54XX_ECD_CTRL_UNIT_MASK,
+ BCM54XX_ECD_CTRL_UNIT_CM);
+
+ ret = __bcm_phy_modify_exp(phydev, BCM54XX_EXP_ECD_CTRL, mask, set);
+
+out:
+ /* re-enable the RDB access even if there was an error */
+ if (is_rdb)
+ ret = __bcm_phy_enable_rdb_access(phydev) ? : ret;
+
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+static int bcm_phy_cable_test_report_trans(int result)
+{
+ switch (result) {
+ case BCM54XX_ECD_FAULT_TYPE_OK:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case BCM54XX_ECD_FAULT_TYPE_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case BCM54XX_ECD_FAULT_TYPE_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+ case BCM54XX_ECD_FAULT_TYPE_INVALID:
+ case BCM54XX_ECD_FAULT_TYPE_BUSY:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool bcm_phy_distance_valid(int result)
+{
+ switch (result) {
+ case BCM54XX_ECD_FAULT_TYPE_OPEN:
+ case BCM54XX_ECD_FAULT_TYPE_SAME_SHORT:
+ case BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int bcm_phy_report_length(struct phy_device *phydev, int pair)
+{
+ int val;
+
+ val = __bcm_phy_read_exp(phydev,
+ BCM54XX_EXP_ECD_PAIR_A_LENGTH_RESULTS + pair);
+ if (val < 0)
+ return val;
+
+ if (val == BCM54XX_ECD_LENGTH_RESULTS_INVALID)
+ return 0;
+
+ ethnl_cable_test_fault_length(phydev, pair, val);
+
+ return 0;
+}
+
+static int _bcm_phy_cable_test_get_status(struct phy_device *phydev,
+ bool *finished, bool is_rdb)
+{
+ int pair_a, pair_b, pair_c, pair_d, ret;
+
+ *finished = false;
+
+ phy_lock_mdio_bus(phydev);
+
+ if (is_rdb) {
+ ret = __bcm_phy_enable_legacy_access(phydev);
+ if (ret)
+ goto out;
+ }
+
+ ret = __bcm_phy_read_exp(phydev, BCM54XX_EXP_ECD_CTRL);
+ if (ret < 0)
+ goto out;
+
+ if (ret & BCM54XX_ECD_CTRL_IN_PROGRESS) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = __bcm_phy_read_exp(phydev, BCM54XX_EXP_ECD_FAULT_TYPE);
+ if (ret < 0)
+ goto out;
+
+ pair_a = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_A_MASK, ret);
+ pair_b = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_B_MASK, ret);
+ pair_c = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_C_MASK, ret);
+ pair_d = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_D_MASK, ret);
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ bcm_phy_cable_test_report_trans(pair_a));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
+ bcm_phy_cable_test_report_trans(pair_b));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
+ bcm_phy_cable_test_report_trans(pair_c));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
+ bcm_phy_cable_test_report_trans(pair_d));
+
+ if (bcm_phy_distance_valid(pair_a))
+ bcm_phy_report_length(phydev, 0);
+ if (bcm_phy_distance_valid(pair_b))
+ bcm_phy_report_length(phydev, 1);
+ if (bcm_phy_distance_valid(pair_c))
+ bcm_phy_report_length(phydev, 2);
+ if (bcm_phy_distance_valid(pair_d))
+ bcm_phy_report_length(phydev, 3);
+
+ ret = 0;
+ *finished = true;
+out:
+ /* re-enable the RDB access even if there was an error */
+ if (is_rdb)
+ ret = __bcm_phy_enable_rdb_access(phydev) ? : ret;
+
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+int bcm_phy_cable_test_start(struct phy_device *phydev)
+{
+ return _bcm_phy_cable_test_start(phydev, false);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_cable_test_start);
+
+int bcm_phy_cable_test_get_status(struct phy_device *phydev, bool *finished)
+{
+ return _bcm_phy_cable_test_get_status(phydev, finished, false);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_cable_test_get_status);
+
+/* We assume that all PHYs which support RDB access can be switched to legacy
+ * mode. If, in the future, this is no longer true, this will have to be
+ * re-implemented using RDB access.
+ */
+int bcm_phy_cable_test_start_rdb(struct phy_device *phydev)
+{
+ return _bcm_phy_cable_test_start(phydev, true);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_cable_test_start_rdb);
+
+int bcm_phy_cable_test_get_status_rdb(struct phy_device *phydev,
+ bool *finished)
+{
+ return _bcm_phy_cable_test_get_status(phydev, finished, true);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_cable_test_get_status_rdb);
+
MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 129df819be8c..237a8503c9b4 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -27,8 +27,12 @@
#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
+int __bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
+int __bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+int __bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set);
int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+int bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set);
static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
u16 reg, u16 val)
@@ -48,6 +52,15 @@ int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
u16 val);
int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow);
+int __bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val);
+int bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val);
+int __bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb);
+int bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb);
+int __bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask,
+ u16 set);
+int bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask,
+ u16 set);
+
int bcm_phy_ack_intr(struct phy_device *phydev);
int bcm_phy_config_intr(struct phy_device *phydev);
@@ -67,4 +80,10 @@ void bcm_phy_r_rc_cal_reset(struct phy_device *phydev);
int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev);
int bcm_phy_enable_jumbo(struct phy_device *phydev);
+int bcm_phy_cable_test_get_status_rdb(struct phy_device *phydev,
+ bool *finished);
+int bcm_phy_cable_test_start_rdb(struct phy_device *phydev);
+int bcm_phy_cable_test_start(struct phy_device *phydev);
+int bcm_phy_cable_test_get_status(struct phy_device *phydev, bool *finished);
+
#endif /* _LINUX_BCM_PHY_LIB_H */
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
new file mode 100644
index 000000000000..8998e68bb26b
--- /dev/null
+++ b/drivers/net/phy/bcm54140.c
@@ -0,0 +1,860 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Broadcom BCM54140 Quad SGMII/QSGMII Copper/Fiber Gigabit PHY
+ *
+ * Copyright (c) 2020 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/brcmphy.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#include "bcm-phy-lib.h"
+
+/* RDB per-port registers
+ */
+#define BCM54140_RDB_ISR 0x00a /* interrupt status */
+#define BCM54140_RDB_IMR 0x00b /* interrupt mask */
+#define BCM54140_RDB_INT_LINK BIT(1) /* link status changed */
+#define BCM54140_RDB_INT_SPEED BIT(2) /* link speed change */
+#define BCM54140_RDB_INT_DUPLEX BIT(3) /* duplex mode changed */
+#define BCM54140_RDB_SPARE1 0x012 /* spare control 1 */
+#define BCM54140_RDB_SPARE1_LSLM BIT(2) /* link speed LED mode */
+#define BCM54140_RDB_SPARE2 0x014 /* spare control 2 */
+#define BCM54140_RDB_SPARE2_WS_RTRY_DIS BIT(8) /* wirespeed retry disable */
+#define BCM54140_RDB_SPARE2_WS_RTRY_LIMIT GENMASK(4, 2) /* retry limit */
+#define BCM54140_RDB_SPARE3 0x015 /* spare control 3 */
+#define BCM54140_RDB_SPARE3_BIT0 BIT(0)
+#define BCM54140_RDB_LED_CTRL 0x019 /* LED control */
+#define BCM54140_RDB_LED_CTRL_ACTLINK0 BIT(4)
+#define BCM54140_RDB_LED_CTRL_ACTLINK1 BIT(8)
+#define BCM54140_RDB_C_APWR 0x01a /* auto power down control */
+#define BCM54140_RDB_C_APWR_SINGLE_PULSE BIT(8) /* single pulse */
+#define BCM54140_RDB_C_APWR_APD_MODE_DIS 0 /* APD disable */
+#define BCM54140_RDB_C_APWR_APD_MODE_EN 1 /* APD enable */
+#define BCM54140_RDB_C_APWR_APD_MODE_DIS2 2 /* APD disable */
+#define BCM54140_RDB_C_APWR_APD_MODE_EN_ANEG 3 /* APD enable w/ aneg */
+#define BCM54140_RDB_C_APWR_APD_MODE_MASK GENMASK(6, 5)
+#define BCM54140_RDB_C_APWR_SLP_TIM_MASK BIT(4) /* sleep timer */
+#define BCM54140_RDB_C_APWR_SLP_TIM_2_7 0 /* 2.7s */
+#define BCM54140_RDB_C_APWR_SLP_TIM_5_4 1 /* 5.4s */
+#define BCM54140_RDB_C_PWR 0x02a /* copper power control */
+#define BCM54140_RDB_C_PWR_ISOLATE BIT(5) /* super isolate mode */
+#define BCM54140_RDB_C_MISC_CTRL 0x02f /* misc copper control */
+#define BCM54140_RDB_C_MISC_CTRL_WS_EN BIT(4) /* wirespeed enable */
+
+/* RDB global registers
+ */
+#define BCM54140_RDB_TOP_IMR 0x82d /* interrupt mask */
+#define BCM54140_RDB_TOP_IMR_PORT0 BIT(4)
+#define BCM54140_RDB_TOP_IMR_PORT1 BIT(5)
+#define BCM54140_RDB_TOP_IMR_PORT2 BIT(6)
+#define BCM54140_RDB_TOP_IMR_PORT3 BIT(7)
+#define BCM54140_RDB_MON_CTRL 0x831 /* monitor control */
+#define BCM54140_RDB_MON_CTRL_V_MODE BIT(3) /* voltage mode */
+#define BCM54140_RDB_MON_CTRL_SEL_MASK GENMASK(2, 1)
+#define BCM54140_RDB_MON_CTRL_SEL_TEMP 0 /* measure temperature */
+#define BCM54140_RDB_MON_CTRL_SEL_1V0 1 /* measure AVDDL 1.0V */
+#define BCM54140_RDB_MON_CTRL_SEL_3V3 2 /* measure AVDDH 3.3V */
+#define BCM54140_RDB_MON_CTRL_SEL_RR 3 /* measure all, round-robin */
+#define BCM54140_RDB_MON_CTRL_PWR_DOWN BIT(0) /* power-down monitor */
+#define BCM54140_RDB_MON_TEMP_VAL 0x832 /* temperature value */
+#define BCM54140_RDB_MON_TEMP_MAX 0x833 /* temperature high thresh */
+#define BCM54140_RDB_MON_TEMP_MIN 0x834 /* temperature low thresh */
+#define BCM54140_RDB_MON_TEMP_DATA_MASK GENMASK(9, 0)
+#define BCM54140_RDB_MON_1V0_VAL 0x835 /* AVDDL 1.0V value */
+#define BCM54140_RDB_MON_1V0_MAX 0x836 /* AVDDL 1.0V high thresh */
+#define BCM54140_RDB_MON_1V0_MIN 0x837 /* AVDDL 1.0V low thresh */
+#define BCM54140_RDB_MON_1V0_DATA_MASK GENMASK(10, 0)
+#define BCM54140_RDB_MON_3V3_VAL 0x838 /* AVDDH 3.3V value */
+#define BCM54140_RDB_MON_3V3_MAX 0x839 /* AVDDH 3.3V high thresh */
+#define BCM54140_RDB_MON_3V3_MIN 0x83a /* AVDDH 3.3V low thresh */
+#define BCM54140_RDB_MON_3V3_DATA_MASK GENMASK(11, 0)
+#define BCM54140_RDB_MON_ISR 0x83b /* interrupt status */
+#define BCM54140_RDB_MON_ISR_3V3 BIT(2) /* AVDDH 3.3V alarm */
+#define BCM54140_RDB_MON_ISR_1V0 BIT(1) /* AVDDL 1.0V alarm */
+#define BCM54140_RDB_MON_ISR_TEMP BIT(0) /* temperature alarm */
+
+/* According to the datasheet the formula is:
+ * T = 413.35 - (0.49055 * bits[9:0])
+ */
+#define BCM54140_HWMON_TO_TEMP(v) (413350L - (v) * 491)
+#define BCM54140_HWMON_FROM_TEMP(v) DIV_ROUND_CLOSEST_ULL(413350L - (v), 491)
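+/* Worked example (derived from the macros above, not from the datasheet):
+ * a raw reading of 700 yields 413350 - 700 * 491 = 69650 millidegrees
+ * Celsius, i.e. roughly 69.7 degrees; 491 is the datasheet's 0.49055
+ * degrees/LSB expressed in millidegrees and rounded.
+ */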
+
+/* According to the datasheet the formula is:
+ * U = bits[10:0] / 1024 * 880 / 0.7
+ *
+ * Normalized:
+ * U = bits[10:0] / 2048 * 2514
+ */
+#define BCM54140_HWMON_TO_IN_1V0(v) ((v) * 2514 >> 11)
+#define BCM54140_HWMON_FROM_IN_1V0(v) DIV_ROUND_CLOSEST_ULL(((v) << 11), 2514)
+
+/* According to the datasheet the formula is:
+ * U = bits[11:0] / 1024 * 220 / 0.2
+ *
+ * Normalized:
+ * U = bits[11:0] / 4096 * 4400
+ */
+#define BCM54140_HWMON_TO_IN_3V3(v) ((v) * 4400 >> 12)
+#define BCM54140_HWMON_FROM_IN_3V3(v) DIV_ROUND_CLOSEST_ULL(((v) << 12), 4400)
+
+#define BCM54140_HWMON_TO_IN(ch, v) ((ch) ? BCM54140_HWMON_TO_IN_3V3(v) \
+ : BCM54140_HWMON_TO_IN_1V0(v))
+#define BCM54140_HWMON_FROM_IN(ch, v) ((ch) ? BCM54140_HWMON_FROM_IN_3V3(v) \
+ : BCM54140_HWMON_FROM_IN_1V0(v))
+#define BCM54140_HWMON_IN_MASK(ch) ((ch) ? BCM54140_RDB_MON_3V3_DATA_MASK \
+ : BCM54140_RDB_MON_1V0_DATA_MASK)
+#define BCM54140_HWMON_IN_VAL_REG(ch) ((ch) ? BCM54140_RDB_MON_3V3_VAL \
+ : BCM54140_RDB_MON_1V0_VAL)
+#define BCM54140_HWMON_IN_MIN_REG(ch) ((ch) ? BCM54140_RDB_MON_3V3_MIN \
+ : BCM54140_RDB_MON_1V0_MIN)
+#define BCM54140_HWMON_IN_MAX_REG(ch) ((ch) ? BCM54140_RDB_MON_3V3_MAX \
+ : BCM54140_RDB_MON_1V0_MAX)
+#define BCM54140_HWMON_IN_ALARM_BIT(ch) ((ch) ? BCM54140_RDB_MON_ISR_3V3 \
+ : BCM54140_RDB_MON_ISR_1V0)
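+
+/* Worked example (derived from the macros above): on the AVDDL channel
+ * (ch = 0) a raw value of 815 reads as 815 * 2514 >> 11 = 1000 mV, and on
+ * the AVDDH channel (ch = 1) a raw value of 3072 reads as
+ * 3072 * 4400 >> 12 = 3300 mV.
+ */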
+
+/* This PHY has two different PHY IDs depending on its MODE_SEL pin. This
+ * pin chooses between 4x SGMII and QSGMII mode:
+ * AE02_5009 4x SGMII
+ * AE02_5019 QSGMII
+ */
+#define BCM54140_PHY_ID_MASK 0xffffffe8
+
+#define BCM54140_PHY_ID_REV(phy_id) ((phy_id) & 0x7)
+#define BCM54140_REV_B0 1
+
+#define BCM54140_DEFAULT_DOWNSHIFT 5
+#define BCM54140_MAX_DOWNSHIFT 9
+
+struct bcm54140_priv {
+ int port;
+ int base_addr;
+#if IS_ENABLED(CONFIG_HWMON)
+ /* protect the alarm bits */
+ struct mutex alarm_lock;
+ u16 alarm;
+#endif
+};
+
+#if IS_ENABLED(CONFIG_HWMON)
+static umode_t bcm54140_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_min:
+ case hwmon_in_max:
+ return 0644;
+ case hwmon_in_label:
+ case hwmon_in_input:
+ case hwmon_in_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ return 0644;
+ case hwmon_temp_input:
+ case hwmon_temp_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int bcm54140_hwmon_read_alarm(struct device *dev, unsigned int bit,
+ long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ struct bcm54140_priv *priv = phydev->priv;
+ int tmp, ret = 0;
+
+ mutex_lock(&priv->alarm_lock);
+
+ /* latch any alarm bits */
+ tmp = bcm_phy_read_rdb(phydev, BCM54140_RDB_MON_ISR);
+ if (tmp < 0) {
+ ret = tmp;
+ goto out;
+ }
+ priv->alarm |= tmp;
+
+ *val = !!(priv->alarm & bit);
+ priv->alarm &= ~bit;
+
+out:
+ mutex_unlock(&priv->alarm_lock);
+ return ret;
+}
+
+static int bcm54140_hwmon_read_temp(struct device *dev, u32 attr, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ u16 reg;
+ int tmp;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = BCM54140_RDB_MON_TEMP_VAL;
+ break;
+ case hwmon_temp_min:
+ reg = BCM54140_RDB_MON_TEMP_MIN;
+ break;
+ case hwmon_temp_max:
+ reg = BCM54140_RDB_MON_TEMP_MAX;
+ break;
+ case hwmon_temp_alarm:
+ return bcm54140_hwmon_read_alarm(dev,
+ BCM54140_RDB_MON_ISR_TEMP,
+ val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ tmp = bcm_phy_read_rdb(phydev, reg);
+ if (tmp < 0)
+ return tmp;
+
+ *val = BCM54140_HWMON_TO_TEMP(tmp & BCM54140_RDB_MON_TEMP_DATA_MASK);
+
+ return 0;
+}
+
+static int bcm54140_hwmon_read_in(struct device *dev, u32 attr,
+ int channel, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ u16 bit, reg;
+ int tmp;
+
+ switch (attr) {
+ case hwmon_in_input:
+ reg = BCM54140_HWMON_IN_VAL_REG(channel);
+ break;
+ case hwmon_in_min:
+ reg = BCM54140_HWMON_IN_MIN_REG(channel);
+ break;
+ case hwmon_in_max:
+ reg = BCM54140_HWMON_IN_MAX_REG(channel);
+ break;
+ case hwmon_in_alarm:
+ bit = BCM54140_HWMON_IN_ALARM_BIT(channel);
+ return bcm54140_hwmon_read_alarm(dev, bit, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ tmp = bcm_phy_read_rdb(phydev, reg);
+ if (tmp < 0)
+ return tmp;
+
+ tmp &= BCM54140_HWMON_IN_MASK(channel);
+ *val = BCM54140_HWMON_TO_IN(channel, tmp);
+
+ return 0;
+}
+
+static int bcm54140_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return bcm54140_hwmon_read_temp(dev, attr, val);
+ case hwmon_in:
+ return bcm54140_hwmon_read_in(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const char *const bcm54140_hwmon_in_labels[] = {
+ "AVDDL",
+ "AVDDH",
+};
+
+static int bcm54140_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *str = bcm54140_hwmon_in_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bcm54140_hwmon_write_temp(struct device *dev, u32 attr,
+ int channel, long val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ u16 mask = BCM54140_RDB_MON_TEMP_DATA_MASK;
+ u16 reg;
+
+ val = clamp_val(val, BCM54140_HWMON_TO_TEMP(mask),
+ BCM54140_HWMON_TO_TEMP(0));
+
+ switch (attr) {
+ case hwmon_temp_min:
+ reg = BCM54140_RDB_MON_TEMP_MIN;
+ break;
+ case hwmon_temp_max:
+ reg = BCM54140_RDB_MON_TEMP_MAX;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return bcm_phy_modify_rdb(phydev, reg, mask,
+ BCM54140_HWMON_FROM_TEMP(val));
+}
+
+static int bcm54140_hwmon_write_in(struct device *dev, u32 attr,
+ int channel, long val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ u16 mask = BCM54140_HWMON_IN_MASK(channel);
+ u16 reg;
+
+ val = clamp_val(val, 0, BCM54140_HWMON_TO_IN(channel, mask));
+
+ switch (attr) {
+ case hwmon_in_min:
+ reg = BCM54140_HWMON_IN_MIN_REG(channel);
+ break;
+ case hwmon_in_max:
+ reg = BCM54140_HWMON_IN_MAX_REG(channel);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return bcm_phy_modify_rdb(phydev, reg, mask,
+ BCM54140_HWMON_FROM_IN(channel, val));
+}
+
+static int bcm54140_hwmon_write(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return bcm54140_hwmon_write_temp(dev, attr, channel, val);
+ case hwmon_in:
+ return bcm54140_hwmon_write_in(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *bcm54140_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_ALARM),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM | HWMON_I_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops bcm54140_hwmon_ops = {
+ .is_visible = bcm54140_hwmon_is_visible,
+ .read = bcm54140_hwmon_read,
+ .read_string = bcm54140_hwmon_read_string,
+ .write = bcm54140_hwmon_write,
+};
+
+static const struct hwmon_chip_info bcm54140_chip_info = {
+ .ops = &bcm54140_hwmon_ops,
+ .info = bcm54140_hwmon_info,
+};
+
+static int bcm54140_enable_monitoring(struct phy_device *phydev)
+{
+ u16 mask, set;
+
+ /* 3.3V voltage mode */
+ set = BCM54140_RDB_MON_CTRL_V_MODE;
+
+ /* select round-robin */
+ mask = BCM54140_RDB_MON_CTRL_SEL_MASK;
+ set |= FIELD_PREP(BCM54140_RDB_MON_CTRL_SEL_MASK,
+ BCM54140_RDB_MON_CTRL_SEL_RR);
+
+ /* remove power-down bit */
+ mask |= BCM54140_RDB_MON_CTRL_PWR_DOWN;
+
+ return bcm_phy_modify_rdb(phydev, BCM54140_RDB_MON_CTRL, mask, set);
+}
+
+static int bcm54140_probe_once(struct phy_device *phydev)
+{
+ struct device *hwmon;
+ int ret;
+
+ /* enable hardware monitoring */
+ ret = bcm54140_enable_monitoring(phydev);
+ if (ret)
+ return ret;
+
+ hwmon = devm_hwmon_device_register_with_info(&phydev->mdio.dev,
+ "BCM54140", phydev,
+ &bcm54140_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+#endif
+
+static int bcm54140_base_read_rdb(struct phy_device *phydev, u16 rdb)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __phy_package_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ if (ret < 0)
+ goto out;
+
+ ret = __phy_package_read(phydev, MII_BCM54XX_RDB_DATA);
+
+out:
+ phy_unlock_mdio_bus(phydev);
+ return ret;
+}
+
+static int bcm54140_base_write_rdb(struct phy_device *phydev,
+ u16 rdb, u16 val)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __phy_package_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ if (ret < 0)
+ goto out;
+
+ ret = __phy_package_write(phydev, MII_BCM54XX_RDB_DATA, val);
+
+out:
+ phy_unlock_mdio_bus(phydev);
+ return ret;
+}
+
+/* Under some circumstances a core PLL may not lock; this will then prevent
+ * successful link establishment. Restart the PLL after the voltages are
+ * stable to work around this issue.
+ */
+static int bcm54140_b0_workaround(struct phy_device *phydev)
+{
+ int spare3;
+ int ret;
+
+ spare3 = bcm_phy_read_rdb(phydev, BCM54140_RDB_SPARE3);
+ if (spare3 < 0)
+ return spare3;
+
+ spare3 &= ~BCM54140_RDB_SPARE3_BIT0;
+
+ ret = bcm_phy_write_rdb(phydev, BCM54140_RDB_SPARE3, spare3);
+ if (ret)
+ return ret;
+
+ ret = phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN);
+ if (ret)
+ return ret;
+
+ ret = phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);
+ if (ret)
+ return ret;
+
+ spare3 |= BCM54140_RDB_SPARE3_BIT0;
+
+ return bcm_phy_write_rdb(phydev, BCM54140_RDB_SPARE3, spare3);
+}
+
+/* The BCM54140 is a quad PHY where only the first port has access to the
+ * global registers. Thus we need to find out the PHY address of that
+ * first port.
+ */
+static int bcm54140_get_base_addr_and_port(struct phy_device *phydev)
+{
+ struct bcm54140_priv *priv = phydev->priv;
+ struct mii_bus *bus = phydev->mdio.bus;
+ int addr, min_addr, max_addr;
+ int step = 1;
+ u32 phy_id;
+ int tmp;
+
+ min_addr = phydev->mdio.addr;
+ max_addr = phydev->mdio.addr;
+ addr = phydev->mdio.addr;
+
+ /* We scan forwards and backwards and look for PHYs which have the
+ * same phy_id as ours. Step 1 scans forward, step 2 scans
+ * backwards. Once we are finished, we have a min_addr and
+ * max_addr which represent the range of PHY addresses of PHYs of
+ * the same type. There is one caveat: there may be many PHYs of
+ * the same type, but we know that each quad PHY takes exactly 4
+ * consecutive addresses. Therefore we can deduce our offset
+ * to the base address of this quad PHY.
+ */
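+ /* For example: if this PHY sits at address 6 and addresses 4..11 all
+ * report our PHY ID, then min_addr = 4 and max_addr = 11, so
+ * port = (6 - 4) % 4 = 2 and base_addr = 4.
+ */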
+
+ while (1) {
+ if (step == 3) {
+ break;
+ } else if (step == 1) {
+ max_addr = addr;
+ addr++;
+ } else {
+ min_addr = addr;
+ addr--;
+ }
+
+ if (addr < 0 || addr >= PHY_MAX_ADDR) {
+ addr = phydev->mdio.addr;
+ step++;
+ continue;
+ }
+
+ /* read the PHY id */
+ tmp = mdiobus_read(bus, addr, MII_PHYSID1);
+ if (tmp < 0)
+ return tmp;
+ phy_id = tmp << 16;
+ tmp = mdiobus_read(bus, addr, MII_PHYSID2);
+ if (tmp < 0)
+ return tmp;
+ phy_id |= tmp;
+
+ /* see if it is still the same PHY */
+ if ((phy_id & phydev->drv->phy_id_mask) !=
+ (phydev->drv->phy_id & phydev->drv->phy_id_mask)) {
+ addr = phydev->mdio.addr;
+ step++;
+ }
+ }
+
+ /* The range we get should be a multiple of four. Please note that both
+ * min_addr and max_addr are inclusive, so we have to add one to their
+ * difference.
+ */
+ if ((max_addr - min_addr + 1) % 4) {
+ dev_err(&phydev->mdio.dev,
+ "Detected Quad PHY IDs %d..%d doesn't make sense.\n",
+ min_addr, max_addr);
+ return -EINVAL;
+ }
+
+ priv->port = (phydev->mdio.addr - min_addr) % 4;
+ priv->base_addr = phydev->mdio.addr - priv->port;
+
+ return 0;
+}
+
+static int bcm54140_probe(struct phy_device *phydev)
+{
+ struct bcm54140_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ ret = bcm54140_get_base_addr_and_port(phydev);
+ if (ret)
+ return ret;
+
+ devm_phy_package_join(&phydev->mdio.dev, phydev, priv->base_addr, 0);
+
+#if IS_ENABLED(CONFIG_HWMON)
+ mutex_init(&priv->alarm_lock);
+
+ if (phy_package_init_once(phydev)) {
+ ret = bcm54140_probe_once(phydev);
+ if (ret)
+ return ret;
+ }
+#endif
+
+ phydev_dbg(phydev, "probed (port %d, base PHY address %d)\n",
+ priv->port, priv->base_addr);
+
+ return 0;
+}
+
+static int bcm54140_config_init(struct phy_device *phydev)
+{
+ u16 reg = 0xffff;
+ int ret;
+
+ /* Apply hardware errata */
+ if (BCM54140_PHY_ID_REV(phydev->phy_id) == BCM54140_REV_B0) {
+ ret = bcm54140_b0_workaround(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* Unmask events we are interested in. */
+ reg &= ~(BCM54140_RDB_INT_DUPLEX |
+ BCM54140_RDB_INT_SPEED |
+ BCM54140_RDB_INT_LINK);
+ ret = bcm_phy_write_rdb(phydev, BCM54140_RDB_IMR, reg);
+ if (ret)
+ return ret;
+
+ /* LED1=LINKSPD[1], LED2=LINKSPD[2], LED3=LINK/ACTIVITY */
+ ret = bcm_phy_modify_rdb(phydev, BCM54140_RDB_SPARE1,
+ 0, BCM54140_RDB_SPARE1_LSLM);
+ if (ret)
+ return ret;
+
+ ret = bcm_phy_modify_rdb(phydev, BCM54140_RDB_LED_CTRL,
+ 0, BCM54140_RDB_LED_CTRL_ACTLINK0);
+ if (ret)
+ return ret;
+
+ /* disable super isolate mode */
+ return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_PWR,
+ BCM54140_RDB_C_PWR_ISOLATE, 0);
+}
+
+static int bcm54140_did_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
+
+ return (ret < 0) ? 0 : ret;
+}
+
+static int bcm54140_ack_intr(struct phy_device *phydev)
+{
+ int reg;
+
+ /* clear pending interrupts */
+ reg = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
+ if (reg < 0)
+ return reg;
+
+ return 0;
+}
+
+static int bcm54140_config_intr(struct phy_device *phydev)
+{
+ struct bcm54140_priv *priv = phydev->priv;
+ static const u16 port_to_imr_bit[] = {
+ BCM54140_RDB_TOP_IMR_PORT0, BCM54140_RDB_TOP_IMR_PORT1,
+ BCM54140_RDB_TOP_IMR_PORT2, BCM54140_RDB_TOP_IMR_PORT3,
+ };
+ int reg;
+
+ if (priv->port >= ARRAY_SIZE(port_to_imr_bit))
+ return -EINVAL;
+
+ reg = bcm54140_base_read_rdb(phydev, BCM54140_RDB_TOP_IMR);
+ if (reg < 0)
+ return reg;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ reg &= ~port_to_imr_bit[priv->port];
+ else
+ reg |= port_to_imr_bit[priv->port];
+
+ return bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
+}
+
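+/* Downshift (wirespeed) encoding used below: a count of 1 maps to
+ * WS_RTRY_DIS, counts of 2..9 map to WS_RTRY_LIMIT = count - 2, and a
+ * count of 0 disables wirespeed altogether via C_MISC_CTRL_WS_EN.
+ */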
+static int bcm54140_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val;
+
+ val = bcm_phy_read_rdb(phydev, BCM54140_RDB_C_MISC_CTRL);
+ if (val < 0)
+ return val;
+
+ if (!(val & BCM54140_RDB_C_MISC_CTRL_WS_EN)) {
+ *data = DOWNSHIFT_DEV_DISABLE;
+ return 0;
+ }
+
+ val = bcm_phy_read_rdb(phydev, BCM54140_RDB_SPARE2);
+ if (val < 0)
+ return val;
+
+ if (val & BCM54140_RDB_SPARE2_WS_RTRY_DIS)
+ *data = 1;
+ else
+ *data = FIELD_GET(BCM54140_RDB_SPARE2_WS_RTRY_LIMIT, val) + 2;
+
+ return 0;
+}
+
+static int bcm54140_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 mask, set;
+ int ret;
+
+ if (cnt > BCM54140_MAX_DOWNSHIFT && cnt != DOWNSHIFT_DEV_DEFAULT_COUNT)
+ return -EINVAL;
+
+ if (!cnt)
+ return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_MISC_CTRL,
+ BCM54140_RDB_C_MISC_CTRL_WS_EN, 0);
+
+ if (cnt == DOWNSHIFT_DEV_DEFAULT_COUNT)
+ cnt = BCM54140_DEFAULT_DOWNSHIFT;
+
+ if (cnt == 1) {
+ mask = 0;
+ set = BCM54140_RDB_SPARE2_WS_RTRY_DIS;
+ } else {
+ mask = BCM54140_RDB_SPARE2_WS_RTRY_DIS;
+ mask |= BCM54140_RDB_SPARE2_WS_RTRY_LIMIT;
+ set = FIELD_PREP(BCM54140_RDB_SPARE2_WS_RTRY_LIMIT, cnt - 2);
+ }
+ ret = bcm_phy_modify_rdb(phydev, BCM54140_RDB_SPARE2,
+ mask, set);
+ if (ret)
+ return ret;
+
+ return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_MISC_CTRL,
+ 0, BCM54140_RDB_C_MISC_CTRL_WS_EN);
+}
+
+static int bcm54140_get_edpd(struct phy_device *phydev, u16 *tx_interval)
+{
+ int val;
+
+ val = bcm_phy_read_rdb(phydev, BCM54140_RDB_C_APWR);
+ if (val < 0)
+ return val;
+
+ switch (FIELD_GET(BCM54140_RDB_C_APWR_APD_MODE_MASK, val)) {
+ case BCM54140_RDB_C_APWR_APD_MODE_DIS:
+ case BCM54140_RDB_C_APWR_APD_MODE_DIS2:
+ *tx_interval = ETHTOOL_PHY_EDPD_DISABLE;
+ break;
+ case BCM54140_RDB_C_APWR_APD_MODE_EN:
+ case BCM54140_RDB_C_APWR_APD_MODE_EN_ANEG:
+ switch (FIELD_GET(BCM54140_RDB_C_APWR_SLP_TIM_MASK, val)) {
+ case BCM54140_RDB_C_APWR_SLP_TIM_2_7:
+ *tx_interval = 2700;
+ break;
+ case BCM54140_RDB_C_APWR_SLP_TIM_5_4:
+ *tx_interval = 5400;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int bcm54140_set_edpd(struct phy_device *phydev, u16 tx_interval)
+{
+ u16 mask, set;
+
+ mask = BCM54140_RDB_C_APWR_APD_MODE_MASK;
+ if (tx_interval == ETHTOOL_PHY_EDPD_DISABLE)
+ set = FIELD_PREP(BCM54140_RDB_C_APWR_APD_MODE_MASK,
+ BCM54140_RDB_C_APWR_APD_MODE_DIS);
+ else
+ set = FIELD_PREP(BCM54140_RDB_C_APWR_APD_MODE_MASK,
+ BCM54140_RDB_C_APWR_APD_MODE_EN_ANEG);
+
+ /* enable single pulse mode */
+ set |= BCM54140_RDB_C_APWR_SINGLE_PULSE;
+
+ /* set sleep timer */
+ mask |= BCM54140_RDB_C_APWR_SLP_TIM_MASK;
+ switch (tx_interval) {
+ case ETHTOOL_PHY_EDPD_DFLT_TX_MSECS:
+ case ETHTOOL_PHY_EDPD_DISABLE:
+ case 2700:
+ set |= BCM54140_RDB_C_APWR_SLP_TIM_2_7;
+ break;
+ case 5400:
+ set |= BCM54140_RDB_C_APWR_SLP_TIM_5_4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_APWR, mask, set);
+}
+
+static int bcm54140_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return bcm54140_get_downshift(phydev, data);
+ case ETHTOOL_PHY_EDPD:
+ return bcm54140_get_edpd(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bcm54140_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return bcm54140_set_downshift(phydev, *(const u8 *)data);
+ case ETHTOOL_PHY_EDPD:
+ return bcm54140_set_edpd(phydev, *(const u16 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct phy_driver bcm54140_drivers[] = {
+ {
+ .phy_id = PHY_ID_BCM54140,
+ .phy_id_mask = BCM54140_PHY_ID_MASK,
+ .name = "Broadcom BCM54140",
+ .flags = PHY_POLL_CABLE_TEST,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = bcm54140_config_init,
+ .did_interrupt = bcm54140_did_interrupt,
+ .ack_interrupt = bcm54140_ack_intr,
+ .config_intr = bcm54140_config_intr,
+ .probe = bcm54140_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .soft_reset = genphy_soft_reset,
+ .get_tunable = bcm54140_get_tunable,
+ .set_tunable = bcm54140_set_tunable,
+ .cable_test_start = bcm_phy_cable_test_start_rdb,
+ .cable_test_get_status = bcm_phy_cable_test_get_status_rdb,
+ },
+};
+module_phy_driver(bcm54140_drivers);
+
+static struct mdio_device_id __maybe_unused bcm54140_tbl[] = {
+ { PHY_ID_BCM54140, BCM54140_PHY_ID_MASK },
+ { }
+};
+
+MODULE_AUTHOR("Michael Walle");
+MODULE_DESCRIPTION("Broadcom BCM54140 PHY driver");
+MODULE_DEVICE_TABLE(mdio, bcm54140_tbl);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index d14d91b759b7..8cd8d188542a 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -195,7 +195,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54810)
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54810 &&
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54811)
return;
val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
@@ -214,8 +215,10 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
clk125en = false;
} else {
if (phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) {
- /* Here, bit 0 _enables_ CLK125 when set */
- val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
+ if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54811) {
+ /* Here, bit 0 _enables_ CLK125 when set */
+ val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
+ }
clk125en = false;
}
}
@@ -226,7 +229,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810)
+ if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
+ BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
val |= BCM54810_SHD_SCR3_TRDDAPD;
else
val |= BCM54XX_SHD_SCR3_TRDDAPD;
@@ -331,6 +335,32 @@ static int bcm54xx_resume(struct phy_device *phydev)
return bcm54xx_config_init(phydev);
}
+static int bcm54811_config_init(struct phy_device *phydev)
+{
+ int err, reg;
+
+ /* Disable BroadR-Reach function. */
+ reg = bcm_phy_read_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
+ reg &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN;
+ err = bcm_phy_write_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
+ reg);
+ if (err < 0)
+ return err;
+
+ err = bcm54xx_config_init(phydev);
+
+ /* Enable CLK125 MUX on LED4 if ref clock is enabled. */
+ if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) {
+ reg = bcm_phy_read_exp(phydev, BCM54612E_EXP_SPARE0);
+ err = bcm_phy_write_exp(phydev, BCM54612E_EXP_SPARE0,
+ BCM54612E_LED4_CLK125OUT_EN | reg);
+ if (err < 0)
+ return err;
+ }
+
+ return err;
+}
+
static int bcm5482_config_init(struct phy_device *phydev)
{
int err, reg;
@@ -727,6 +757,17 @@ static struct phy_driver broadcom_drivers[] = {
.suspend = genphy_suspend,
.resume = bcm54xx_resume,
}, {
+ .phy_id = PHY_ID_BCM54811,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM54811",
+ /* PHY_GBIT_FEATURES */
+ .config_init = bcm54811_config_init,
+ .config_aneg = bcm5481_config_aneg,
+ .ack_interrupt = bcm_phy_ack_intr,
+ .config_intr = bcm_phy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = bcm54xx_resume,
+}, {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
@@ -786,6 +827,19 @@ static struct phy_driver broadcom_drivers[] = {
.get_stats = bcm53xx_phy_get_stats,
.probe = bcm53xx_phy_probe,
}, {
+ .phy_id = PHY_ID_BCM53125,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM53125",
+ .flags = PHY_IS_INTERNAL,
+ /* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm53xx_phy_get_stats,
+ .probe = bcm53xx_phy_probe,
+ .config_init = bcm54xx_config_init,
+ .ack_interrupt = bcm_phy_ack_intr,
+ .config_intr = bcm_phy_config_intr,
+}, {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
@@ -807,6 +861,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5464, 0xfffffff0 },
{ PHY_ID_BCM5481, 0xfffffff0 },
{ PHY_ID_BCM54810, 0xfffffff0 },
+ { PHY_ID_BCM54811, 0xfffffff0 },
{ PHY_ID_BCM5482, 0xfffffff0 },
{ PHY_ID_BCM50610, 0xfffffff0 },
{ PHY_ID_BCM50610M, 0xfffffff0 },
@@ -814,6 +869,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCMAC131, 0xfffffff0 },
{ PHY_ID_BCM5241, 0xfffffff0 },
{ PHY_ID_BCM5395, 0xfffffff0 },
+ { PHY_ID_BCM53125, 0xfffffff0 },
{ PHY_ID_BCM89610, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 856cdc36aacd..aac51362c0fe 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -82,7 +82,6 @@ static struct phy_driver cortina_driver[] = {
.features = PHY_10GBIT_FEATURES,
.config_aneg = gen10g_config_aneg,
.read_status = cortina_read_status,
- .soft_reset = genphy_no_soft_reset,
.probe = cortina_probe,
},
};
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index b55e3c0403ed..4017ae1692d8 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -365,7 +365,7 @@ static int dp83867_get_downshift(struct phy_device *phydev, u8 *data)
break;
default:
return -EINVAL;
- };
+ }
*data = enable ? count : DOWNSHIFT_DEV_DISABLE;
@@ -400,7 +400,7 @@ static int dp83867_set_downshift(struct phy_device *phydev, u8 cnt)
phydev_err(phydev,
"Downshift count must be 1, 2, 4 or 8\n");
return -EINVAL;
- };
+ }
val = DP83867_DOWNSHIFT_EN;
val |= FIELD_PREP(DP83867_DOWNSHIFT_ATTEMPT_MASK, count);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 7fc8e10c5f33..4bc7febf9248 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/phy.h>
#include <linux/marvell_phy.h>
#include <linux/bitfield.h>
@@ -42,6 +43,7 @@
#define MII_MARVELL_MSCR_PAGE 0x02
#define MII_MARVELL_LED_PAGE 0x03
#define MII_MARVELL_MISC_TEST_PAGE 0x06
+#define MII_MARVELL_VCT7_PAGE 0x07
#define MII_MARVELL_WOL_PAGE 0x11
#define MII_M1011_IEVENT 0x13
@@ -162,6 +164,36 @@
#define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */
#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */
+#define MII_VCT7_PAIR_0_DISTANCE 0x10
+#define MII_VCT7_PAIR_1_DISTANCE 0x11
+#define MII_VCT7_PAIR_2_DISTANCE 0x12
+#define MII_VCT7_PAIR_3_DISTANCE 0x13
+
+#define MII_VCT7_RESULTS 0x14
+#define MII_VCT7_RESULTS_PAIR3_MASK 0xf000
+#define MII_VCT7_RESULTS_PAIR2_MASK 0x0f00
+#define MII_VCT7_RESULTS_PAIR1_MASK 0x00f0
+#define MII_VCT7_RESULTS_PAIR0_MASK 0x000f
+#define MII_VCT7_RESULTS_PAIR3_SHIFT 12
+#define MII_VCT7_RESULTS_PAIR2_SHIFT 8
+#define MII_VCT7_RESULTS_PAIR1_SHIFT 4
+#define MII_VCT7_RESULTS_PAIR0_SHIFT 0
+#define MII_VCT7_RESULTS_INVALID 0
+#define MII_VCT7_RESULTS_OK 1
+#define MII_VCT7_RESULTS_OPEN 2
+#define MII_VCT7_RESULTS_SAME_SHORT 3
+#define MII_VCT7_RESULTS_CROSS_SHORT 4
+#define MII_VCT7_RESULTS_BUSY 9
+
+#define MII_VCT7_CTRL 0x15
+#define MII_VCT7_CTRL_RUN_NOW BIT(15)
+#define MII_VCT7_CTRL_RUN_ANEG BIT(14)
+#define MII_VCT7_CTRL_DISABLE_CROSS BIT(13)
+#define MII_VCT7_CTRL_RUN_AFTER_BREAK_LINK BIT(12)
+#define MII_VCT7_CTRL_IN_PROGRESS BIT(11)
+#define MII_VCT7_CTRL_METERS BIT(10)
+#define MII_VCT7_CTRL_CENTIMETERS 0
+
#define LPA_PAUSE_FIBER 0x180
#define LPA_PAUSE_ASYM_FIBER 0x100
@@ -1658,6 +1690,163 @@ static void marvell_get_stats(struct phy_device *phydev,
data[i] = marvell_get_stat(phydev, i);
}
+static int marvell_vct7_cable_test_start(struct phy_device *phydev)
+{
+ int bmcr, bmsr, ret;
+
+ /* If auto-negotiation is enabled, but not complete, the cable
+ * test never completes. So disable auto-neg.
+ */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ bmsr = phy_read(phydev, MII_BMSR);
+
+ if (bmsr < 0)
+ return bmsr;
+
+ if (bmcr & BMCR_ANENABLE) {
+ ret = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (ret < 0)
+ return ret;
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* If the link is up, allow it some time to go down */
+ if (bmsr & BMSR_LSTATUS)
+ msleep(1500);
+
+ return phy_write_paged(phydev, MII_MARVELL_VCT7_PAGE,
+ MII_VCT7_CTRL,
+ MII_VCT7_CTRL_RUN_NOW |
+ MII_VCT7_CTRL_CENTIMETERS);
+}
+
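+/* The VCT7 distance registers report centimeters by default, or meters
+ * when MII_VCT7_CTRL_METERS is set; scale to centimeters, which is the
+ * unit ethnl_cable_test_fault_length() reports.
+ */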
+static int marvell_vct7_distance_to_length(int distance, bool meter)
+{
+ if (meter)
+ distance *= 100;
+
+ return distance;
+}
+
+static bool marvell_vct7_distance_valid(int result)
+{
+ switch (result) {
+ case MII_VCT7_RESULTS_OPEN:
+ case MII_VCT7_RESULTS_SAME_SHORT:
+ case MII_VCT7_RESULTS_CROSS_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int marvell_vct7_report_length(struct phy_device *phydev,
+ int pair, bool meter)
+{
+ int length;
+ int ret;
+
+ ret = phy_read_paged(phydev, MII_MARVELL_VCT7_PAGE,
+ MII_VCT7_PAIR_0_DISTANCE + pair);
+ if (ret < 0)
+ return ret;
+
+ length = marvell_vct7_distance_to_length(ret, meter);
+
+ ethnl_cable_test_fault_length(phydev, pair, length);
+
+ return 0;
+}
+
+static int marvell_vct7_cable_test_report_trans(int result)
+{
+ switch (result) {
+ case MII_VCT7_RESULTS_OK:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case MII_VCT7_RESULTS_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case MII_VCT7_RESULTS_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case MII_VCT7_RESULTS_CROSS_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static int marvell_vct7_cable_test_report(struct phy_device *phydev)
+{
+ int pair0, pair1, pair2, pair3;
+ bool meter;
+ int ret;
+
+ ret = phy_read_paged(phydev, MII_MARVELL_VCT7_PAGE,
+ MII_VCT7_RESULTS);
+ if (ret < 0)
+ return ret;
+
+ pair3 = (ret & MII_VCT7_RESULTS_PAIR3_MASK) >>
+ MII_VCT7_RESULTS_PAIR3_SHIFT;
+ pair2 = (ret & MII_VCT7_RESULTS_PAIR2_MASK) >>
+ MII_VCT7_RESULTS_PAIR2_SHIFT;
+ pair1 = (ret & MII_VCT7_RESULTS_PAIR1_MASK) >>
+ MII_VCT7_RESULTS_PAIR1_SHIFT;
+ pair0 = (ret & MII_VCT7_RESULTS_PAIR0_MASK) >>
+ MII_VCT7_RESULTS_PAIR0_SHIFT;
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ marvell_vct7_cable_test_report_trans(pair0));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
+ marvell_vct7_cable_test_report_trans(pair1));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
+ marvell_vct7_cable_test_report_trans(pair2));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
+ marvell_vct7_cable_test_report_trans(pair3));
+
+ ret = phy_read_paged(phydev, MII_MARVELL_VCT7_PAGE, MII_VCT7_CTRL);
+ if (ret < 0)
+ return ret;
+
+ meter = ret & MII_VCT7_CTRL_METERS;
+
+ if (marvell_vct7_distance_valid(pair0))
+ marvell_vct7_report_length(phydev, 0, meter);
+ if (marvell_vct7_distance_valid(pair1))
+ marvell_vct7_report_length(phydev, 1, meter);
+ if (marvell_vct7_distance_valid(pair2))
+ marvell_vct7_report_length(phydev, 2, meter);
+ if (marvell_vct7_distance_valid(pair3))
+ marvell_vct7_report_length(phydev, 3, meter);
+
+ return 0;
+}
+
+static int marvell_vct7_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret;
+
+ *finished = false;
+
+ ret = phy_read_paged(phydev, MII_MARVELL_VCT7_PAGE,
+ MII_VCT7_CTRL);
+
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MII_VCT7_CTRL_IN_PROGRESS)) {
+ *finished = true;
+
+ return marvell_vct7_cable_test_report(phydev);
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_HWMON
static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
{
@@ -2353,6 +2542,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1510",
.features = PHY_GBIT_FIBRE_FEATURES,
+ .flags = PHY_POLL_CABLE_TEST,
.probe = &m88e1510_probe,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2372,12 +2562,15 @@ static struct phy_driver marvell_drivers[] = {
.set_loopback = genphy_loopback,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
},
{
.phy_id = MARVELL_PHY_ID_88E1540,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1540",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.probe = m88e1510_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2394,6 +2587,8 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
},
{
.phy_id = MARVELL_PHY_ID_88E1545,
@@ -2401,6 +2596,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2416,6 +2612,8 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
},
{
.phy_id = MARVELL_PHY_ID_88E3016,
@@ -2442,6 +2640,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E6390",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.probe = m88e6390_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e6390_config_aneg,
@@ -2458,6 +2657,8 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
},
};
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 1f1a01c98e44..d4c2e62b2439 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -753,7 +753,6 @@ static struct phy_driver mv3310_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88x3310",
.get_features = mv3310_get_features,
- .soft_reset = genphy_no_soft_reset,
.config_init = mv3310_config_init,
.probe = mv3310_probe,
.suspend = mv3310_suspend,
@@ -771,7 +770,6 @@ static struct phy_driver mv3310_drivers[] = {
.probe = mv3310_probe,
.suspend = mv3310_suspend,
.resume = mv3310_resume,
- .soft_reset = genphy_no_soft_reset,
.config_init = mv3310_config_init,
.config_aneg = mv3310_config_aneg,
.aneg_done = mv3310_aneg_done,
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index f1ded03f0229..77fc970cdfde 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -159,7 +159,7 @@ static int iproc_mdio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- dev_info(&pdev->dev, "Broadcom iProc MDIO bus at 0x%p\n", priv->base);
+ dev_info(&pdev->dev, "Broadcom iProc MDIO bus registered\n");
return 0;
@@ -179,7 +179,7 @@ static int iproc_mdio_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-int iproc_mdio_resume(struct device *dev)
+static int iproc_mdio_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
diff --git a/drivers/net/phy/mdio-ipq4019.c b/drivers/net/phy/mdio-ipq4019.c
new file mode 100644
index 000000000000..1ce81ff2f41d
--- /dev/null
+++ b/drivers/net/phy/mdio-ipq4019.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2020 Sartura Ltd. */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#define MDIO_ADDR_REG 0x44
+#define MDIO_DATA_WRITE_REG 0x48
+#define MDIO_DATA_READ_REG 0x4c
+#define MDIO_CMD_REG 0x50
+#define MDIO_CMD_ACCESS_BUSY BIT(16)
+#define MDIO_CMD_ACCESS_START BIT(8)
+#define MDIO_CMD_ACCESS_CODE_READ 0
+#define MDIO_CMD_ACCESS_CODE_WRITE 1
+
+#define IPQ4019_MDIO_TIMEOUT 10000
+#define IPQ4019_MDIO_SLEEP 10
+
+struct ipq4019_mdio_data {
+ void __iomem *membase;
+};
+
+static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
+{
+ struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int busy;
+
+ return readl_poll_timeout(priv->membase + MDIO_CMD_REG, busy,
+ (busy & MDIO_CMD_ACCESS_BUSY) == 0,
+ IPQ4019_MDIO_SLEEP, IPQ4019_MDIO_TIMEOUT);
+}
+
+static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int cmd;
+
+ /* Reject clause 45 */
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
+
+ /* issue read command */
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ /* Wait read complete */
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+
+ /* Read and return data */
+ return readl(priv->membase + MDIO_DATA_READ_REG);
+}
+
+static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int cmd;
+
+ /* Reject clause 45 */
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+
+ /* issue write data */
+ writel(value, priv->membase + MDIO_DATA_WRITE_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
+ /* issue write command */
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ /* Wait write complete */
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ipq4019_mdio_probe(struct platform_device *pdev)
+{
+ struct ipq4019_mdio_data *priv;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
+ if (!bus)
+ return -ENOMEM;
+
+ priv = bus->priv;
+
+ priv->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->membase))
+ return PTR_ERR(priv->membase);
+
+ bus->name = "ipq4019_mdio";
+ bus->read = ipq4019_mdio_read;
+ bus->write = ipq4019_mdio_write;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
+
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+}
+
+static int ipq4019_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+
+ return 0;
+}
+
+static const struct of_device_id ipq4019_mdio_dt_ids[] = {
+ { .compatible = "qcom,ipq4019-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq4019_mdio_dt_ids);
+
+static struct platform_driver ipq4019_mdio_driver = {
+ .probe = ipq4019_mdio_probe,
+ .remove = ipq4019_mdio_remove,
+ .driver = {
+ .name = "ipq4019-mdio",
+ .of_match_table = ipq4019_mdio_dt_ids,
+ },
+};
+
+module_platform_driver(ipq4019_mdio_driver);
+
+MODULE_DESCRIPTION("ipq4019 MDIO interface driver");
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c
index 2d16fc4173c1..b72c6d185175 100644
--- a/drivers/net/phy/mdio-moxart.c
+++ b/drivers/net/phy/mdio-moxart.c
@@ -12,7 +12,6 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
#define REG_PHY_CTRL 0
#define REG_PHY_WRITE_DATA 4
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 7a4eb3f2cb74..255fdfcc13a6 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -42,14 +42,11 @@
static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
{
- int error;
-
/* Deassert the optional reset signal */
mdiodev->reset_gpio = gpiod_get_optional(&mdiodev->dev,
"reset", GPIOD_OUT_LOW);
- error = PTR_ERR_OR_ZERO(mdiodev->reset_gpio);
- if (error)
- return error;
+ if (IS_ERR(mdiodev->reset_gpio))
+ return PTR_ERR(mdiodev->reset_gpio);
if (mdiodev->reset_gpio)
gpiod_set_consumer_name(mdiodev->reset_gpio, "PHY reset");
@@ -170,7 +167,12 @@ EXPORT_SYMBOL(mdiobus_alloc_size);
static void _devm_mdiobus_free(struct device *dev, void *res)
{
- mdiobus_free(*(struct mii_bus **)res);
+ struct mii_bus *bus = *(struct mii_bus **)res;
+
+ if (bus->is_managed_registered && bus->state == MDIOBUS_REGISTERED)
+ mdiobus_unregister(bus);
+
+ mdiobus_free(bus);
}
static int devm_mdiobus_match(struct device *dev, void *res, void *data)
@@ -210,6 +212,7 @@ struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv)
if (bus) {
*ptr = bus;
devres_add(dev, ptr);
+ bus->is_managed = 1;
} else {
devres_free(ptr);
}
@@ -611,6 +614,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
}
mutex_init(&bus->mdio_lock);
+ mutex_init(&bus->shared_lock);
/* de-assert bus level PHY GPIO reset */
gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_LOW);
@@ -627,8 +631,11 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
gpiod_set_value_cansleep(gpiod, 0);
}
- if (bus->reset)
- bus->reset(bus);
+ if (bus->reset) {
+ err = bus->reset(bus);
+ if (err)
+ goto error_reset_gpiod;
+ }
for (i = 0; i < PHY_MAX_ADDR; i++) {
if ((bus->phy_mask & (1 << i)) == 0) {
@@ -657,7 +664,7 @@ error:
mdiodev->device_remove(mdiodev);
mdiodev->device_free(mdiodev);
}
-
+error_reset_gpiod:
/* Put PHYs in RESET to save power */
if (bus->reset_gpiod)
gpiod_set_value_cansleep(bus->reset_gpiod, 1);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3a4d83fa52dc..3fe552675dd2 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -19,6 +19,7 @@
* ksz9477
*/
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
@@ -490,9 +491,50 @@ static int ksz9021_config_init(struct phy_device *phydev)
/* MMD Address 0x2 */
#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
+#define MII_KSZ9031RN_RX_CTL_M GENMASK(7, 4)
+#define MII_KSZ9031RN_TX_CTL_M GENMASK(3, 0)
+
#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
+#define MII_KSZ9031RN_RXD3 GENMASK(15, 12)
+#define MII_KSZ9031RN_RXD2 GENMASK(11, 8)
+#define MII_KSZ9031RN_RXD1 GENMASK(7, 4)
+#define MII_KSZ9031RN_RXD0 GENMASK(3, 0)
+
#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
+#define MII_KSZ9031RN_TXD3 GENMASK(15, 12)
+#define MII_KSZ9031RN_TXD2 GENMASK(11, 8)
+#define MII_KSZ9031RN_TXD1 GENMASK(7, 4)
+#define MII_KSZ9031RN_TXD0 GENMASK(3, 0)
+
#define MII_KSZ9031RN_CLK_PAD_SKEW 8
+#define MII_KSZ9031RN_GTX_CLK GENMASK(9, 5)
+#define MII_KSZ9031RN_RX_CLK GENMASK(4, 0)
+
+/* KSZ9031 has internal RGMII_IDRX = 1.2ns and RGMII_IDTX = 0ns. To
+ * provide different RGMII options we need to configure delay offset
+ * for each pad relative to the built-in delay.
+ */
+/* keep rx as "No delay adjustment" and set rx_clk to +0.60ns to get delays of
+ * 1.80ns
+ */
+#define RX_ID 0x7
+#define RX_CLK_ID 0x19
+
+/* set rx to +0.30ns and rx_clk to -0.90ns to compensate the
+ * internal 1.2ns delay.
+ */
+#define RX_ND 0xc
+#define RX_CLK_ND 0x0
+
+/* set tx to -0.42ns and tx_clk to +0.96ns to get 1.38ns delay */
+#define TX_ID 0x0
+#define TX_CLK_ID 0x1f
+
+/* set tx and tx_clk to "No delay adjustment" to keep 0ns
+ * delay
+ */
+#define TX_ND 0x7
+#define TX_CLK_ND 0xf
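+
+/* The values above work out to 60 ps per step: the 4-bit pad fields are
+ * centered on 0x7 (no adjustment) and the 5-bit clock fields on 0xf, so
+ * e.g. TX_CLK_ID = 0x1f gives (0x1f - 0xf) * 60 ps = +0.96 ns and
+ * RX_CLK_ND = 0x0 gives -0.90 ns, matching the comments above.
+ */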
/* MMD Address 0x1C */
#define MII_KSZ9031RN_EDPD 0x23
@@ -501,7 +543,8 @@ static int ksz9021_config_init(struct phy_device *phydev)
static int ksz9031_of_load_skew_values(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg, size_t field_sz,
- const char *field[], u8 numfields)
+ const char *field[], u8 numfields,
+ bool *update)
{
int val[4] = {-1, -2, -3, -4};
int matches = 0;
@@ -517,6 +560,8 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
if (!matches)
return 0;
+ *update |= true;
+
if (matches < numfields)
newval = phy_read_mmd(phydev, 2, reg);
else
@@ -565,6 +610,67 @@ static int ksz9031_enable_edpd(struct phy_device *phydev)
reg | MII_KSZ9031RN_EDPD_ENABLE);
}
+static int ksz9031_config_rgmii_delay(struct phy_device *phydev)
+{
+ u16 rx, tx, rx_clk, tx_clk;
+ int ret;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ tx = TX_ND;
+ tx_clk = TX_CLK_ND;
+ rx = RX_ND;
+ rx_clk = RX_CLK_ND;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ tx = TX_ID;
+ tx_clk = TX_CLK_ID;
+ rx = RX_ID;
+ rx_clk = RX_CLK_ID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ tx = TX_ND;
+ tx_clk = TX_CLK_ND;
+ rx = RX_ID;
+ rx_clk = RX_CLK_ID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ tx = TX_ID;
+ tx_clk = TX_CLK_ID;
+ rx = RX_ND;
+ rx_clk = RX_CLK_ND;
+ break;
+ default:
+ return 0;
+ }
+
+ ret = phy_write_mmd(phydev, 2, MII_KSZ9031RN_CONTROL_PAD_SKEW,
+ FIELD_PREP(MII_KSZ9031RN_RX_CTL_M, rx) |
+ FIELD_PREP(MII_KSZ9031RN_TX_CTL_M, tx));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, 2, MII_KSZ9031RN_RX_DATA_PAD_SKEW,
+ FIELD_PREP(MII_KSZ9031RN_RXD3, rx) |
+ FIELD_PREP(MII_KSZ9031RN_RXD2, rx) |
+ FIELD_PREP(MII_KSZ9031RN_RXD1, rx) |
+ FIELD_PREP(MII_KSZ9031RN_RXD0, rx));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, 2, MII_KSZ9031RN_TX_DATA_PAD_SKEW,
+ FIELD_PREP(MII_KSZ9031RN_TXD3, tx) |
+ FIELD_PREP(MII_KSZ9031RN_TXD2, tx) |
+ FIELD_PREP(MII_KSZ9031RN_TXD1, tx) |
+ FIELD_PREP(MII_KSZ9031RN_TXD0, tx));
+ if (ret < 0)
+ return ret;
+
+ return phy_write_mmd(phydev, 2, MII_KSZ9031RN_CLK_PAD_SKEW,
+ FIELD_PREP(MII_KSZ9031RN_GTX_CLK, tx_clk) |
+ FIELD_PREP(MII_KSZ9031RN_RX_CLK, rx_clk));
+}
+
static int ksz9031_config_init(struct phy_device *phydev)
{
const struct device *dev = &phydev->mdio.dev;
@@ -597,21 +703,33 @@ static int ksz9031_config_init(struct phy_device *phydev)
} while (!of_node && dev_walker);
if (of_node) {
+ bool update = false;
+
+ if (phy_interface_is_rgmii(phydev)) {
+ result = ksz9031_config_rgmii_delay(phydev);
+ if (result < 0)
+ return result;
+ }
+
ksz9031_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_CLK_PAD_SKEW, 5,
- clk_skews, 2);
+ clk_skews, 2, &update);
ksz9031_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_CONTROL_PAD_SKEW, 4,
- control_skews, 2);
+ control_skews, 2, &update);
ksz9031_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_RX_DATA_PAD_SKEW, 4,
- rx_data_skews, 4);
+ rx_data_skews, 4, &update);
ksz9031_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
- tx_data_skews, 4);
+ tx_data_skews, 4, &update);
+
+ if (update && phydev->interface != PHY_INTERFACE_MODE_RGMII)
+ phydev_warn(phydev,
+ "*-skew-ps values should be used only with phy-mode = \"rgmii\"\n");
/* Silicon Errata Sheet (DS80000691D or DS80000692D):
* When the device links in the 1000BASE-T slave mode only,
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index 030bf8b600df..acdd8ee61a39 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -353,7 +353,6 @@ struct vsc8531_private {
const struct vsc85xx_hw_stat *hw_stats;
u64 *stats;
int nstats;
- bool pkg_init;
/* For multiple port PHYs; the MDIO address of the base PHY in the
* package.
*/
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index acddef79f4e8..6508d6536134 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -691,27 +691,23 @@ out_unlock:
/* phydev->bus->mdio_lock should be locked when using this function */
static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
{
- struct vsc8531_private *priv = phydev->priv;
-
if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
dump_stack();
}
- return __mdiobus_write(phydev->mdio.bus, priv->base_addr, regnum, val);
+ return __phy_package_write(phydev, regnum, val);
}
/* phydev->bus->mdio_lock should be locked when using this function */
static int phy_base_read(struct phy_device *phydev, u32 regnum)
{
- struct vsc8531_private *priv = phydev->priv;
-
if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
dump_stack();
}
- return __mdiobus_read(phydev->mdio.bus, priv->base_addr, regnum);
+ return __phy_package_read(phydev, regnum);
}
/* bus->mdio_lock should be locked when using this function */
@@ -1287,66 +1283,38 @@ out:
return ret;
}
-/* Check if one PHY has already done the init of the parts common to all PHYs
- * in the Quad PHY package.
- */
-static bool vsc8584_is_pkg_init(struct phy_device *phydev, bool reversed)
+static void vsc8584_get_base_addr(struct phy_device *phydev)
{
- struct mdio_device **map = phydev->mdio.bus->mdio_map;
- struct vsc8531_private *vsc8531;
- struct phy_device *phy;
- int i, addr;
-
- /* VSC8584 is a Quad PHY */
- for (i = 0; i < 4; i++) {
- vsc8531 = phydev->priv;
-
- if (reversed)
- addr = vsc8531->base_addr - i;
- else
- addr = vsc8531->base_addr + i;
-
- if (!map[addr])
- continue;
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ u16 val, addr;
- phy = container_of(map[addr], struct phy_device, mdio);
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
- if ((phy->phy_id & phydev->drv->phy_id_mask) !=
- (phydev->drv->phy_id & phydev->drv->phy_id_mask))
- continue;
+ addr = __phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_4);
+ addr >>= PHY_CNTL_4_ADDR_POS;
- vsc8531 = phy->priv;
+ val = __phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
- if (vsc8531 && vsc8531->pkg_init)
- return true;
- }
+ __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
- return false;
+ if (val & PHY_ADDR_REVERSED)
+ vsc8531->base_addr = phydev->mdio.addr + addr;
+ else
+ vsc8531->base_addr = phydev->mdio.addr - addr;
}
static int vsc8584_config_init(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
- u16 addr, val;
int ret, i;
+ u16 val;
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->mdio.bus->mdio_lock);
- __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
- MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
- addr = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
- MSCC_PHY_EXT_PHY_CNTL_4);
- addr >>= PHY_CNTL_4_ADDR_POS;
-
- val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
- MSCC_PHY_ACTIPHY_CNTL);
- if (val & PHY_ADDR_REVERSED)
- vsc8531->base_addr = phydev->mdio.addr + addr;
- else
- vsc8531->base_addr = phydev->mdio.addr - addr;
-
/* Some parts of the init sequence are identical for every PHY in the
* package. Some parts are modifying the GPIO register bank which is a
* set of registers that are affecting all PHYs, a few resetting the
@@ -1360,7 +1328,7 @@ static int vsc8584_config_init(struct phy_device *phydev)
* do the correct init sequence for all PHYs that are package-critical
* in this pre-init function.
*/
- if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0)) {
+ if (phy_package_init_once(phydev)) {
/* The following switch statement assumes that the lowest
* nibble of the phy_id_mask is always 0. This works because
* the lowest nibble of the PHY_ID's below are also 0.
@@ -1389,8 +1357,6 @@ static int vsc8584_config_init(struct phy_device *phydev)
goto err;
}
- vsc8531->pkg_init = true;
-
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
@@ -1428,7 +1394,8 @@ static int vsc8584_config_init(struct phy_device *phydev)
/* Disable SerDes for 100Base-FX */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
- PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE |
+ PROC_CMD_FIBER_PORT(vsc8531->base_addr) |
+ PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX);
if (ret)
@@ -1436,7 +1403,8 @@ static int vsc8584_config_init(struct phy_device *phydev)
/* Disable SerDes for 1000Base-X */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
- PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE |
+ PROC_CMD_FIBER_PORT(vsc8531->base_addr) |
+ PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);
if (ret)
@@ -1751,26 +1719,14 @@ static int vsc8514_config_init(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
unsigned long deadline;
- u16 val, addr;
int ret, i;
+ u16 val;
u32 reg;
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->mdio.bus->mdio_lock);
- __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
-
- addr = __phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_4);
- addr >>= PHY_CNTL_4_ADDR_POS;
-
- val = __phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
-
- if (val & PHY_ADDR_REVERSED)
- vsc8531->base_addr = phydev->mdio.addr + addr;
- else
- vsc8531->base_addr = phydev->mdio.addr - addr;
-
/* Some parts of the init sequence are identical for every PHY in the
* package. Some parts are modifying the GPIO register bank which is a
* set of registers that are affecting all PHYs, a few resetting the
@@ -1782,11 +1738,9 @@ static int vsc8514_config_init(struct phy_device *phydev)
* do the correct init sequence for all PHYs that are package-critical
* in this pre-init function.
*/
- if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0))
+ if (phy_package_init_once(phydev))
vsc8514_config_pre_init(phydev);
- vsc8531->pkg_init = true;
-
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
@@ -1992,6 +1946,10 @@ static int vsc8514_probe(struct phy_device *phydev)
phydev->priv = vsc8531;
+ vsc8584_get_base_addr(phydev);
+ devm_phy_package_join(&phydev->mdio.dev, phydev,
+ vsc8531->base_addr, 0);
+
vsc8531->nleds = 4;
vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
vsc8531->hw_stats = vsc85xx_hw_stats;
@@ -2047,6 +2005,10 @@ static int vsc8584_probe(struct phy_device *phydev)
phydev->priv = vsc8531;
+ vsc8584_get_base_addr(phydev);
+ devm_phy_package_join(&phydev->mdio.dev, phydev,
+ vsc8531->base_addr, 0);
+
vsc8531->nleds = 4;
vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
vsc8531->hw_stats = vsc8584_hw_stats;
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 47caae770ffc..0d4f9067ca71 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -5,16 +5,21 @@
*/
#include <linux/delay.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
+#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/hwmon.h>
#include <linux/bitfield.h>
+#include <linux/of_mdio.h>
+#include <linux/of_irq.h>
#define PHY_ID_MASK 0xfffffff0
#define PHY_ID_TJA1100 0x0180dc40
#define PHY_ID_TJA1101 0x0180dd00
+#define PHY_ID_TJA1102 0x0180dc80
#define MII_ECTRL 17
#define MII_ECTRL_LINK_CONTROL BIT(15)
@@ -22,10 +27,12 @@
#define MII_ECTRL_POWER_MODE_NO_CHANGE (0x0 << 11)
#define MII_ECTRL_POWER_MODE_NORMAL (0x3 << 11)
#define MII_ECTRL_POWER_MODE_STANDBY (0xc << 11)
+#define MII_ECTRL_CABLE_TEST BIT(5)
#define MII_ECTRL_CONFIG_EN BIT(2)
#define MII_ECTRL_WAKE_REQUEST BIT(0)
#define MII_CFG1 18
+#define MII_CFG1_MASTER_SLAVE BIT(15)
#define MII_CFG1_AUTO_OP BIT(14)
#define MII_CFG1_SLEEP_CONFIRM BIT(6)
#define MII_CFG1_LED_MODE_MASK GENMASK(5, 4)
@@ -40,18 +47,29 @@
#define MII_INTSRC_TEMP_ERR BIT(1)
#define MII_INTSRC_UV_ERR BIT(3)
+#define MII_INTEN 22
+#define MII_INTEN_LINK_FAIL BIT(10)
+#define MII_INTEN_LINK_UP BIT(9)
+
#define MII_COMMSTAT 23
#define MII_COMMSTAT_LINK_UP BIT(15)
#define MII_GENSTAT 24
#define MII_GENSTAT_PLL_LOCKED BIT(14)
+#define MII_EXTSTAT 25
+#define MII_EXTSTAT_SHORT_DETECT BIT(8)
+#define MII_EXTSTAT_OPEN_DETECT BIT(7)
+#define MII_EXTSTAT_POLARITY_DETECT BIT(6)
+
#define MII_COMMCFG 27
#define MII_COMMCFG_AUTO_OP BIT(15)
struct tja11xx_priv {
char *hwmon_name;
struct device *hwmon_dev;
+ struct phy_device *phydev;
+ struct work_struct phy_register_work;
};
struct tja11xx_phy_stats {
@@ -100,6 +118,11 @@ static int tja11xx_enable_link_control(struct phy_device *phydev)
return phy_set_bits(phydev, MII_ECTRL, MII_ECTRL_LINK_CONTROL);
}
+static int tja11xx_disable_link_control(struct phy_device *phydev)
+{
+ return phy_clear_bits(phydev, MII_ECTRL, MII_ECTRL_LINK_CONTROL);
+}
+
static int tja11xx_wakeup(struct phy_device *phydev)
{
int ret;
@@ -157,6 +180,70 @@ static int tja11xx_soft_reset(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
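+
+/* Run the PHY's one-shot cable diagnostic when auto-negotiation is
+ * (re)configured while the link is down and report the result through the
+ * ethtool netlink cable test interface.
+ */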
+static int tja11xx_config_aneg_cable_test(struct phy_device *phydev)
+{
+ bool finished = false;
+ int ret;
+
+ if (phydev->link)
+ return 0;
+
+ if (!phydev->drv->cable_test_start ||
+ !phydev->drv->cable_test_get_status)
+ return 0;
+
+ ret = ethnl_cable_test_alloc(phydev);
+ if (ret)
+ return ret;
+
+ ret = phydev->drv->cable_test_start(phydev);
+ if (ret)
+ return ret;
+
+ /* According to the documentation this test takes 100 usec */
+ usleep_range(100, 200);
+
+ ret = phydev->drv->cable_test_get_status(phydev, &finished);
+ if (ret)
+ return ret;
+
+ if (finished)
+ ethnl_cable_test_finished(phydev);
+
+ return 0;
+}
+
+static int tja11xx_config_aneg(struct phy_device *phydev)
+{
+ int ret, changed = 0;
+ u16 ctl = 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl |= MII_CFG1_MASTER_SLAVE;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ goto do_test;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -ENOTSUPP;
+ }
+
+ changed = phy_modify_changed(phydev, MII_CFG1, MII_CFG1_MASTER_SLAVE, ctl);
+ if (changed < 0)
+ return changed;
+
+do_test:
+ ret = tja11xx_config_aneg_cable_test(phydev);
+ if (ret)
+ return ret;
+
+ return __genphy_config_aneg(phydev, changed);
+}
+
static int tja11xx_config_init(struct phy_device *phydev)
{
int ret;
@@ -180,6 +267,7 @@ static int tja11xx_config_init(struct phy_device *phydev)
return ret;
break;
case PHY_ID_TJA1101:
+ case PHY_ID_TJA1102:
ret = phy_set_bits(phydev, MII_COMMCFG, MII_COMMCFG_AUTO_OP);
if (ret)
return ret;
@@ -213,10 +301,22 @@ static int tja11xx_read_status(struct phy_device *phydev)
{
int ret;
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
+
ret = genphy_update_link(phydev);
if (ret)
return ret;
+ ret = phy_read(phydev, MII_CFG1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MII_CFG1_MASTER_SLAVE)
+ phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
+ else
+ phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
+
if (phydev->link) {
ret = phy_read(phydev, MII_COMMSTAT);
if (ret < 0)
@@ -317,16 +417,12 @@ static const struct hwmon_chip_info tja11xx_hwmon_chip_info = {
.info = tja11xx_hwmon_info,
};
-static int tja11xx_probe(struct phy_device *phydev)
+static int tja11xx_hwmon_register(struct phy_device *phydev,
+ struct tja11xx_priv *priv)
{
struct device *dev = &phydev->mdio.dev;
- struct tja11xx_priv *priv;
int i;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
priv->hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
if (!priv->hwmon_name)
return -ENOMEM;
@@ -344,6 +440,239 @@ static int tja11xx_probe(struct phy_device *phydev)
return PTR_ERR_OR_ZERO(priv->hwmon_dev);
}
+static int tja11xx_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct tja11xx_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->phydev = phydev;
+
+ return tja11xx_hwmon_register(phydev, priv);
+}
+
+static void tja1102_p1_register(struct work_struct *work)
+{
+ struct tja11xx_priv *priv = container_of(work, struct tja11xx_priv,
+ phy_register_work);
+ struct phy_device *phydev_phy0 = priv->phydev;
+ struct mii_bus *bus = phydev_phy0->mdio.bus;
+ struct device *dev = &phydev_phy0->mdio.dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int ret;
+
+ for_each_available_child_of_node(np, child) {
+ struct phy_device *phy;
+ int addr;
+
+ addr = of_mdio_parse_addr(dev, child);
+ if (addr < 0) {
+ dev_err(dev, "Can't parse addr\n");
+ continue;
+ } else if (addr != phydev_phy0->mdio.addr + 1) {
+ /* Currently we care only about the dual-PHY chip TJA1102.
+ * If NXP someday decides to offer chips with more PHYs,
+ * this logic should be reworked.
+ */
+ dev_err(dev, "Unexpected address. Should be: %i\n",
+ phydev_phy0->mdio.addr + 1);
+ continue;
+ }
+
+ if (mdiobus_is_registered_device(bus, addr)) {
+ dev_err(dev, "device is already registered\n");
+ continue;
+ }
+
+ /* Real PHY ID of Port 1 is 0 */
+ phy = phy_device_create(bus, addr, PHY_ID_TJA1102, false, NULL);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "Can't create PHY device for Port 1: %i\n",
+ addr);
+ continue;
+ }
+
+ /* Overwrite the parent device. phy_device_create() sets the parent to
+ * the mii_bus->dev, which is not correct in this case.
+ */
+ phy->mdio.dev.parent = dev;
+
+ ret = of_mdiobus_phy_device_register(bus, phy, child, addr);
+ if (ret) {
+ /* All resources needed for Port 1 should already be
+ * available for Port 0. Both ports use the same
+ * interrupt line, so -EPROBE_DEFER would make no sense
+ * here.
+ */
+ dev_err(dev, "Can't register Port 1. Unexpected error: %i\n",
+ ret);
+ phy_device_free(phy);
+ }
+ }
+}
+
+static int tja1102_p0_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct tja11xx_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->phydev = phydev;
+ INIT_WORK(&priv->phy_register_work, tja1102_p1_register);
+
+ ret = tja11xx_hwmon_register(phydev, priv);
+ if (ret)
+ return ret;
+
+ schedule_work(&priv->phy_register_work);
+
+ return 0;
+}
+
+static int tja1102_match_phy_device(struct phy_device *phydev, bool port0)
+{
+ int ret;
+
+ if ((phydev->phy_id & PHY_ID_MASK) != PHY_ID_TJA1102)
+ return 0;
+
+ ret = phy_read(phydev, MII_PHYSID2);
+ if (ret < 0)
+ return ret;
+
+ /* TJA1102 Port 1 has phyid 0 and doesn't support temperature
+ * and undervoltage alarms.
+ */
+ if (port0)
+ return ret ? 1 : 0;
+
+ return !ret;
+}
+
+static int tja1102_p0_match_phy_device(struct phy_device *phydev)
+{
+ return tja1102_match_phy_device(phydev, true);
+}
+
+static int tja1102_p1_match_phy_device(struct phy_device *phydev)
+{
+ return tja1102_match_phy_device(phydev, false);
+}
+
+static int tja11xx_ack_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_INTSRC);
+
+ return (ret < 0) ? ret : 0;
+}
+
+static int tja11xx_config_intr(struct phy_device *phydev)
+{
+ int value = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ value = MII_INTEN_LINK_FAIL | MII_INTEN_LINK_UP;
+
+ return phy_write(phydev, MII_INTEN, value);
+}
+
+static int tja11xx_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_clear_bits(phydev, MII_COMMCFG, MII_COMMCFG_AUTO_OP);
+ if (ret)
+ return ret;
+
+ ret = tja11xx_wakeup(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = tja11xx_disable_link_control(phydev);
+ if (ret < 0)
+ return ret;
+
+ return phy_set_bits(phydev, MII_ECTRL, MII_ECTRL_CABLE_TEST);
+}
+
+/*
+ * | BI_DA+ | BI_DA- | Result
+ * | open | open | open
+ * | + short to - | - short to + | short
+ * | short to Vdd | open | open
+ * | open | short to Vdd | open
+ * | short to Vdd | short to Vdd | short
+ * | short to GND | open | open
+ * | open | short to GND | open
+ * | short to GND | short to GND | short
+ * | connected to active link partner (master) | short and open
+ */
+static int tja11xx_cable_test_report_trans(u32 result)
+{
+ u32 mask = MII_EXTSTAT_SHORT_DETECT | MII_EXTSTAT_OPEN_DETECT;
+
+ if ((result & mask) == mask) {
+ /* connected to active link partner (master) */
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ } else if ((result & mask) == 0) {
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ } else if (result & MII_EXTSTAT_SHORT_DETECT) {
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ } else if (result & MII_EXTSTAT_OPEN_DETECT) {
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ } else {
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static int tja11xx_cable_test_report(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_EXTSTAT);
+ if (ret < 0)
+ return ret;
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ tja11xx_cable_test_report_trans(ret));
+
+ return 0;
+}
+
+static int tja11xx_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret;
+
+ *finished = false;
+
+ ret = phy_read(phydev, MII_ECTRL);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MII_ECTRL_CABLE_TEST)) {
+ *finished = true;
+
+ ret = phy_set_bits(phydev, MII_COMMCFG, MII_COMMCFG_AUTO_OP);
+ if (ret)
+ return ret;
+
+ return tja11xx_cable_test_report(phydev);
+ }
+
+ return 0;
+}
+
static struct phy_driver tja11xx_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_TJA1100),
@@ -351,6 +680,7 @@ static struct phy_driver tja11xx_driver[] = {
.features = PHY_BASIC_T1_FEATURES,
.probe = tja11xx_probe,
.soft_reset = tja11xx_soft_reset,
+ .config_aneg = tja11xx_config_aneg,
.config_init = tja11xx_config_init,
.read_status = tja11xx_read_status,
.suspend = genphy_suspend,
@@ -366,8 +696,47 @@ static struct phy_driver tja11xx_driver[] = {
.features = PHY_BASIC_T1_FEATURES,
.probe = tja11xx_probe,
.soft_reset = tja11xx_soft_reset,
+ .config_aneg = tja11xx_config_aneg,
+ .config_init = tja11xx_config_init,
+ .read_status = tja11xx_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .set_loopback = genphy_loopback,
+ /* Statistics */
+ .get_sset_count = tja11xx_get_sset_count,
+ .get_strings = tja11xx_get_strings,
+ .get_stats = tja11xx_get_stats,
+ }, {
+ .name = "NXP TJA1102 Port 0",
+ .features = PHY_BASIC_T1_FEATURES,
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = tja1102_p0_probe,
+ .soft_reset = tja11xx_soft_reset,
+ .config_aneg = tja11xx_config_aneg,
+ .config_init = tja11xx_config_init,
+ .read_status = tja11xx_read_status,
+ .match_phy_device = tja1102_p0_match_phy_device,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .set_loopback = genphy_loopback,
+ /* Statistics */
+ .get_sset_count = tja11xx_get_sset_count,
+ .get_strings = tja11xx_get_strings,
+ .get_stats = tja11xx_get_stats,
+ .ack_interrupt = tja11xx_ack_interrupt,
+ .config_intr = tja11xx_config_intr,
+ .cable_test_start = tja11xx_cable_test_start,
+ .cable_test_get_status = tja11xx_cable_test_get_status,
+ }, {
+ .name = "NXP TJA1102 Port 1",
+ .features = PHY_BASIC_T1_FEATURES,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* currently no probe for Port 1 is needed */
+ .soft_reset = tja11xx_soft_reset,
+ .config_aneg = tja11xx_config_aneg,
.config_init = tja11xx_config_init,
.read_status = tja11xx_read_status,
+ .match_phy_device = tja1102_p1_match_phy_device,
.suspend = genphy_suspend,
.resume = genphy_resume,
.set_loopback = genphy_loopback,
@@ -375,6 +744,10 @@ static struct phy_driver tja11xx_driver[] = {
.get_sset_count = tja11xx_get_sset_count,
.get_strings = tja11xx_get_strings,
.get_stats = tja11xx_get_stats,
+ .ack_interrupt = tja11xx_ack_interrupt,
+ .config_intr = tja11xx_config_intr,
+ .cable_test_start = tja11xx_cable_test_start,
+ .cable_test_get_status = tja11xx_cable_test_get_status,
}
};
@@ -383,6 +756,7 @@ module_phy_driver(tja11xx_driver);
static struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1101) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_TJA1102) },
{ }
};
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 67ba47ae5284..defe09d94422 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -564,6 +564,5 @@ struct phy_driver genphy_c45_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic Clause 45 PHY",
- .soft_reset = genphy_no_soft_reset,
.read_status = genphy_c45_read_status,
};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 20ca6418f7bc..d4bbf79dab6c 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -15,12 +15,14 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/netlink.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
#include <linux/sfp.h>
@@ -29,6 +31,9 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <net/sock.h>
#define PHY_STATE_TIME HZ
@@ -44,6 +49,7 @@ static const char *phy_state_to_str(enum phy_state st)
PHY_STATE_STR(UP)
PHY_STATE_STR(RUNNING)
PHY_STATE_STR(NOLINK)
+ PHY_STATE_STR(CABLETEST)
PHY_STATE_STR(HALTED)
}
@@ -295,7 +301,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
phydev->advertising, autoneg == AUTONEG_ENABLE);
phydev->duplex = duplex;
-
+ phydev->master_slave_set = cmd->base.master_slave_cfg;
phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
/* Restart the PHY */
@@ -314,6 +320,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.speed = phydev->speed;
cmd->base.duplex = phydev->duplex;
+ cmd->base.master_slave_cfg = phydev->master_slave_get;
+ cmd->base.master_slave_state = phydev->master_slave_state;
if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
cmd->base.port = PORT_BNC;
else
@@ -470,6 +478,80 @@ static void phy_trigger_machine(struct phy_device *phydev)
phy_queue_state_machine(phydev, 0);
}
+static void phy_abort_cable_test(struct phy_device *phydev)
+{
+ int err;
+
+ ethnl_cable_test_finished(phydev);
+
+ err = phy_init_hw(phydev);
+ if (err)
+ phydev_err(phydev, "Error while aborting cable test");
+}
+
+int phy_start_cable_test(struct phy_device *phydev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = phydev->attached_dev;
+ int err = -ENOMEM;
+
+ if (!(phydev->drv &&
+ phydev->drv->cable_test_start &&
+ phydev->drv->cable_test_get_status)) {
+ NL_SET_ERR_MSG(extack,
+ "PHY driver does not support cable testing");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&phydev->lock);
+ if (phydev->state == PHY_CABLETEST) {
+ NL_SET_ERR_MSG(extack,
+ "PHY already performing a test");
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (phydev->state < PHY_UP ||
+ phydev->state > PHY_CABLETEST) {
+ NL_SET_ERR_MSG(extack,
+ "PHY not configured. Try setting interface up");
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = ethnl_cable_test_alloc(phydev);
+ if (err)
+ goto out;
+
+ /* Mark the carrier down until the test is complete */
+ phy_link_down(phydev, true);
+
+ netif_testing_on(dev);
+ err = phydev->drv->cable_test_start(phydev);
+ if (err) {
+ netif_testing_off(dev);
+ phy_link_up(phydev);
+ goto out_free;
+ }
+
+ phydev->state = PHY_CABLETEST;
+
+ if (phy_polling_mode(phydev))
+ phy_trigger_machine(phydev);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+
+out_free:
+ ethnl_cable_test_free(phydev);
+out:
+ mutex_unlock(&phydev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(phy_start_cable_test);
+
static int phy_config_aneg(struct phy_device *phydev)
{
if (phydev->drv->config_aneg)
@@ -800,6 +882,8 @@ EXPORT_SYMBOL(phy_free_interrupt);
*/
void phy_stop(struct phy_device *phydev)
{
+ struct net_device *dev = phydev->attached_dev;
+
if (!phy_is_started(phydev)) {
WARN(1, "called from state %s\n",
phy_state_to_str(phydev->state));
@@ -808,6 +892,11 @@ void phy_stop(struct phy_device *phydev)
mutex_lock(&phydev->lock);
+ if (phydev->state == PHY_CABLETEST) {
+ phy_abort_cable_test(phydev);
+ netif_testing_off(dev);
+ }
+
if (phydev->sfp_bus)
sfp_upstream_stop(phydev->sfp_bus);
@@ -868,8 +957,10 @@ void phy_state_machine(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
+ struct net_device *dev = phydev->attached_dev;
bool needs_aneg = false, do_suspend = false;
enum phy_state old_state;
+ bool finished = false;
int err = 0;
mutex_lock(&phydev->lock);
@@ -888,6 +979,23 @@ void phy_state_machine(struct work_struct *work)
case PHY_RUNNING:
err = phy_check_link_status(phydev);
break;
+ case PHY_CABLETEST:
+ err = phydev->drv->cable_test_get_status(phydev, &finished);
+ if (err) {
+ phy_abort_cable_test(phydev);
+ netif_testing_off(dev);
+ needs_aneg = true;
+ phydev->state = PHY_UP;
+ break;
+ }
+
+ if (finished) {
+ ethnl_cable_test_finished(phydev);
+ netif_testing_off(dev);
+ needs_aneg = true;
+ phydev->state = PHY_UP;
+ }
+ break;
case PHY_HALTED:
if (phydev->link) {
phydev->link = 0;
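
For orientation, phy_start_cable_test() added above is the kernel-side entry point for the ethtool netlink cable-test request (exposed to userspace as an ethtool cable-test command). Below is a minimal caller sketch, not part of this patch; the function name is illustrative and only phy_start_cable_test() itself comes from the diff above:

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/phy.h>

/* Hypothetical caller sketch: kick off a cable test on an attached PHY. */
static int example_start_cable_test(struct net_device *dev,
				    struct netlink_ext_ack *extack)
{
	if (!dev->phydev)
		return -EOPNOTSUPP;

	/* Moves the PHY into PHY_CABLETEST; results are reported over
	 * ethtool netlink once cable_test_get_status() signals that the
	 * test has finished (see the state machine hunk above).
	 */
	return phy_start_cable_test(dev->phydev, extack);
}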
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ac2784192472..c3a107cf578e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1082,8 +1082,12 @@ int phy_init_hw(struct phy_device *phydev)
if (!phydev->drv)
return 0;
- if (phydev->drv->soft_reset)
+ if (phydev->drv->soft_reset) {
ret = phydev->drv->soft_reset(phydev);
+ /* see comment in genphy_soft_reset for an explanation */
+ if (!ret)
+ phydev->suspended = 0;
+ }
if (ret < 0)
return ret;
@@ -1458,6 +1462,144 @@ bool phy_driver_is_genphy_10g(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
/**
+ * phy_package_join - join a common PHY group
+ * @phydev: target phy_device struct
+ * @addr: cookie and PHY address for global register access
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This joins a PHY group and provides a shared storage for all phydevs in
+ * this group. This is intended to be used for packages which contain
+ * more than one PHY, for example a quad PHY transceiver.
+ *
+ * The addr parameter serves as a cookie which has to have the same value
+ * for all members of one group and as a PHY address to access generic
+ * registers of a PHY package. Usually, one of the PHY addresses of the
+ * different PHYs in the package provides access to these global registers.
+ * The address given here will be used in the phy_package_read()
+ * and phy_package_write() convenience functions. If your PHY doesn't have
+ * global registers you can just pick any of the PHY addresses.
+ *
+ * This will set the shared pointer of the phydev to the shared storage.
+ * If this is the first call for this cookie, the shared storage will be
+ * allocated. If priv_size is non-zero, the given number of bytes is
+ * allocated for the priv member.
+ *
+ * Returns < 0 on error, 0 on success. In particular, calling phy_package_join()
+ * with the same cookie but a different priv_size is an error.
+ */
+int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ struct phy_package_shared *shared;
+ int ret;
+
+ if (addr < 0 || addr >= PHY_MAX_ADDR)
+ return -EINVAL;
+
+ mutex_lock(&bus->shared_lock);
+ shared = bus->shared[addr];
+ if (!shared) {
+ ret = -ENOMEM;
+ shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+ if (!shared)
+ goto err_unlock;
+ if (priv_size) {
+ shared->priv = kzalloc(priv_size, GFP_KERNEL);
+ if (!shared->priv)
+ goto err_free;
+ shared->priv_size = priv_size;
+ }
+ shared->addr = addr;
+ refcount_set(&shared->refcnt, 1);
+ bus->shared[addr] = shared;
+ } else {
+ ret = -EINVAL;
+ if (priv_size && priv_size != shared->priv_size)
+ goto err_unlock;
+ refcount_inc(&shared->refcnt);
+ }
+ mutex_unlock(&bus->shared_lock);
+
+ phydev->shared = shared;
+
+ return 0;
+
+err_free:
+ kfree(shared);
+err_unlock:
+ mutex_unlock(&bus->shared_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_package_join);
+
+/**
+ * phy_package_leave - leave a common PHY group
+ * @phydev: target phy_device struct
+ *
+ * This leaves a PHY group created by phy_package_join(). If this phydev
+ * was the last user of the shared data between the group, this data is
+ * freed. Resets the phydev->shared pointer to NULL.
+ */
+void phy_package_leave(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct mii_bus *bus = phydev->mdio.bus;
+
+ if (!shared)
+ return;
+
+ if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
+ bus->shared[shared->addr] = NULL;
+ mutex_unlock(&bus->shared_lock);
+ kfree(shared->priv);
+ kfree(shared);
+ }
+
+ phydev->shared = NULL;
+}
+EXPORT_SYMBOL_GPL(phy_package_leave);
+
+static void devm_phy_package_leave(struct device *dev, void *res)
+{
+ phy_package_leave(*(struct phy_device **)res);
+}
+
+/**
+ * devm_phy_package_join - resource managed phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @addr: cookie and PHY address for global register access
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed phy_package_join(). For a PHY package joined with this function,
+ * phy_package_leave() is automatically called on driver detach. See
+ * phy_package_join() for more information.
+ */
+int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+ int addr, size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = phy_package_join(phydev, addr, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_phy_package_join);
+
+/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
*
@@ -1524,6 +1666,9 @@ int phy_suspend(struct phy_device *phydev)
struct phy_driver *phydrv = phydev->drv;
int ret;
+ if (phydev->suspended)
+ return 0;
+
/* If the device has WOL enabled, we cannot suspend the PHY */
phy_ethtool_get_wol(phydev, &wol);
if (wol.wolopts || (netdev && netdev->wol_enabled))
@@ -1768,6 +1913,90 @@ int genphy_setup_forced(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_setup_forced);
+static int genphy_setup_master_slave(struct phy_device *phydev)
+{
+ u16 ctl = 0;
+
+ if (!phydev->is_gigabit_capable)
+ return 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ ctl |= CTL1000_PREFER_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
+ break;
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl |= CTL1000_AS_MASTER;
+ /* fallthrough */
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ ctl |= CTL1000_ENABLE_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return phy_modify_changed(phydev, MII_CTRL1000,
+ (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER |
+ CTL1000_PREFER_MASTER), ctl);
+}
+
+static int genphy_read_master_slave(struct phy_device *phydev)
+{
+ int cfg, state;
+ int val;
+
+ if (!phydev->is_gigabit_capable) {
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
+ return 0;
+ }
+
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+
+ val = phy_read(phydev, MII_CTRL1000);
+ if (val < 0)
+ return val;
+
+ if (val & CTL1000_ENABLE_MASTER) {
+ if (val & CTL1000_AS_MASTER)
+ cfg = MASTER_SLAVE_CFG_MASTER_FORCE;
+ else
+ cfg = MASTER_SLAVE_CFG_SLAVE_FORCE;
+ } else {
+ if (val & CTL1000_PREFER_MASTER)
+ cfg = MASTER_SLAVE_CFG_MASTER_PREFERRED;
+ else
+ cfg = MASTER_SLAVE_CFG_SLAVE_PREFERRED;
+ }
+
+ val = phy_read(phydev, MII_STAT1000);
+ if (val < 0)
+ return val;
+
+ if (val & LPA_1000MSFAIL) {
+ state = MASTER_SLAVE_STATE_ERR;
+ } else if (phydev->link) {
+ /* these bits are valid only for an active link */
+ if (val & LPA_1000MSRES)
+ state = MASTER_SLAVE_STATE_MASTER;
+ else
+ state = MASTER_SLAVE_STATE_SLAVE;
+ } else {
+ state = MASTER_SLAVE_STATE_UNKNOWN;
+ }
+
+ phydev->master_slave_get = cfg;
+ phydev->master_slave_state = state;
+
+ return 0;
+}
+
/**
* genphy_restart_aneg - Enable and Restart Autonegotiation
* @phydev: target phy_device struct
@@ -1826,6 +2055,12 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
if (genphy_config_eee_advert(phydev))
changed = true;
+ err = genphy_setup_master_slave(phydev);
+ if (err < 0)
+ return err;
+ else if (err)
+ changed = true;
+
if (AUTONEG_ENABLE != phydev->autoneg)
return genphy_setup_forced(phydev);
@@ -2060,6 +2295,10 @@ int genphy_read_status(struct phy_device *phydev)
phydev->pause = 0;
phydev->asym_pause = 0;
+ err = genphy_read_master_slave(phydev);
+ if (err < 0)
+ return err;
+
err = genphy_read_lpa(phydev);
if (err < 0)
return err;
@@ -2154,6 +2393,12 @@ int genphy_soft_reset(struct phy_device *phydev)
if (ret < 0)
return ret;
+ /* Clause 22 states that setting bit BMCR_RESET sets control registers
+ * to their default value. Therefore the POWER DOWN bit is supposed to
+ * be cleared after soft reset.
+ */
+ phydev->suspended = 0;
+
ret = phy_poll_reset(phydev);
if (ret)
return ret;
@@ -2627,7 +2872,6 @@ static struct phy_driver genphy_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
- .soft_reset = genphy_no_soft_reset,
.get_features = genphy_read_abilities,
.suspend = genphy_suspend,
.resume = genphy_resume,
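
The phy_package_join()/phy_package_leave()/devm_phy_package_join() helpers introduced above are what the vsc8514/vsc8584 probe hunks at the top of this diff rely on. Below is a minimal probe sketch, not part of this patch; the driver structure and base_addr field are illustrative, and only the devm_phy_package_join() call itself matches the API added above:

#include <linux/phy.h>

struct example_priv {
	int base_addr;	/* package base address, however the driver determines it */
};

/* Hypothetical probe sketch for one PHY of a multi-PHY package. */
static int example_package_phy_probe(struct phy_device *phydev)
{
	struct example_priv *priv = phydev->priv;

	/* The base address is both the group cookie and the PHY address
	 * used by phy_package_read()/phy_package_write(); the devm_
	 * variant calls phy_package_leave() automatically on detach.
	 */
	return devm_phy_package_join(&phydev->mdio.dev, phydev,
				     priv->base_addr, 0);
}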
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 34ca12aec61b..0f23bec431c1 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -480,8 +480,8 @@ static void phylink_get_fixed_state(struct phylink *pl,
struct phylink_link_state *state)
{
*state = pl->link_config;
- if (pl->get_fixed_state)
- pl->get_fixed_state(pl->netdev, state);
+ if (pl->config->get_fixed_state)
+ pl->config->get_fixed_state(pl->config, state);
else if (pl->link_gpio)
state->link = !!gpiod_get_value_cansleep(pl->link_gpio);
@@ -1045,32 +1045,6 @@ void phylink_disconnect_phy(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_disconnect_phy);
/**
- * phylink_fixed_state_cb() - allow setting a fixed link callback
- * @pl: a pointer to a &struct phylink returned from phylink_create()
- * @cb: callback to execute to determine the fixed link state.
- *
- * The MAC driver should call this driver when the state of its link
- * can be determined through e.g: an out of band MMIO register.
- */
-int phylink_fixed_state_cb(struct phylink *pl,
- void (*cb)(struct net_device *dev,
- struct phylink_link_state *state))
-{
- /* It does not make sense to let the link be overriden unless we use
- * MLO_AN_FIXED
- */
- if (pl->cfg_link_an_mode != MLO_AN_FIXED)
- return -EINVAL;
-
- mutex_lock(&pl->state_mutex);
- pl->get_fixed_state = cb;
- mutex_unlock(&pl->state_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(phylink_fixed_state_cb);
-
-/**
* phylink_mac_change() - notify phylink of a change in MAC state
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @up: indicates whether the link is currently up.
@@ -1106,6 +1080,8 @@ static irqreturn_t phylink_link_handler(int irq, void *data)
*/
void phylink_start(struct phylink *pl)
{
+ bool poll = false;
+
ASSERT_RTNL();
phylink_info(pl, "configuring for %s/%s link mode\n",
@@ -1142,10 +1118,18 @@ void phylink_start(struct phylink *pl)
irq = 0;
}
if (irq <= 0)
- mod_timer(&pl->link_poll, jiffies + HZ);
+ poll = true;
+ }
+
+ switch (pl->cfg_link_an_mode) {
+ case MLO_AN_FIXED:
+ poll |= pl->config->poll_fixed_state;
+ break;
+ case MLO_AN_INBAND:
+ poll |= pl->config->pcs_poll;
+ break;
}
- if ((pl->cfg_link_an_mode == MLO_AN_FIXED && pl->get_fixed_state) ||
- pl->config->pcs_poll)
+ if (poll)
mod_timer(&pl->link_poll, jiffies + HZ);
if (pl->phydev)
phy_start(pl->phydev);
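
With phylink_fixed_state_cb() removed above, a MAC driver now supplies its out-of-band fixed-link state callback through struct phylink_config and sets poll_fixed_state so phylink_start() polls it. A minimal sketch, not part of this patch; the structure and helper names are illustrative, only the two phylink_config fields come from the diff above:

#include <linux/phylink.h>

struct example_mac {
	struct phylink_config phylink_config;
	bool oob_link_up;	/* updated elsewhere, e.g. from an MMIO register */
};

static void example_get_fixed_state(struct phylink_config *config,
				    struct phylink_link_state *state)
{
	struct example_mac *mac = container_of(config, struct example_mac,
					       phylink_config);

	state->link = mac->oob_link_up;
}

static void example_mac_phylink_setup(struct example_mac *mac)
{
	mac->phylink_config.get_fixed_state = example_get_fixed_state;
	/* No link interrupt, so have phylink_start() poll the callback. */
	mac->phylink_config.poll_fixed_state = true;
}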
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 2d99e9de6ee1..c7229d022a27 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -11,6 +11,7 @@
#include <linux/bitops.h>
#include <linux/phy.h>
#include <linux/module.h>
+#include <linux/delay.h>
#define RTL821x_PHYSR 0x11
#define RTL821x_PHYSR_DUPLEX BIT(13)
@@ -526,6 +527,16 @@ static int rtl8125_match_phy_device(struct phy_device *phydev)
rtlgen_supports_2_5gbps(phydev);
}
+static int rtlgen_resume(struct phy_device *phydev)
+{
+ int ret = genphy_resume(phydev);
+
+ /* Internal PHYs from RTL8168h up may not be instantly ready */
+ msleep(20);
+
+ return ret;
+}
+
static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
@@ -609,7 +620,7 @@ static struct phy_driver realtek_drvs[] = {
.match_phy_device = rtlgen_match_phy_device,
.read_status = rtlgen_read_status,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
.read_mmd = rtlgen_read_mmd,
@@ -621,7 +632,7 @@ static struct phy_driver realtek_drvs[] = {
.config_aneg = rtl8125_config_aneg,
.read_status = rtl8125_read_status,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
.read_mmd = rtl8125_read_mmd,
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index beb054b931ee..8057ea8dbc21 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -78,7 +78,6 @@ static struct phy_driver teranetics_driver[] = {
.phy_id_mask = 0xffffffff,
.name = "Teranetics TN2020",
.features = PHY_10GBIT_FEATURES,
- .soft_reset = genphy_no_soft_reset,
.aneg_done = teranetics_aneg_done,
.config_aneg = gen10g_config_aneg,
.read_status = teranetics_read_status,
diff --git a/drivers/net/plip/Kconfig b/drivers/net/plip/Kconfig
index b41035be2d51..e03556d1d0c2 100644
--- a/drivers/net/plip/Kconfig
+++ b/drivers/net/plip/Kconfig
@@ -21,7 +21,7 @@ config PLIP
bits at a time (mode 0) or with special PLIP cables, to be used on
bidirectional parallel ports only, which can transmit 8 bits at a
time (mode 1); you can find the wiring of these cables in
- <file:Documentation/networking/PLIP.txt>. The cables can be up to
+ <file:Documentation/networking/plip.rst>. The cables can be up to
15m long. Mode 0 works also if one of the machines runs DOS/Windows
and has some PLIP software installed, e.g. the Crynwr PLIP packet
driver (<http://oak.oakland.edu/simtel.net/msdos/pktdrvr-pre.html>)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 22cc2cb9d878..7d005896a0f9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1410,6 +1410,8 @@ static int ppp_dev_init(struct net_device *dev)
{
struct ppp *ppp;
+ netdev_lockdep_set_classes(dev);
+
ppp = netdev_priv(dev);
/* Let the netdevice take a reference on the ppp file. This ensures
* that ppp_destroy_interface() won't run before the device gets
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 8eeb38c6199e..2056d6ad04b5 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -166,7 +166,8 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
return 0;
}
-static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t rionet_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
int i;
struct rionet_private *rnet = netdev_priv(ndev);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 04845a4017f9..8c1e02752ff6 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1647,6 +1647,7 @@ static int team_init(struct net_device *dev)
lockdep_register_key(&team->team_lock_key);
__mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
+ netdev_lockdep_set_classes(dev);
return 0;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 44889eba1dbc..c54f967e2c66 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1671,6 +1671,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
xdp.rxq = &tfile->xdp_rxq;
+ xdp.frame_sz = buflen;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
if (act == XDP_REDIRECT || act == XDP_TX) {
@@ -2411,6 +2412,7 @@ static int tun_xdp_one(struct tun_struct *tun,
}
xdp_set_data_meta_invalid(xdp);
xdp->rxq = &tfile->xdp_rxq;
+ xdp->frame_sz = buflen;
act = bpf_prog_run_xdp(xdp_prog, xdp);
err = tun_xdp_act(tun, xdp_prog, xdp, act);
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 93044cf1417a..950711448f39 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -31,6 +31,7 @@
#define AX_ACCESS_PHY 0x02
#define AX_ACCESS_EEPROM 0x04
#define AX_ACCESS_EFUS 0x05
+#define AX_RELOAD_EEPROM_EFUSE 0x06
#define AX_PAUSE_WATERLVL_HIGH 0x54
#define AX_PAUSE_WATERLVL_LOW 0x55
@@ -611,6 +612,81 @@ ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
return 0;
}
+static int
+ax88179_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u16 *eeprom_buff;
+ int first_word;
+ int last_word;
+ int ret;
+ int i;
+
+ netdev_dbg(net, "write EEPROM len %d, offset %d, magic 0x%x\n",
+ eeprom->len, eeprom->offset, eeprom->magic);
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->magic != AX88179_EEPROM_MAGIC)
+ return -EINVAL;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
+ GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ /* align data to 16 bit boundaries, read the missing data from
+ * the EEPROM
+ */
+ if (eeprom->offset & 1) {
+ ret = ax88179_read_cmd(dev, AX_ACCESS_EEPROM, first_word, 1, 2,
+ &eeprom_buff[0]);
+ if (ret < 0) {
+ netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", first_word);
+ goto free;
+ }
+ }
+
+ if ((eeprom->offset + eeprom->len) & 1) {
+ ret = ax88179_read_cmd(dev, AX_ACCESS_EEPROM, last_word, 1, 2,
+ &eeprom_buff[last_word - first_word]);
+ if (ret < 0) {
+ netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", last_word);
+ goto free;
+ }
+ }
+
+ memcpy((u8 *)eeprom_buff + (eeprom->offset & 1), data, eeprom->len);
+
+ for (i = first_word; i <= last_word; i++) {
+ netdev_dbg(net, "write to EEPROM at offset 0x%02x, data 0x%04x\n",
+ i, eeprom_buff[i - first_word]);
+ ret = ax88179_write_cmd(dev, AX_ACCESS_EEPROM, i, 1, 2,
+ &eeprom_buff[i - first_word]);
+ if (ret < 0) {
+ netdev_err(net, "Failed to write EEPROM at offset 0x%02x.\n", i);
+ goto free;
+ }
+ msleep(20);
+ }
+
+ /* reload EEPROM data */
+ ret = ax88179_write_cmd(dev, AX_RELOAD_EEPROM_EFUSE, 0x0000, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_err(net, "Failed to reload EEPROM data\n");
+ goto free;
+ }
+
+ ret = 0;
+free:
+ kfree(eeprom_buff);
+ return ret;
+}
+
static int ax88179_get_link_ksettings(struct net_device *net,
struct ethtool_link_ksettings *cmd)
{
@@ -784,7 +860,7 @@ static int ax88179_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
struct usbnet *dev = netdev_priv(net);
struct ax88179_data *priv = (struct ax88179_data *)dev->data;
- int ret = -EOPNOTSUPP;
+ int ret;
priv->eee_enabled = edata->eee_enabled;
if (!priv->eee_enabled) {
@@ -822,6 +898,7 @@ static const struct ethtool_ops ax88179_ethtool_ops = {
.set_wol = ax88179_set_wol,
.get_eeprom_len = ax88179_get_eeprom_len,
.get_eeprom = ax88179_get_eeprom,
+ .set_eeprom = ax88179_set_eeprom,
.get_eee = ax88179_get_eee,
.set_eee = ax88179_set_eee,
.nway_reset = usbnet_nway_reset,
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 099d84827004..a87f0dabcdb7 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -67,7 +67,7 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
{
struct cdc_ncm_ctx *ctx;
struct usb_driver *subdriver = ERR_PTR(-ENODEV);
- int ret = -ENODEV;
+ int ret;
struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
int drvflags = 0;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 4bb8552a00d3..b0eab6e5279d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -719,7 +719,7 @@ static int qmi_wwan_change_dtr(struct usbnet *dev, bool on)
static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int status = -1;
+ int status;
u8 *buf = intf->cur_altsetting->extra;
int len = intf->cur_altsetting->extralen;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 8f8d9883d363..1af72ec284ca 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1504,15 +1504,19 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
sa->sa_family = dev->type;
- if (tp->version == RTL_VER_01) {
- ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
- } else {
- /* if device doesn't support MAC pass through this will
- * be expected to be non-zero
- */
- ret = vendor_mac_passthru_addr_read(tp, sa);
- if (ret < 0)
- ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa->sa_data);
+ ret = eth_platform_get_mac_address(&dev->dev, sa->sa_data);
+ if (ret < 0) {
+ if (tp->version == RTL_VER_01) {
+ ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
+ } else {
+ /* if device doesn't support MAC pass through this will
+ * be expected to be non-zero
+ */
+ ret = vendor_mac_passthru_addr_read(tp, sa);
+ if (ret < 0)
+ ret = pla_ocp_read(tp, PLA_BACKUP, 8,
+ sa->sa_data);
+ }
}
if (ret < 0) {
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 389d19dd7909..0abd257b634c 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -354,11 +354,6 @@ static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
}
-static inline int sierra_net_is_valid_addrlen(u8 len)
-{
- return len == sizeof(struct in_addr);
-}
-
static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
{
struct lsi_umts *lsi = (struct lsi_umts *)data;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index aece0e5eec8c..b586d2fa5551 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -405,10 +405,6 @@ static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
{
struct sk_buff *skb;
- if (!buflen) {
- buflen = SKB_DATA_ALIGN(headroom + len) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- }
skb = build_skb(head, buflen);
if (!skb)
return NULL;
@@ -564,13 +560,15 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
struct veth_stats *stats)
{
void *hard_start = frame->data - frame->headroom;
- void *head = hard_start - sizeof(struct xdp_frame);
int len = frame->len, delta = 0;
struct xdp_frame orig_frame;
struct bpf_prog *xdp_prog;
unsigned int headroom;
struct sk_buff *skb;
+ /* bpf_xdp_adjust_head() ensures BPF cannot access the xdp_frame area */
+ hard_start -= sizeof(struct xdp_frame);
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (likely(xdp_prog)) {
@@ -581,6 +579,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
xdp.data = frame->data;
xdp.data_end = frame->data + frame->len;
xdp.data_meta = frame->data - frame->metasize;
+ xdp.frame_sz = frame->frame_sz;
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -592,7 +591,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
break;
case XDP_TX:
orig_frame = *frame;
- xdp.data_hard_start = head;
xdp.rxq->mem = frame->mem;
if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
@@ -605,7 +603,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
goto xdp_xmit;
case XDP_REDIRECT:
orig_frame = *frame;
- xdp.data_hard_start = head;
xdp.rxq->mem = frame->mem;
if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
frame = &orig_frame;
@@ -629,7 +626,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
rcu_read_unlock();
headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
- skb = veth_build_skb(head, headroom, len, 0);
+ skb = veth_build_skb(hard_start, headroom, len, frame->frame_sz);
if (!skb) {
xdp_return_frame(frame);
stats->rx_drops++;
@@ -695,9 +692,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
goto drop;
}
- nskb = veth_build_skb(head,
- VETH_XDP_HEADROOM + mac_len, skb->len,
- PAGE_SIZE);
+ nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
+ skb->len, PAGE_SIZE);
if (!nskb) {
page_frag_free(head);
goto drop;
@@ -715,6 +711,11 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
xdp.data_end = xdp.data + pktlen;
xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
+
+ /* SKB "head" area always have tailroom for skb_shared_info */
+ xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
+ xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
orig_data = xdp.data;
orig_data_end = xdp.data_end;
@@ -758,6 +759,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
}
rcu_read_unlock();
+ /* check if bpf_xdp_adjust_head was used */
delta = orig_data - xdp.data;
off = mac_len + delta;
if (off > 0)
@@ -765,9 +767,11 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
else if (off < 0)
__skb_pull(skb, -off);
skb->mac_header -= delta;
+
+ /* check if bpf_xdp_adjust_tail was used */
off = xdp.data_end - orig_data_end;
if (off != 0)
- __skb_put(skb, off);
+ __skb_put(skb, off); /* positive on grow, negative on shrink */
skb->protocol = eth_type_trans(skb, rq->dev);
metalen = xdp.data - xdp.data_meta;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ce07f52d89e7..b6951aa76295 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -689,6 +689,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data_end = xdp.data + len;
xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
+ xdp.frame_sz = buflen;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -797,10 +798,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
int offset = buf - page_address(page);
struct sk_buff *head_skb, *curr_skb;
struct bpf_prog *xdp_prog;
- unsigned int truesize;
+ unsigned int truesize = mergeable_ctx_to_truesize(ctx);
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
- int err;
unsigned int metasize = 0;
+ unsigned int frame_sz;
+ int err;
head_skb = NULL;
stats->bytes += len - vi->hdr_len;
@@ -821,6 +823,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
+ /* Buffers with headroom use PAGE_SIZE as alloc size,
+ * see add_recvbuf_mergeable() + get_mergeable_buf_len()
+ */
+ frame_sz = headroom ? PAGE_SIZE : truesize;
+
/* This happens when rx buffer size is underestimated
* or headroom is not enough because of the buffer
* was refilled before XDP is set. This should only
@@ -834,6 +841,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
page, offset,
VIRTIO_XDP_HEADROOM,
&len);
+ frame_sz = PAGE_SIZE;
+
if (!xdp_page)
goto err_xdp;
offset = VIRTIO_XDP_HEADROOM;
@@ -850,6 +859,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.data_end = xdp.data + (len - vi->hdr_len);
xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
+ xdp.frame_sz = frame_sz - vi->hdr_len;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -924,7 +934,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
rcu_read_unlock();
- truesize = mergeable_ctx_to_truesize(ctx);
if (unlikely(len > truesize)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
dev->name, len, (unsigned long)ctx);
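
The xdp.frame_sz assignments added in tun, veth and virtio_net above tell the XDP core how much buffer space actually backs the frame, which is what allows a BPF program to grow a packet with bpf_xdp_adjust_tail(). A hypothetical XDP program fragment follows, assuming the usual libbpf headers; nothing in it comes from this patch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_grow_tail(struct xdp_md *ctx)
{
	/* Growing the tail is bounded by the frame_sz reported by the
	 * driver; without it the helper can only shrink the packet.
	 */
	if (bpf_xdp_adjust_tail(ctx, 4))
		return XDP_DROP;

	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";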
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 56f8aab46f89..43928a1c2f2a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -867,6 +867,7 @@ static int vrf_dev_init(struct net_device *dev)
/* similarly, oper state is irrelevant; set to up to avoid confusion */
dev->operstate = IF_OPER_UP;
+ netdev_lockdep_set_classes(dev);
return 0;
out_rth:
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index dbc0e3f7a3e2..3e21726c36e8 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -336,7 +336,7 @@ config DLCI
To use frame relay, you need supporting hardware (called FRAD) and
certain programs from the net-tools package as explained in
- <file:Documentation/networking/framerelay.txt>.
+ <file:Documentation/networking/framerelay.rst>.
To compile this driver as a module, choose M here: the
module will be called dlci.
@@ -361,7 +361,7 @@ config SDLA
These are multi-protocol cards, but only Frame Relay is supported
by the driver at this time. Please read
- <file:Documentation/networking/framerelay.txt>.
+ <file:Documentation/networking/framerelay.rst>.
To compile this driver as a module, choose M here: the
module will be called sdla.
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 1c98d781ae49..15b0ad171f4c 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -57,7 +57,7 @@ config PCMCIA_RAYCS
---help---
Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
(PC-card) wireless Ethernet networking card to your computer.
- Please read the file <file:Documentation/networking/ray_cs.txt> for
+ Please read the file <file:Documentation/networking/ray_cs.rst> for
details.
To compile this driver as a module, choose M here: the module will be
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index ea908107581d..5b6db6e66f65 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -380,6 +380,7 @@ static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 l
NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to write to the device\n");
+ kfree(cmd);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index a7478c240f78..9711f0eb9117 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -419,7 +419,7 @@ struct ce_pipe_config {
#define PIPEDIR_INOUT 3 /* bidirectional */
/* Establish a mapping between a service/direction and a pipe. */
-struct service_to_pipe {
+struct ce_service_to_pipe {
__le32 service_id;
__le32 pipedir;
__le32 pipenum;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index f26cc6989dad..22b6937ac225 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -190,6 +190,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin_workaround = true,
.tx_stats_over_pktlog = false,
.bmi_large_size_download = true,
+ .supports_peer_stats_info = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -723,15 +724,12 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (ret)
return ret;
- /* Data transfer is not initiated, when reduced Tx completion
- * is used for SDIO. disable it until fixed
- */
- param &= ~HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
+ param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
- /* Alternate credit size of 1544 as used by SDIO firmware is
- * not big enough for mac80211 / native wifi frames. disable it
- */
- param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL)
+ param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
+ else
+ param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
if (mode == ATH10K_FIRMWARE_MODE_UTF)
param &= ~HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
@@ -2717,7 +2715,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
- status = ath10k_hif_swap_mailbox(ar);
+ status = ath10k_hif_start_post(ar);
if (status) {
ath10k_err(ar, "failed to swap mailbox: %d\n", status);
goto err_hif_stop;
@@ -3280,6 +3278,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->thermal.wmi_sync);
init_completion(&ar->bss_survey_done);
init_completion(&ar->peer_delete_done);
+ init_completion(&ar->peer_stats_info_complete);
INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
@@ -3291,6 +3290,11 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
if (!ar->workqueue_aux)
goto err_free_wq;
+ ar->workqueue_tx_complete =
+ create_singlethread_workqueue("ath10k_tx_complete_wq");
+ if (!ar->workqueue_tx_complete)
+ goto err_free_aux_wq;
+
mutex_init(&ar->conf_mutex);
mutex_init(&ar->dump_mutex);
spin_lock_init(&ar->data_lock);
@@ -3318,7 +3322,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ret = ath10k_coredump_create(ar);
if (ret)
- goto err_free_aux_wq;
+ goto err_free_tx_complete;
ret = ath10k_debug_create(ar);
if (ret)
@@ -3328,12 +3332,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
err_free_coredump:
ath10k_coredump_destroy(ar);
-
+err_free_tx_complete:
+ destroy_workqueue(ar->workqueue_tx_complete);
err_free_aux_wq:
destroy_workqueue(ar->workqueue_aux);
err_free_wq:
destroy_workqueue(ar->workqueue);
-
err_free_mac:
ath10k_mac_destroy(ar);
@@ -3349,6 +3353,9 @@ void ath10k_core_destroy(struct ath10k *ar)
flush_workqueue(ar->workqueue_aux);
destroy_workqueue(ar->workqueue_aux);
+ flush_workqueue(ar->workqueue_tx_complete);
+ destroy_workqueue(ar->workqueue_tx_complete);
+
ath10k_debug_destroy(ar);
ath10k_coredump_destroy(ar);
ath10k_htt_tx_destroy(&ar->htt);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index bd8ef576c590..ceac76553b8f 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -149,6 +149,26 @@ static inline u32 host_interest_item_address(u32 item_offset)
return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
}
+enum ath10k_phy_mode {
+ ATH10K_PHY_MODE_LEGACY = 0,
+ ATH10K_PHY_MODE_HT = 1,
+ ATH10K_PHY_MODE_VHT = 2,
+};
+
+/* Data rate 100KBPS based on IE Index */
+struct ath10k_index_ht_data_rate_type {
+ u8 beacon_rate_index;
+ u16 supported_rate[4];
+};
+
+/* Data rate 100KBPS based on IE Index */
+struct ath10k_index_vht_data_rate_type {
+ u8 beacon_rate_index;
+ u16 supported_VHT80_rate[2];
+ u16 supported_VHT40_rate[2];
+ u16 supported_VHT20_rate[2];
+};
+
struct ath10k_bmi {
bool done_sent;
};
@@ -500,8 +520,14 @@ struct ath10k_sta {
u16 peer_id;
struct rate_info txrate;
struct ieee80211_tx_info tx_info;
+ u32 tx_retries;
+ u32 tx_failed;
u32 last_tx_bitrate;
+ u32 rx_rate_code;
+ u32 rx_bitrate_kbps;
+ u32 tx_rate_code;
+ u32 tx_bitrate_kbps;
struct work_struct update_wk;
u64 rx_duration;
struct ath10k_htt_tx_stats *tx_stats;
@@ -949,6 +975,11 @@ struct ath10k {
struct ieee80211_hw *hw;
struct ieee80211_ops *ops;
struct device *dev;
+ struct msa_region {
+ dma_addr_t paddr;
+ u32 mem_size;
+ void *vaddr;
+ } msa;
u8 mac_addr[ETH_ALEN];
enum ath10k_hw_rev hw_rev;
@@ -1087,11 +1118,12 @@ struct ath10k {
int last_wmi_vdev_start_status;
struct completion vdev_setup_done;
struct completion vdev_delete_done;
+ struct completion peer_stats_info_complete;
struct workqueue_struct *workqueue;
/* Auxiliary workqueue */
struct workqueue_struct *workqueue_aux;
-
+ struct workqueue_struct *workqueue_tx_complete;
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;
@@ -1132,6 +1164,8 @@ struct ath10k {
struct work_struct register_work;
struct work_struct restart_work;
+ struct work_struct bundle_tx_work;
+ struct work_struct tx_complete_work;
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index f811e6940fb0..e8250a665433 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -349,7 +349,7 @@ free:
spin_unlock_bh(&ar->data_lock);
}
-static int ath10k_debug_fw_stats_request(struct ath10k *ar)
+int ath10k_debug_fw_stats_request(struct ath10k *ar)
{
unsigned long timeout, time_left;
int ret;
@@ -778,7 +778,7 @@ static ssize_t ath10k_mem_value_read(struct file *file,
ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
if (ret) {
- ath10k_warn(ar, "failed to read address 0x%08x via diagnose window fnrom debugfs: %d\n",
+ ath10k_warn(ar, "failed to read address 0x%08x via diagnose window from debugfs: %d\n",
(u32)(*ppos), ret);
goto exit;
}
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 82f7eb8583d9..4cbfd9279d6f 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -125,6 +125,9 @@ static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
{
return ar->debug.enable_extd_tx_stats;
}
+
+int ath10k_debug_fw_stats_request(struct ath10k *ar);
+
#else
static inline int ath10k_debug_start(struct ath10k *ar)
@@ -192,6 +195,11 @@ static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
return 0;
}
+static inline int ath10k_debug_fw_stats_request(struct ath10k *ar)
+{
+ return 0;
+}
+
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 496ee34a4d78..9e45fd9073a6 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -54,7 +54,9 @@ struct ath10k_hif_ops {
*/
void (*stop)(struct ath10k *ar);
- int (*swap_mailbox)(struct ath10k *ar);
+ int (*start_post)(struct ath10k *ar);
+
+ int (*get_htt_tx_complete)(struct ath10k *ar);
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe);
@@ -137,10 +139,17 @@ static inline void ath10k_hif_stop(struct ath10k *ar)
return ar->hif.ops->stop(ar);
}
-static inline int ath10k_hif_swap_mailbox(struct ath10k *ar)
+static inline int ath10k_hif_start_post(struct ath10k *ar)
+{
+ if (ar->hif.ops->start_post)
+ return ar->hif.ops->start_post(ar);
+ return 0;
+}
+
+static inline int ath10k_hif_get_htt_tx_complete(struct ath10k *ar)
{
- if (ar->hif.ops->swap_mailbox)
- return ar->hif.ops->swap_mailbox(ar);
+ if (ar->hif.ops->get_htt_tx_complete)
+ return ar->hif.ops->get_htt_tx_complete(ar);
return 0;
}
@@ -161,7 +170,8 @@ static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
u8 pipe_id, int force)
{
- ar->hif.ops->send_complete_check(ar, pipe_id, force);
+ if (ar->hif.ops->send_complete_check)
+ ar->hif.ops->send_complete_check(ar, pipe_id, force);
}
static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 2248d6c022f4..31df6dd04bf6 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -51,10 +51,12 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath10k *ar = ep->htc->ar;
+ struct ath10k_htc_hdr *hdr;
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
ep->eid, skb);
+ hdr = (struct ath10k_htc_hdr *)skb->data;
ath10k_htc_restore_tx_skb(ep->htc, skb);
if (!ep->ep_ops.ep_tx_complete) {
@@ -63,6 +65,11 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
return;
}
+ if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
@@ -78,7 +85,7 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
- if (ep->tx_credit_flow_enabled)
+ if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_lock_bh(&ep->htc->tx_lock);
@@ -86,6 +93,63 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
spin_unlock_bh(&ep->htc->tx_lock);
}
+static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
+ unsigned int len,
+ bool consume)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ enum ath10k_htc_ep_id eid = ep->eid;
+ int credits, ret = 0;
+
+ if (!ep->tx_credit_flow_enabled)
+ return 0;
+
+ credits = DIV_ROUND_UP(len, ep->tx_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+
+ if (ep->tx_credits < credits) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc insufficient credits ep %d required %d available %d consume %d\n",
+ eid, credits, ep->tx_credits, consume);
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ if (consume) {
+ ep->tx_credits -= credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d consumed %d credits total %d\n",
+ eid, credits, ep->tx_credits);
+ }
+
+unlock:
+ spin_unlock_bh(&htc->tx_lock);
+ return ret;
+}
+
+static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ enum ath10k_htc_ep_id eid = ep->eid;
+ int credits;
+
+ if (!ep->tx_credit_flow_enabled)
+ return;
+
+ credits = DIV_ROUND_UP(len, ep->tx_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d reverted %d credits back total %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ar);
+}
+
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
@@ -95,8 +159,8 @@ int ath10k_htc_send(struct ath10k_htc *htc,
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_hif_sg_item sg_item;
struct device *dev = htc->ar->dev;
- int credits = 0;
int ret;
+ unsigned int skb_len;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
@@ -108,23 +172,10 @@ int ath10k_htc_send(struct ath10k_htc *htc,
skb_push(skb, sizeof(struct ath10k_htc_hdr));
- if (ep->tx_credit_flow_enabled) {
- credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
- spin_lock_bh(&htc->tx_lock);
- if (ep->tx_credits < credits) {
- ath10k_dbg(ar, ATH10K_DBG_HTC,
- "htc insufficient credits ep %d required %d available %d\n",
- eid, credits, ep->tx_credits);
- spin_unlock_bh(&htc->tx_lock);
- ret = -EAGAIN;
- goto err_pull;
- }
- ep->tx_credits -= credits;
- ath10k_dbg(ar, ATH10K_DBG_HTC,
- "htc ep %d consumed %d credits (total %d)\n",
- eid, credits, ep->tx_credits);
- spin_unlock_bh(&htc->tx_lock);
- }
+ skb_len = skb->len;
+ ret = ath10k_htc_consume_credit(ep, skb_len, true);
+ if (ret)
+ goto err_pull;
ath10k_htc_prepare_tx_skb(ep, skb);
@@ -155,17 +206,7 @@ err_unmap:
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
- if (ep->tx_credit_flow_enabled) {
- spin_lock_bh(&htc->tx_lock);
- ep->tx_credits += credits;
- ath10k_dbg(ar, ATH10K_DBG_HTC,
- "htc ep %d reverted %d credits back (total %d)\n",
- eid, credits, ep->tx_credits);
- spin_unlock_bh(&htc->tx_lock);
-
- if (ep->ep_ops.ep_tx_credits)
- ep->ep_ops.ep_tx_credits(htc->ar);
- }
+ ath10k_htc_release_credit(ep, skb_len);
err_pull:
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
return ret;
@@ -581,6 +622,278 @@ static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
return allocation;
}
+static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
+ struct sk_buff *bundle_skb,
+ struct sk_buff_head *tx_save_head)
+{
+ struct ath10k_hif_sg_item sg_item;
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ struct sk_buff *skb;
+ int ret, cn = 0;
+ unsigned int skb_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
+ skb_len = bundle_skb->len;
+ ret = ath10k_htc_consume_credit(ep, skb_len, true);
+
+ if (!ret) {
+ sg_item.transfer_id = ep->eid;
+ sg_item.transfer_context = bundle_skb;
+ sg_item.vaddr = bundle_skb->data;
+ sg_item.len = bundle_skb->len;
+
+ ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
+ if (ret)
+ ath10k_htc_release_credit(ep, skb_len);
+ }
+
+ if (ret)
+ dev_kfree_skb_any(bundle_skb);
+
+ for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
+ if (ret) {
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ skb_queue_head(&ep->tx_req_head, skb);
+ } else {
+ skb_queue_tail(&ep->tx_complete_head, skb);
+ }
+ }
+
+ if (!ret)
+ queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "bundle tx status %d eid %d req count %d count %d len %d\n",
+ ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
+ return ret;
+}
+
+static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ int ret;
+
+ ret = ath10k_htc_send(htc, ep->eid, skb);
+
+ if (ret)
+ skb_queue_head(&ep->tx_req_head, skb);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
+ ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
+}
+
+static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct sk_buff *bundle_skb, *skb;
+ struct sk_buff_head tx_save_head;
+ struct ath10k_htc_hdr *hdr;
+ u8 *bundle_buf;
+ int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;
+
+ if (htc->ar->state == ATH10K_STATE_WEDGED)
+ return -ECOMM;
+
+ if (ep->tx_credit_flow_enabled &&
+ ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
+ return 0;
+
+ bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
+ bundle_skb = dev_alloc_skb(bundles_left);
+
+ if (!bundle_skb)
+ return -ENOMEM;
+
+ bundle_buf = bundle_skb->data;
+ skb_queue_head_init(&tx_save_head);
+
+ while (true) {
+ skb = skb_dequeue(&ep->tx_req_head);
+ if (!skb)
+ break;
+
+ credit_pad = 0;
+ trans_len = skb->len + sizeof(*hdr);
+ credit_remainder = trans_len % ep->tx_credit_size;
+
+ if (credit_remainder != 0) {
+ credit_pad = ep->tx_credit_size - credit_remainder;
+ trans_len += credit_pad;
+ }
+
+ ret = ath10k_htc_consume_credit(ep,
+ bundle_buf + trans_len - bundle_skb->data,
+ false);
+ if (ret) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ break;
+ }
+
+ if (bundles_left < trans_len) {
+ bundle_skb->len = bundle_buf - bundle_skb->data;
+ ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
+
+ if (ret) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return ret;
+ }
+
+ if (skb_queue_len(&ep->tx_req_head) == 0) {
+ ath10k_htc_send_one_skb(ep, skb);
+ return ret;
+ }
+
+ if (ep->tx_credit_flow_enabled &&
+ ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return 0;
+ }
+
+ bundles_left =
+ ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
+ bundle_skb = dev_alloc_skb(bundles_left);
+
+ if (!bundle_skb) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return -ENOMEM;
+ }
+ bundle_buf = bundle_skb->data;
+ skb_queue_head_init(&tx_save_head);
+ }
+
+ skb_push(skb, sizeof(struct ath10k_htc_hdr));
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ memcpy(bundle_buf, skb->data, skb->len);
+ hdr = (struct ath10k_htc_hdr *)bundle_buf;
+ hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
+ hdr->pad_len = __cpu_to_le16(credit_pad);
+ bundle_buf += trans_len;
+ bundles_left -= trans_len;
+ skb_queue_tail(&tx_save_head, skb);
+ }
+
+ if (bundle_buf != bundle_skb->data) {
+ bundle_skb->len = bundle_buf - bundle_skb->data;
+ ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
+ } else {
+ dev_kfree_skb_any(bundle_skb);
+ }
+
+ return ret;
+}
+
+static void ath10k_htc_bundle_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
+ struct ath10k_htc_ep *ep;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+
+ if (!ep->bundle_tx)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
+ ep->eid, skb_queue_len(&ep->tx_req_head));
+
+ if (skb_queue_len(&ep->tx_req_head) >=
+ ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
+ ath10k_htc_send_bundle_skbs(ep);
+ } else {
+ skb = skb_dequeue(&ep->tx_req_head);
+
+ if (!skb)
+ continue;
+ ath10k_htc_send_one_skb(ep, skb);
+ }
+ }
+}
+
+static void ath10k_htc_tx_complete_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
+ struct ath10k_htc_ep *ep;
+ enum ath10k_htc_ep_id eid;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+ eid = ep->eid;
+ if (ep->bundle_tx && eid == ar->htt.eid) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n",
+ ep->eid, skb_queue_len(&ep->tx_complete_head));
+
+ while (true) {
+ skb = skb_dequeue(&ep->tx_complete_head);
+ if (!skb)
+ break;
+ ath10k_htc_notify_tx_completion(ep, skb);
+ }
+ }
+ }
+}
+
+int ath10k_htc_send_hl(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k *ar = htc->ar;
+
+ if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
+ eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);
+
+ if (ep->bundle_tx) {
+ skb_queue_tail(&ep->tx_req_head, skb);
+ queue_work(ar->workqueue, &ar->bundle_tx_work);
+ return 0;
+ } else {
+ return ath10k_htc_send(htc, eid, skb);
+ }
+}
+
+void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
+{
+ if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
+ !ep->bundle_tx) {
+ ep->bundle_tx = true;
+ skb_queue_head_init(&ep->tx_req_head);
+ skb_queue_head_init(&ep->tx_complete_head);
+ }
+}
+
+void ath10k_htc_stop_hl(struct ath10k *ar)
+{
+ struct ath10k_htc_ep *ep;
+ int i;
+
+ cancel_work_sync(&ar->bundle_tx_work);
+ cancel_work_sync(&ar->tx_complete_work);
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+
+ if (!ep->bundle_tx)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
+ ep->eid, skb_queue_len(&ep->tx_req_head));
+
+ skb_queue_purge(&ep->tx_req_head);
+ }
+}
+
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
@@ -649,17 +962,34 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
*/
if (htc->control_resp_len >=
sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
+ htc->alt_data_credit_size =
+ __le16_to_cpu(msg->ready_ext.reserved) &
+ ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
htc->max_msgs_per_htc_bundle =
min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
ath10k_dbg(ar, ATH10K_DBG_HTC,
- "Extended ready message. RX bundle size: %d\n",
- htc->max_msgs_per_htc_bundle);
+ "Extended ready message RX bundle size %d alt size %d\n",
+ htc->max_msgs_per_htc_bundle,
+ htc->alt_data_credit_size);
}
+ INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
+ INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);
+
return 0;
}
+void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ bool enable)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];
+
+ ep->tx_credit_flow_enabled = enable;
+}
+
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp)
@@ -791,6 +1121,11 @@ setup:
ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
ep->tx_credits = tx_alloc;
+ ep->tx_credit_size = htc->target_credit_size;
+
+ if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
+ htc->alt_data_credit_size != 0)
+ ep->tx_credit_size = htc->alt_data_credit_size;
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;
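
The bundling path added above accounts for frames in whole credits: each queued frame is rounded up to a multiple of the endpoint's credit size, and the slack is recorded in the header so the target can strip it again. A minimal sketch of that arithmetic is shown below; it is illustrative only, the names and the example credit size are hypothetical, and DIV_ROUND_UP is defined locally to keep the sketch self-contained.

/* Illustrative only: credit rounding as used by the bundling code above. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int credits_needed(unsigned int frame_len, unsigned int credit_size)
{
	return DIV_ROUND_UP(frame_len, credit_size);
}

static unsigned int credit_pad(unsigned int frame_len, unsigned int credit_size)
{
	unsigned int rem = frame_len % credit_size;

	return rem ? credit_size - rem : 0;	/* the value written into hdr->pad_len */
}

/* Example: with a hypothetical credit_size of 1556 and frame_len of 1700,
 * credits_needed() returns 2 and credit_pad() returns 1412. */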
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 065c82d9d689..0d180faf3b77 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -83,8 +83,14 @@ struct ath10k_htc_hdr {
u8 seq_no; /* for tx */
u8 control_byte1;
} __packed;
- u8 pad0;
- u8 pad1;
+ union {
+ __le16 pad_len;
+ struct {
+ u8 pad0;
+ u8 pad1;
+ } __packed;
+ } __packed;
+
} __packed __aligned(4);
enum ath10k_ath10k_htc_msg_id {
@@ -113,6 +119,8 @@ enum ath10k_htc_conn_flags {
#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
};
+#define ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK 0xFFF
+
enum ath10k_htc_conn_svc_status {
ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
@@ -121,6 +129,10 @@ enum ath10k_htc_conn_svc_status {
ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
};
+#define ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE 32
+#define ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE 2
+#define ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE 2
+
enum ath10k_htc_setup_complete_flags {
ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN = 1
};
@@ -145,8 +157,14 @@ struct ath10k_htc_ready_extended {
struct ath10k_htc_ready base;
u8 htc_version; /* @enum ath10k_htc_version */
u8 max_msgs_per_htc_bundle;
- u8 pad0;
- u8 pad1;
+ union {
+ __le16 reserved;
+ struct {
+ u8 pad0;
+ u8 pad1;
+ } __packed;
+ } __packed;
+
} __packed;
struct ath10k_htc_conn_svc {
@@ -353,7 +371,12 @@ struct ath10k_htc_ep {
u8 seq_no; /* for debugging */
int tx_credits;
+ int tx_credit_size;
bool tx_credit_flow_enabled;
+ bool bundle_tx;
+ struct sk_buff_head tx_req_head;
+ struct sk_buff_head tx_complete_head;
+
};
struct ath10k_htc_svc_tx_credits {
@@ -378,16 +401,25 @@ struct ath10k_htc {
int total_transmit_credits;
int target_credit_size;
u8 max_msgs_per_htc_bundle;
+ int alt_data_credit_size;
};
int ath10k_htc_init(struct ath10k *ar);
int ath10k_htc_wait_target(struct ath10k_htc *htc);
+void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep);
int ath10k_htc_start(struct ath10k_htc *htc);
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp);
+void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ bool enable);
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
+void ath10k_htc_stop_hl(struct ath10k *ar);
+
+int ath10k_htc_send_hl(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+ struct sk_buff *packet);
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
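
The header change above overlays the two former pad bytes with a single little-endian 16-bit pad_len, so the bundle path can record per-frame padding without growing the structure or moving existing fields. A standalone sketch of the same aliasing idea, with hypothetical type names and plain uint16_t standing in for __le16:

/* Sketch only: two spare pad bytes re-declared as one 16-bit field. */
#include <stdint.h>
#include <assert.h>

struct tail {
	union {
		uint16_t pad_len;		/* used by the TX bundle path */
		struct {
			uint8_t pad0;
			uint8_t pad1;
		};
	};
};

static_assert(sizeof(struct tail) == 2, "aliasing must not grow the header");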
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 7b75200ceae5..127b4e4980ef 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -10,6 +10,7 @@
#include "htt.h"
#include "core.h"
#include "debug.h"
+#include "hif.h"
static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
[HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
@@ -134,6 +135,8 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
+ struct ath10k *ar = htt->ar;
+ struct ath10k_htc_ep *ep;
int status;
memset(&conn_req, 0, sizeof(conn_req));
@@ -141,6 +144,7 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
+ conn_req.ep_ops.ep_tx_credits = ath10k_htt_op_ep_tx_credits;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
@@ -153,6 +157,15 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
htt->eid = conn_resp.eid;
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
+ ep = &ar->htc.endpoint[htt->eid];
+ ath10k_htc_setup_tx_req(ep);
+ }
+
+ htt->disable_tx_comp = ath10k_hif_get_htt_tx_complete(htt->ar);
+ if (htt->disable_tx_comp)
+ ath10k_htc_change_tx_credit_flow(&htt->ar->htc, htt->eid, true);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 4a12564fc30e..8f3710cf28f4 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -150,9 +150,19 @@ enum htt_data_tx_desc_flags1 {
HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
- HTT_DATA_TX_DESC_FLAGS1_RSVD1 = 1 << 15
+ HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE = 1 << 15
};
+#define HTT_TX_CREDIT_DELTA_ABS_M 0xffff0000
+#define HTT_TX_CREDIT_DELTA_ABS_S 16
+#define HTT_TX_CREDIT_DELTA_ABS_GET(word) \
+ (((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S)
+
+#define HTT_TX_CREDIT_SIGN_BIT_M 0x00000100
+#define HTT_TX_CREDIT_SIGN_BIT_S 8
+#define HTT_TX_CREDIT_SIGN_BIT_GET(word) \
+ (((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S)
+
enum htt_data_tx_ext_tid {
HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
HTT_DATA_TX_EXT_TID_MGMT = 17,
@@ -2021,6 +2031,10 @@ struct ath10k_htt {
bool tx_mem_allocated;
const struct ath10k_htt_tx_ops *tx_ops;
const struct ath10k_htt_rx_ops *rx_ops;
+ bool disable_tx_comp;
+ bool bundle_tx;
+ struct sk_buff_head tx_req_head;
+ struct sk_buff_head tx_complete_head;
};
struct ath10k_htt_tx_ops {
@@ -2035,6 +2049,7 @@ struct ath10k_htt_tx_ops {
int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu);
+ void (*htt_flush_tx)(struct ath10k_htt *htt);
};
static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
@@ -2074,6 +2089,12 @@ static inline int ath10k_htt_tx(struct ath10k_htt *htt,
return htt->tx_ops->htt_tx(htt, txmode, msdu);
}
+static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt)
+{
+ if (htt->tx_ops->htt_flush_tx)
+ htt->tx_ops->htt_flush_tx(htt);
+}
+
static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
{
if (!htt->tx_ops->htt_alloc_txbuff)
@@ -2267,6 +2288,7 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
__le16 fetch_seq_num,
struct htt_tx_fetch_record *records,
size_t num_records);
+void ath10k_htt_op_ep_tx_credits(struct ath10k *ar);
void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
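
The credit-update word handled with the HTT_TX_CREDIT_* macros above carries a sign-magnitude delta: bits 31:16 hold the absolute value and bit 8 holds the sign. A small worked decode, with a hypothetical example word, purely for illustration:

/* Illustrative decode of one credit-update word (example value only). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t word = 0x00030100;			/* abs delta 3, sign bit set */
	int delta = (word & 0xffff0000) >> 16;		/* HTT_TX_CREDIT_DELTA_ABS_GET */

	if ((word & 0x00000100) >> 8)			/* HTT_TX_CREDIT_SIGN_BIT_GET */
		delta = -delta;

	printf("credit delta %d\n", delta);		/* prints -3 */
	return 0;
}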
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index f883f2a724dd..d787cbead56a 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -3574,6 +3574,13 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
}
+ if (ar->htt.disable_tx_comp) {
+ arsta->tx_retries += peer_stats->retry_pkts;
+ arsta->tx_failed += peer_stats->failed_pkts;
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d tx failed %d\n",
+ arsta->tx_retries, arsta->tx_failed);
+ }
+
if (ath10k_debug_is_extd_tx_stats_enabled(ar))
ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
rate_idx);
@@ -3789,6 +3796,9 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
}
case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
struct htt_tx_done tx_done = {};
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
@@ -3814,6 +3824,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
+ if (htt->disable_tx_comp) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits++;
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
status = ath10k_txrx_tx_unref(htt, &tx_done);
if (!status) {
spin_lock_bh(&htt->tx_lock);
@@ -3888,8 +3904,32 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
- case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
+ case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
+ u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
+ int htt_credit_delta;
+
+ htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
+ if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
+ htt_credit_delta = -htt_credit_delta;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt credit update delta %d\n",
+ htt_credit_delta);
+
+ if (htt->disable_tx_comp) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += htt_credit_delta;
+ spin_unlock_bh(&htc->tx_lock);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt credit total %d\n",
+ ep->tx_credits);
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ }
break;
+ }
case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
u32 freq = __le32_to_cpu(resp->chan_change.freq);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index e9d12ea708b6..4fd10ac3a941 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -529,9 +529,15 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
htt->tx_mem_allocated = false;
}
-void ath10k_htt_tx_stop(struct ath10k_htt *htt)
+static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
+ ath10k_htc_stop_hl(htt->ar);
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+}
+
+void ath10k_htt_tx_stop(struct ath10k_htt *htt)
+{
+ ath10k_htt_flush_tx_queue(htt);
idr_destroy(&htt->pending_tx);
}
@@ -541,9 +547,46 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
ath10k_htt_tx_destroy(htt);
}
+void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
+{
+ queue_work(ar->workqueue, &ar->bundle_tx_work);
+}
+
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_tx_done tx_done = {0};
+ struct htt_cmd_hdr *htt_hdr;
+ struct htt_data_tx_desc *desc_hdr = NULL;
+ u16 flags1 = 0;
+ u8 msg_type = 0;
+
+ if (htt->disable_tx_comp) {
+ htt_hdr = (struct htt_cmd_hdr *)skb->data;
+ msg_type = htt_hdr->msg_type;
+
+ if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
+ desc_hdr = (struct htt_data_tx_desc *)
+ (skb->data + sizeof(*htt_hdr));
+ flags1 = __le16_to_cpu(desc_hdr->flags1);
+ }
+ }
+
dev_kfree_skb_any(skb);
+
+ if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
+ return;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx complete msdu id:%u ,flags1:%x\n",
+ __le16_to_cpu(desc_hdr->id), flags1);
+
+ if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
+ return;
+
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
+ tx_done.msdu_id = __le16_to_cpu(desc_hdr->id);
+ ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -1279,6 +1322,9 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+ if (htt->disable_tx_comp)
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
break;
}
@@ -1344,7 +1390,7 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
*/
tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);
- res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);
+ res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);
out:
return res;
@@ -1784,6 +1830,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
.htt_tx = ath10k_htt_tx_hl,
.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
+ .htt_flush_tx = ath10k_htt_flush_tx_queue,
};
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 970c736ac6bb..d9907a4648a8 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -623,6 +623,9 @@ struct ath10k_hw_params {
/* tx stats support over pktlog */
bool tx_stats_over_pktlog;
+
+ /* provides bitrates for sta_statistics using WMI_TLV_PEER_STATS_INFO_EVENTID */
+ bool supports_peer_stats_info;
};
struct htt_rx_desc;
@@ -765,7 +768,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define TARGET_TLV_NUM_TDLS_VDEVS 1
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
-#define TARGET_TLV_NUM_MSDU_DESC_HL 64
+#define TARGET_TLV_NUM_MSDU_DESC_HL 1024
#define TARGET_TLV_NUM_WOW_PATTERNS 22
#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 2d03b8dd3b8c..91f5444ecedb 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2505,6 +2505,30 @@ ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
return tx_mcs_set;
}
+static u32 get_160mhz_nss_from_maxrate(int rate)
+{
+ u32 nss;
+
+ switch (rate) {
+ case 780:
+ nss = 1;
+ break;
+ case 1560:
+ nss = 2;
+ break;
+ case 2106:
+ nss = 3; /* MCS 9 not supported per spec */
+ break;
+ case 3120:
+ nss = 4;
+ break;
+ default:
+ nss = 1;
+ }
+
+ return nss;
+}
+
static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2512,6 +2536,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u16 *vht_mcs_mask;
@@ -2578,22 +2603,38 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
- sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+ /* Configure the bandwidth-NSS mapping for the firmware
+ * based on the chip's tx chain setting at 160 MHz bandwidth
+ */
+ if (arg->peer_phymode == MODE_11AC_VHT160 ||
+ arg->peer_phymode == MODE_11AC_VHT80_80) {
+ u32 rx_nss;
+ u32 max_rate;
- if (arg->peer_vht_rates.rx_max_rate &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
- switch (arg->peer_vht_rates.rx_max_rate) {
- case 1560:
- /* Must be 2x2 at 160Mhz is all it can do. */
- arg->peer_bw_rxnss_override = 2;
- break;
- case 780:
- /* Can only do 1x1 at 160Mhz (Long Guard Interval) */
- arg->peer_bw_rxnss_override = 1;
- break;
+ max_rate = arg->peer_vht_rates.rx_max_rate;
+ rx_nss = get_160mhz_nss_from_maxrate(max_rate);
+
+ if (rx_nss == 0)
+ rx_nss = arg->peer_num_spatial_streams;
+ else
+ rx_nss = min(arg->peer_num_spatial_streams, rx_nss);
+
+ max_rate = hw->vht160_mcs_tx_highest;
+ rx_nss = min(rx_nss, get_160mhz_nss_from_maxrate(max_rate));
+
+ arg->peer_bw_rxnss_override =
+ FIELD_PREP(WMI_PEER_NSS_MAP_ENABLE, 1) |
+ FIELD_PREP(WMI_PEER_NSS_160MHZ_MASK, (rx_nss - 1));
+
+ if (arg->peer_phymode == MODE_11AC_VHT80_80) {
+ arg->peer_bw_rxnss_override |=
+ FIELD_PREP(WMI_PEER_NSS_80_80MHZ_MASK, (rx_nss - 1));
}
}
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vht peer %pM max_mpdu %d flags 0x%x peer_rx_nss_override 0x%x\n",
+ sta->addr, arg->peer_max_mpdu,
+ arg->peer_flags, arg->peer_bw_rxnss_override);
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@@ -2745,9 +2786,9 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
- ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
return 0;
}
@@ -2918,6 +2959,11 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
arvif->aid = bss_conf->aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->peer_stats_info_enable, 1);
+ if (ret)
+ ath10k_warn(ar, "failed to enable peer stats info: %d\n", ret);
+
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
if (ret) {
ath10k_warn(ar, "failed to set vdev %d up: %d\n",
@@ -4488,17 +4534,18 @@ static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
-static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
+static bool ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
{
/* It is not clear that allowing gaps in chainmask
* is helpful. Probably it will not do what user
* is hoping for, so warn in that case.
*/
if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
- return;
+ return true;
- ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
+ ath10k_warn(ar, "mac %s antenna chainmask is invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
dbg, cm);
+ return false;
}
static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
@@ -4563,13 +4610,6 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
vht_cap.cap |= val;
}
- /* Currently the firmware seems to be buggy, don't enable 80+80
- * mode until that's resolved.
- */
- if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
- (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0)
- vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
-
mcs_map = 0;
for (i = 0; i < 8; i++) {
if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
@@ -4688,11 +4728,15 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
{
int ret;
+ bool is_valid_tx_chain_mask, is_valid_rx_chain_mask;
lockdep_assert_held(&ar->conf_mutex);
- ath10k_check_chain_mask(ar, tx_ant, "tx");
- ath10k_check_chain_mask(ar, rx_ant, "rx");
+ is_valid_tx_chain_mask = ath10k_check_chain_mask(ar, tx_ant, "tx");
+ is_valid_rx_chain_mask = ath10k_check_chain_mask(ar, rx_ant, "rx");
+
+ if (!is_valid_tx_chain_mask || !is_valid_rx_chain_mask)
+ return -EINVAL;
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
@@ -7190,6 +7234,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ath10k_wmi_peer_flush(ar, arvif->vdev_id,
arvif->bssid, bitmap);
}
+ ath10k_htt_flush_tx(&ar->htt);
}
return;
}
@@ -8260,6 +8305,215 @@ static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
peer->removed = true;
}
+/* HT MCS parameters with Nss = 1 */
+static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss1[] = {
+ /* MCS L20 L40 S20 S40 */
+ {0, { 65, 135, 72, 150} },
+ {1, { 130, 270, 144, 300} },
+ {2, { 195, 405, 217, 450} },
+ {3, { 260, 540, 289, 600} },
+ {4, { 390, 810, 433, 900} },
+ {5, { 520, 1080, 578, 1200} },
+ {6, { 585, 1215, 650, 1350} },
+ {7, { 650, 1350, 722, 1500} }
+};
+
+/* HT MCS parameters with Nss = 2 */
+static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss2[] = {
+ /* MCS L20 L40 S20 S40 */
+ {0, {130, 270, 144, 300} },
+ {1, {260, 540, 289, 600} },
+ {2, {390, 810, 433, 900} },
+ {3, {520, 1080, 578, 1200} },
+ {4, {780, 1620, 867, 1800} },
+ {5, {1040, 2160, 1156, 2400} },
+ {6, {1170, 2430, 1300, 2700} },
+ {7, {1300, 2700, 1444, 3000} }
+};
+
+/* VHT MCS parameters with Nss = 1 */
+static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[] = {
+ /* MCS L80 S80 L40 S40 L20 S20 */
+ {0, {293, 325}, {135, 150}, {65, 72} },
+ {1, {585, 650}, {270, 300}, {130, 144} },
+ {2, {878, 975}, {405, 450}, {195, 217} },
+ {3, {1170, 1300}, {540, 600}, {260, 289} },
+ {4, {1755, 1950}, {810, 900}, {390, 433} },
+ {5, {2340, 2600}, {1080, 1200}, {520, 578} },
+ {6, {2633, 2925}, {1215, 1350}, {585, 650} },
+ {7, {2925, 3250}, {1350, 1500}, {650, 722} },
+ {8, {3510, 3900}, {1620, 1800}, {780, 867} },
+ {9, {3900, 4333}, {1800, 2000}, {780, 867} }
+};
+
+/* VHT MCS parameters with Nss = 2 */
+static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[] = {
+ /* MCS L80 S80 L40 S40 L20 S20 */
+ {0, {585, 650}, {270, 300}, {130, 144} },
+ {1, {1170, 1300}, {540, 600}, {260, 289} },
+ {2, {1755, 1950}, {810, 900}, {390, 433} },
+ {3, {2340, 2600}, {1080, 1200}, {520, 578} },
+ {4, {3510, 3900}, {1620, 1800}, {780, 867} },
+ {5, {4680, 5200}, {2160, 2400}, {1040, 1156} },
+ {6, {5265, 5850}, {2430, 2700}, {1170, 1300} },
+ {7, {5850, 6500}, {2700, 3000}, {1300, 1444} },
+ {8, {7020, 7800}, {3240, 3600}, {1560, 1733} },
+ {9, {7800, 8667}, {3600, 4000}, {1560, 1733} }
+};
+
+static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
+ u8 *flags, u8 *bw)
+{
+ struct ath10k_index_ht_data_rate_type *mcs_rate;
+
+ mcs_rate = (struct ath10k_index_ht_data_rate_type *)
+ ((nss == 1) ? &supported_ht_mcs_rate_nss1 :
+ &supported_ht_mcs_rate_nss2);
+
+ if (rate == mcs_rate[mcs].supported_rate[0]) {
+ *bw = RATE_INFO_BW_20;
+ } else if (rate == mcs_rate[mcs].supported_rate[1]) {
+ *bw |= RATE_INFO_BW_40;
+ } else if (rate == mcs_rate[mcs].supported_rate[2]) {
+ *bw |= RATE_INFO_BW_20;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate == mcs_rate[mcs].supported_rate[3]) {
+ *bw |= RATE_INFO_BW_40;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else {
+ ath10k_warn(ar, "invalid ht params rate %d 100kbps nss %d mcs %d",
+ rate, nss, mcs);
+ }
+}
+
+static void ath10k_mac_get_rate_flags_vht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
+ u8 *flags, u8 *bw)
+{
+ struct ath10k_index_vht_data_rate_type *mcs_rate;
+
+ mcs_rate = (struct ath10k_index_vht_data_rate_type *)
+ ((nss == 1) ? &supported_vht_mcs_rate_nss1 :
+ &supported_vht_mcs_rate_nss2);
+
+ if (rate == mcs_rate[mcs].supported_VHT80_rate[0]) {
+ *bw = RATE_INFO_BW_80;
+ } else if (rate == mcs_rate[mcs].supported_VHT80_rate[1]) {
+ *bw = RATE_INFO_BW_80;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate == mcs_rate[mcs].supported_VHT40_rate[0]) {
+ *bw = RATE_INFO_BW_40;
+ } else if (rate == mcs_rate[mcs].supported_VHT40_rate[1]) {
+ *bw = RATE_INFO_BW_40;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate == mcs_rate[mcs].supported_VHT20_rate[0]) {
+ *bw = RATE_INFO_BW_20;
+ } else if (rate == mcs_rate[mcs].supported_VHT20_rate[1]) {
+ *bw = RATE_INFO_BW_20;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else {
+ ath10k_warn(ar, "invalid vht params rate %d 100kbps nss %d mcs %d",
+ rate, nss, mcs);
+ }
+}
+
+static void ath10k_mac_get_rate_flags(struct ath10k *ar, u32 rate,
+ enum ath10k_phy_mode mode, u8 nss, u8 mcs,
+ u8 *flags, u8 *bw)
+{
+ if (mode == ATH10K_PHY_MODE_HT) {
+ *flags = RATE_INFO_FLAGS_MCS;
+ ath10k_mac_get_rate_flags_ht(ar, rate, nss, mcs, flags, bw);
+ } else if (mode == ATH10K_PHY_MODE_VHT) {
+ *flags = RATE_INFO_FLAGS_VHT_MCS;
+ ath10k_mac_get_rate_flags_vht(ar, rate, nss, mcs, flags, bw);
+ }
+}
+
+static void ath10k_mac_parse_bitrate(struct ath10k *ar, u32 rate_code,
+ u32 bitrate_kbps, struct rate_info *rate)
+{
+ enum ath10k_phy_mode mode = ATH10K_PHY_MODE_LEGACY;
+ enum wmi_rate_preamble preamble = WMI_TLV_GET_HW_RC_PREAM_V1(rate_code);
+ u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rate_code) + 1;
+ u8 mcs = WMI_TLV_GET_HW_RC_RATE_V1(rate_code);
+ u8 flags = 0, bw = 0;
+
+ if (preamble == WMI_RATE_PREAMBLE_HT)
+ mode = ATH10K_PHY_MODE_HT;
+ else if (preamble == WMI_RATE_PREAMBLE_VHT)
+ mode = ATH10K_PHY_MODE_VHT;
+
+ ath10k_mac_get_rate_flags(ar, bitrate_kbps / 100, mode, nss, mcs, &flags, &bw);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac parse bitrate preamble %d mode %d nss %d mcs %d flags %x bw %d\n",
+ preamble, mode, nss, mcs, flags, bw);
+
+ rate->flags = flags;
+ rate->bw = bw;
+ rate->legacy = bitrate_kbps / 100;
+ rate->nss = nss;
+ rate->mcs = mcs;
+}
+
+static void ath10k_mac_sta_get_peer_stats_info(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_peer *peer;
+ unsigned long time_left;
+ int ret;
+
+ if (!(ar->hw_params.supports_peer_stats_info &&
+ arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA))
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arsta->arvif->vdev_id, sta->addr);
+ spin_unlock_bh(&ar->data_lock);
+ if (!peer)
+ return;
+
+ reinit_completion(&ar->peer_stats_info_complete);
+
+ ret = ath10k_wmi_request_peer_stats_info(ar,
+ arsta->arvif->vdev_id,
+ WMI_REQUEST_ONE_PEER_STATS_INFO,
+ arsta->arvif->bssid,
+ 0);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "could not request peer stats info: %d\n", ret);
+ return;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->peer_stats_info_complete, 3 * HZ);
+ if (time_left == 0) {
+ ath10k_warn(ar, "timed out waiting peer stats info\n");
+ return;
+ }
+
+ if (arsta->rx_rate_code != 0 && arsta->rx_bitrate_kbps != 0) {
+ ath10k_mac_parse_bitrate(ar, arsta->rx_rate_code,
+ arsta->rx_bitrate_kbps,
+ &sinfo->rxrate);
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+ arsta->rx_rate_code = 0;
+ arsta->rx_bitrate_kbps = 0;
+ }
+
+ if (arsta->tx_rate_code != 0 && arsta->tx_bitrate_kbps != 0) {
+ ath10k_mac_parse_bitrate(ar, arsta->tx_rate_code,
+ arsta->tx_bitrate_kbps,
+ &sinfo->txrate);
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ arsta->tx_rate_code = 0;
+ arsta->tx_bitrate_kbps = 0;
+ }
+}
+
static void ath10k_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -8271,6 +8525,8 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
if (!ath10k_peer_stats_enabled(ar))
return;
+ ath10k_debug_fw_stats_request(ar);
+
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@@ -8286,6 +8542,15 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
}
sinfo->txrate.flags = arsta->txrate.flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ if (ar->htt.disable_tx_comp) {
+ sinfo->tx_retries = arsta->tx_retries;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ sinfo->tx_failed = arsta->tx_failed;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+ }
+
+ ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo);
}
static const struct ieee80211_ops ath10k_ops = {
@@ -8625,7 +8890,9 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80),
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160),
#endif
},
};
@@ -8643,7 +8910,9 @@ ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80),
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160),
#endif
},
};
@@ -8919,7 +9188,6 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
- ar->hw->wiphy->max_sched_scan_reqs = 1;
ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
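
The peer-stats path added above recovers bandwidth and guard interval by matching the firmware-reported rate (in 100 kbps units) against the per-MCS table row for the reported NSS. A small sketch of that matching for one HT row, using the Nss = 1, MCS 7 values listed in the tables above; the helper names are illustrative and not part of the patch:

/* Illustrative only: classify a reported rate against one HT table row. */
struct ht_row { unsigned int l20, l40, s20, s40; };

static const struct ht_row mcs7_nss1 = { 650, 1350, 722, 1500 }; /* 100 kbps units */

static void classify(unsigned int rate, const struct ht_row *row,
		     int *is_40mhz, int *short_gi)
{
	*short_gi = (rate == row->s20 || rate == row->s40);
	*is_40mhz = (rate == row->l40 || rate == row->s40);
	/* e.g. rate 722 -> 20 MHz with short GI; rate 1350 -> 40 MHz, long GI */
}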
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index ded7a220a4aa..1d941d53fdc9 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -116,7 +116,7 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
-static struct ce_attr host_ce_config_wlan[] = {
+static const struct ce_attr pci_host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
@@ -222,7 +222,7 @@ static struct ce_attr host_ce_config_wlan[] = {
};
/* Target firmware's Copy Engine configuration. */
-static struct ce_pipe_config target_ce_config_wlan[] = {
+static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
@@ -335,7 +335,7 @@ static struct ce_pipe_config target_ce_config_wlan[] = {
* This table is derived from the CE_PCI TABLE, above.
* It is passed to the Target at startup for use by firmware.
*/
-static struct service_to_pipe target_service_to_ce_map_wlan[] = {
+static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
{
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@@ -1787,6 +1787,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
if (!force) {
@@ -1804,7 +1806,7 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
* If at least 50% of the total resources are still available,
* don't bother checking again yet.
*/
- if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+ if (resources > (ar_pci->attr[pipe].src_nentries >> 1))
return;
}
ath10k_ce_per_engine_service(ar, pipe);
@@ -1820,14 +1822,15 @@ static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
- const struct service_to_pipe *entry;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ const struct ce_service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
- for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
- entry = &target_service_to_ce_map_wlan[i];
+ for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
+ entry = &ar_pci->serv_to_pipe[i];
if (__le32_to_cpu(entry->service_id) != service_id)
continue;
@@ -2074,6 +2077,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_sync(ar);
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
+ cancel_work_sync(&ar_pci->dump_work);
/* Most likely the device has HTT Rx ring configured. The only way to
* prevent the device from accessing (and possible corrupting) host
@@ -2315,6 +2319,7 @@ static int ath10k_bus_get_num_banks(struct ath10k *ar)
int ath10k_pci_init_config(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 interconnect_targ_addr;
u32 pcie_state_targ_addr = 0;
u32 pipe_cfg_targ_addr = 0;
@@ -2360,7 +2365,7 @@ int ath10k_pci_init_config(struct ath10k *ar)
}
ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
- target_ce_config_wlan,
+ ar_pci->pipe_config,
sizeof(struct ce_pipe_config) *
NUM_TARGET_CE_CONFIG_WLAN);
@@ -2385,8 +2390,8 @@ int ath10k_pci_init_config(struct ath10k *ar)
}
ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
- target_service_to_ce_map_wlan,
- sizeof(target_service_to_ce_map_wlan));
+ ar_pci->serv_to_pipe,
+ sizeof(pci_target_service_to_ce_map_wlan));
if (ret != 0) {
ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
return ret;
@@ -2458,23 +2463,24 @@ static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
struct ce_attr *attr;
struct ce_pipe_config *config;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
/* For QCA6174 we're overriding the Copy Engine 5 configuration,
* since it is currently used for other feature.
*/
/* Override Host's Copy Engine 5 configuration */
- attr = &host_ce_config_wlan[5];
+ attr = &ar_pci->attr[5];
attr->src_sz_max = 0;
attr->dest_nentries = 0;
/* Override Target firmware's Copy Engine configuration */
- config = &target_ce_config_wlan[5];
+ config = &ar_pci->pipe_config[5];
config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
config->nbytes_max = __cpu_to_le32(2048);
/* Map from service/endpoint to Copy Engine */
- target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
+ ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
}
int ath10k_pci_alloc_pipes(struct ath10k *ar)
@@ -2490,7 +2496,7 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar)
pipe->pipe_num = i;
pipe->hif_ce_state = ar;
- ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
i, ret);
@@ -2503,7 +2509,7 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar)
continue;
}
- pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
+ pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
}
return 0;
@@ -2519,10 +2525,11 @@ void ath10k_pci_free_pipes(struct ath10k *ar)
int ath10k_pci_init_pipes(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int i, ret;
for (i = 0; i < CE_COUNT; i++) {
- ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+ ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
if (ret) {
ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
i, ret);
@@ -3594,6 +3601,30 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
+ ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
+ sizeof(pci_host_ce_config_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->attr) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
+ sizeof(pci_target_ce_config_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->pipe_config) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
+ sizeof(pci_target_service_to_ce_map_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->serv_to_pipe) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
ret = ath10k_pci_setup_resource(ar);
if (ret) {
ath10k_err(ar, "failed to setup resource: %d\n", ret);
@@ -3689,6 +3720,11 @@ err_free_pipes:
err_core_destroy:
ath10k_core_destroy(ar);
+err_free:
+ kfree(ar_pci->attr);
+ kfree(ar_pci->pipe_config);
+ kfree(ar_pci->serv_to_pipe);
+
return ret;
}
@@ -3714,6 +3750,9 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
+ kfree(ar_pci->attr);
+ kfree(ar_pci->pipe_config);
+ kfree(ar_pci->serv_to_pipe);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
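
The CE tables above become const templates that each probed device duplicates with kmemdup(), so the QCA6174 CE5 override mutates only that device's private copy instead of a shared global. A user-space analogue of the pattern, with hypothetical names and malloc/memcpy standing in for kmemdup:

/* Sketch only: duplicate a const template per instance so overrides stay local. */
#include <stdlib.h>
#include <string.h>

struct pipe_cfg { unsigned int nbytes_max; };

static const struct pipe_cfg default_cfg[8] = {
	[5] = { .nbytes_max = 1024 },	/* template value, hypothetical */
};

static struct pipe_cfg *dup_cfg(void)
{
	struct pipe_cfg *cfg = malloc(sizeof(default_cfg));

	if (!cfg)
		return NULL;
	memcpy(cfg, default_cfg, sizeof(default_cfg));
	cfg[5].nbytes_max = 2048;	/* per-device override, cf. the CE5 tweak above */
	return cfg;
}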
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 4455ed6c5275..e3cbd259a2dc 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -183,6 +183,10 @@ struct ath10k_pci {
* this struct.
*/
struct ath10k_ahb ahb[0];
+
+ struct ce_attr *attr;
+ struct ce_pipe_config *pipe_config;
+ struct ce_service_to_pipe *serv_to_pipe;
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 85dce43c5439..5ae829b46c3d 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -122,8 +122,8 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
int ret;
int i;
- req.msa_addr = qmi->msa_pa;
- req.size = qmi->msa_mem_size;
+ req.msa_addr = ar->msa.paddr;
+ req.size = ar->msa.mem_size;
ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
wlfw_msa_info_resp_msg_v01_ei, &resp);
@@ -157,12 +157,12 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
goto out;
}
- max_mapped_addr = qmi->msa_pa + qmi->msa_mem_size;
+ max_mapped_addr = ar->msa.paddr + ar->msa.mem_size;
qmi->nr_mem_region = resp.mem_region_info_len;
for (i = 0; i < resp.mem_region_info_len; i++) {
- if (resp.mem_region_info[i].size > qmi->msa_mem_size ||
+ if (resp.mem_region_info[i].size > ar->msa.mem_size ||
resp.mem_region_info[i].region_addr > max_mapped_addr ||
- resp.mem_region_info[i].region_addr < qmi->msa_pa ||
+ resp.mem_region_info[i].region_addr < ar->msa.paddr ||
resp.mem_region_info[i].size +
resp.mem_region_info[i].region_addr > max_mapped_addr) {
ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
@@ -1006,54 +1006,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work)
spin_unlock(&qmi->event_lock);
}
-static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
-{
- struct ath10k *ar = qmi->ar;
- struct device *dev = ar->dev;
- struct device_node *node;
- struct resource r;
- int ret;
-
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (node) {
- ret = of_address_to_resource(node, 0, &r);
- if (ret) {
- dev_err(dev, "failed to resolve msa fixed region\n");
- return ret;
- }
- of_node_put(node);
-
- qmi->msa_pa = r.start;
- qmi->msa_mem_size = resource_size(&r);
- qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
- MEMREMAP_WT);
- if (IS_ERR(qmi->msa_va)) {
- dev_err(dev, "failed to map memory region: %pa\n", &r.start);
- return PTR_ERR(qmi->msa_va);
- }
- } else {
- qmi->msa_va = dmam_alloc_coherent(dev, msa_size,
- &qmi->msa_pa, GFP_KERNEL);
- if (!qmi->msa_va) {
- ath10k_err(ar, "failed to allocate dma memory for msa region\n");
- return -ENOMEM;
- }
- qmi->msa_mem_size = msa_size;
- }
-
- if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
- qmi->msa_fixed_perm = true;
-
- ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
- &qmi->msa_pa,
- qmi->msa_va);
-
- return 0;
-}
-
int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct device *dev = ar->dev;
struct ath10k_qmi *qmi;
int ret;
@@ -1064,9 +1020,8 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
qmi->ar = ar;
ar_snoc->qmi = qmi;
- ret = ath10k_qmi_setup_msa_resources(qmi, msa_size);
- if (ret)
- goto err;
+ if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
+ qmi->msa_fixed_perm = true;
ret = qmi_handle_init(&qmi->qmi_hdl,
WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
index dc257375f161..450be18b60ad 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.h
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -93,9 +93,6 @@ struct ath10k_qmi {
spinlock_t event_lock; /* spinlock for qmi event list */
u32 nr_mem_region;
struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS];
- dma_addr_t msa_pa;
- u32 msa_mem_size;
- void *msa_va;
struct ath10k_qmi_chip_info chip_info;
struct ath10k_qmi_board_info board_info;
struct ath10k_qmi_soc_info soc_info;
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 1f709b65c29b..e2aff2254a40 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -542,7 +542,7 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
int pkt_cnt = 0;
if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
- ath10k_warn(ar, "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
+ ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
ret = -ENOMEM;
goto err;
@@ -1361,23 +1361,117 @@ static void ath10k_rx_indication_async_work(struct work_struct *work)
napi_schedule(&ar->napi);
}
+static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
+{
+ struct ath10k *ar = ar_sdio->ar;
+ unsigned char rtc_state = 0;
+ int ret = 0;
+
+ rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
+ if (ret) {
+ ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
+ return ret;
+ }
+
+ *state = rtc_state & 0x3;
+
+ return ret;
+}
+
+static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 val;
+ int retry = ATH10K_CIS_READ_RETRY, ret = 0;
+ unsigned char rtc_state = 0;
+
+ sdio_claim_host(ar_sdio->func);
+
+ ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
+ if (ret) {
+ ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
+ ret);
+ goto release;
+ }
+
+ if (enable_sleep) {
+ val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
+ ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
+ } else {
+ val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
+ ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
+ }
+
+ ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
+ if (ret) {
+ ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
+ ret);
+ }
+
+ if (!enable_sleep) {
+ do {
+ udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
+ ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
+ rtc_state);
+
+ if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
+ break;
+
+ udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
+ retry--;
+ } while (retry > 0);
+ }
+
+release:
+ sdio_release_host(ar_sdio->func);
+
+ return ret;
+}
+
+static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
+{
+ struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);
+
+ ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
+ queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+}
+
static void ath10k_sdio_write_async_work(struct work_struct *work)
{
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
wr_async_work);
struct ath10k *ar = ar_sdio->ar;
struct ath10k_sdio_bus_request *req, *tmp_req;
+ struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
spin_lock_bh(&ar_sdio->wr_async_lock);
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
list_del(&req->list);
spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ if (req->address >= mbox_info->htc_addr &&
+ ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
+ ath10k_sdio_set_mbox_sleep(ar, false);
+ mod_timer(&ar_sdio->sleep_timer, jiffies +
+ msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
+ }
+
__ath10k_sdio_write_async(ar, req);
spin_lock_bh(&ar_sdio->wr_async_lock);
}
spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
+ ath10k_sdio_set_mbox_sleep(ar, true);
}
static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
@@ -1444,7 +1538,7 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
/* sdio HIF functions */
-static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
+static int ath10k_sdio_disable_intrs(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
@@ -1500,7 +1594,7 @@ static int ath10k_sdio_hif_power_up(struct ath10k *ar,
ar_sdio->is_disabled = false;
- ret = ath10k_sdio_hif_disable_intrs(ar);
+ ret = ath10k_sdio_disable_intrs(ar);
if (ret)
return ret;
@@ -1517,6 +1611,9 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
+ del_timer_sync(&ar_sdio->sleep_timer);
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
/* Disable the card */
sdio_claim_host(ar_sdio->func);
@@ -1569,7 +1666,7 @@ static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
return 0;
}
-static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
+static int ath10k_sdio_enable_intrs(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
@@ -1617,33 +1714,6 @@ static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
return ret;
}
-static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
-{
- u32 val;
- int ret;
-
- ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
- if (ret) {
- ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
- ret);
- return ret;
- }
-
- if (enable_sleep)
- val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
- else
- val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
-
- ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
- if (ret) {
- ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
- ret);
- return ret;
- }
-
- return 0;
-}
-
/* HIF diagnostics */
static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
@@ -1679,8 +1749,8 @@ out:
return ret;
}
-static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
- u32 *value)
+static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
+ u32 *value)
{
__le32 *val;
int ret;
@@ -1725,7 +1795,7 @@ static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
return 0;
}
-static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
+static int ath10k_sdio_hif_start_post(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
u32 addr, val;
@@ -1733,7 +1803,7 @@ static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
- ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
+ ret = ath10k_sdio_diag_read32(ar, addr, &val);
if (ret) {
ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);
return ret;
@@ -1749,9 +1819,33 @@ static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
ar_sdio->swap_mbox = false;
}
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
return 0;
}
+static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
+{
+ u32 addr, val;
+ int ret;
+
+ addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
+
+ ret = ath10k_sdio_diag_read32(ar, addr, &val);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to read hi_acs_flags for htt tx comple : %d\n", ret);
+ return ret;
+ }
+
+ ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
+ ret ? " " : " not ");
+
+ return ret;
+}
+
/* HIF start/stop */
static int ath10k_sdio_hif_start(struct ath10k *ar)
@@ -1766,7 +1860,7 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
* request before interrupts are disabled.
*/
msleep(20);
- ret = ath10k_sdio_hif_disable_intrs(ar);
+ ret = ath10k_sdio_disable_intrs(ar);
if (ret)
return ret;
@@ -1788,19 +1882,19 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
sdio_release_host(ar_sdio->func);
- ret = ath10k_sdio_hif_enable_intrs(ar);
+ ret = ath10k_sdio_enable_intrs(ar);
if (ret)
ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
/* Enable sleep and then disable it again */
- ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
+ ret = ath10k_sdio_set_mbox_sleep(ar, true);
if (ret)
return ret;
/* Wait for 20ms for the written value to take effect */
msleep(20);
- ret = ath10k_sdio_hif_set_mbox_sleep(ar, false);
+ ret = ath10k_sdio_set_mbox_sleep(ar, false);
if (ret)
return ret;
@@ -2007,17 +2101,6 @@ static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
*dl_pipe = 0;
}
-/* This op is currently only used by htc_wait_target if the HTC ready
- * message times out. It is not applicable for SDIO since there is nothing
- * we can do if the HTC ready message does not arrive in time.
- * TODO: Make this op non mandatory by introducing a NULL check in the
- * hif op wrapper.
- */
-static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
- u8 pipe, int force)
-{
-}
-
static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
.tx_sg = ath10k_sdio_hif_tx_sg,
.diag_read = ath10k_sdio_hif_diag_read,
@@ -2025,10 +2108,10 @@ static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
.exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg,
.start = ath10k_sdio_hif_start,
.stop = ath10k_sdio_hif_stop,
- .swap_mailbox = ath10k_sdio_hif_swap_mailbox,
+ .start_post = ath10k_sdio_hif_start_post,
+ .get_htt_tx_complete = ath10k_sdio_get_htt_tx_complete,
.map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe,
.get_default_pipe = ath10k_sdio_hif_get_default_pipe,
- .send_complete_check = ath10k_sdio_hif_send_complete_check,
.power_up = ath10k_sdio_hif_power_up,
.power_down = ath10k_sdio_hif_power_down,
#ifdef CONFIG_PM
@@ -2053,6 +2136,8 @@ static int ath10k_sdio_pm_suspend(struct device *device)
if (!device_may_wakeup(ar->dev))
return 0;
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
pm_flag = MMC_PM_KEEP_POWER;
ret = sdio_set_host_pm_flags(func, pm_flag);
@@ -2216,6 +2301,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_free_wq;
}
+ timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);
+
return 0;
err_free_wq:
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
index 33195f49acab..29523600887d 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.h
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -37,7 +37,7 @@
(ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr))
#define ATH10K_HIF_MBOX_NUM_MAX 4
-#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM 64
+#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM 1024
#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ)
@@ -98,6 +98,21 @@
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000
+enum sdio_mbox_state {
+ SDIO_MBOX_UNKNOWN_STATE = 0,
+ SDIO_MBOX_REQUEST_TO_SLEEP_STATE = 1,
+ SDIO_MBOX_SLEEP_STATE = 2,
+ SDIO_MBOX_AWAKE_STATE = 3,
+};
+
+#define ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US 125
+#define ATH10K_CIS_RTC_STATE_ADDR 0x1138
+#define ATH10K_CIS_RTC_STATE_ON 0x01
+#define ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US 1500
+#define ATH10K_CIS_READ_RETRY 10
+#define ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS 50
+
+/* TODO: remove this and use skb->cb instead, much cleaner approach */
struct ath10k_sdio_bus_request {
struct list_head list;
@@ -217,6 +232,8 @@ struct ath10k_sdio {
spinlock_t wr_async_lock;
struct work_struct async_work_rx;
+ struct timer_list sleep_timer;
+ enum sdio_mbox_state mbox_state;
};
static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 21081b4a27d7..354d49b1cd45 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -11,6 +11,8 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
+#include <linux/of_address.h>
+#include <linux/iommu.h>
#include "ce.h"
#include "coredump.h"
@@ -356,7 +358,7 @@ static struct ce_pipe_config target_ce_config_wlan[] = {
},
};
-static struct service_to_pipe target_service_to_ce_map_wlan[] = {
+static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = {
{
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@@ -769,7 +771,7 @@ static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
- const struct service_to_pipe *entry;
+ const struct ce_service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
@@ -1393,7 +1395,6 @@ static int ath10k_hw_power_off(struct ath10k *ar)
static void ath10k_msa_dump_memory(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data)
{
- struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
const struct ath10k_hw_mem_layout *mem_layout;
const struct ath10k_mem_region *current_region;
struct ath10k_dump_ram_data_hdr *hdr;
@@ -1419,15 +1420,15 @@ static void ath10k_msa_dump_memory(struct ath10k *ar,
buf_len -= sizeof(*hdr);
hdr->region_type = cpu_to_le32(current_region->type);
- hdr->start = cpu_to_le32((unsigned long)ar_snoc->qmi->msa_va);
- hdr->length = cpu_to_le32(ar_snoc->qmi->msa_mem_size);
+ hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
+ hdr->length = cpu_to_le32(ar->msa.mem_size);
- if (current_region->len < ar_snoc->qmi->msa_mem_size) {
- memcpy(buf, ar_snoc->qmi->msa_va, current_region->len);
+ if (current_region->len < ar->msa.mem_size) {
+ memcpy(buf, ar->msa.vaddr, current_region->len);
ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
- current_region->len, ar_snoc->qmi->msa_mem_size);
+ current_region->len, ar->msa.mem_size);
} else {
- memcpy(buf, ar_snoc->qmi->msa_va, ar_snoc->qmi->msa_mem_size);
+ memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
}
}
@@ -1455,6 +1456,155 @@ void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
mutex_unlock(&ar->dump_mutex);
}
+static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
+{
+ struct device *dev = ar->dev;
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node) {
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret) {
+ dev_err(dev, "failed to resolve msa fixed region\n");
+ return ret;
+ }
+ of_node_put(node);
+
+ ar->msa.paddr = r.start;
+ ar->msa.mem_size = resource_size(&r);
+ ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
+ ar->msa.mem_size,
+ MEMREMAP_WT);
+ if (IS_ERR(ar->msa.vaddr)) {
+ dev_err(dev, "failed to map memory region: %pa\n",
+ &r.start);
+ return PTR_ERR(ar->msa.vaddr);
+ }
+ } else {
+ ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
+ &ar->msa.paddr,
+ GFP_KERNEL);
+ if (!ar->msa.vaddr) {
+ ath10k_err(ar, "failed to allocate dma memory for msa region\n");
+ return -ENOMEM;
+ }
+ ar->msa.mem_size = msa_size;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad, msa.vaddr: 0x%p\n",
+ &ar->msa.paddr,
+ ar->msa.vaddr);
+
+ return 0;
+}
+
+static int ath10k_fw_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct device *host_dev = &ar_snoc->dev->dev;
+ struct platform_device_info info;
+ struct iommu_domain *iommu_dom;
+ struct platform_device *pdev;
+ struct device_node *node;
+ int ret;
+
+ node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
+ if (!node) {
+ ar_snoc->use_tz = true;
+ return 0;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.fwnode = &node->fwnode;
+ info.parent = host_dev;
+ info.name = node->name;
+ info.dma_mask = DMA_BIT_MASK(32);
+
+ pdev = platform_device_register_full(&info);
+ if (IS_ERR(pdev)) {
+ of_node_put(node);
+ return PTR_ERR(pdev);
+ }
+
+ pdev->dev.of_node = node;
+
+ ret = of_dma_configure(&pdev->dev, node, true);
+ if (ret) {
+ ath10k_err(ar, "dma configure fail: %d\n", ret);
+ goto err_unregister;
+ }
+
+ ar_snoc->fw.dev = &pdev->dev;
+
+ iommu_dom = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu_dom) {
+ ath10k_err(ar, "failed to allocate iommu domain\n");
+ ret = -ENOMEM;
+ goto err_unregister;
+ }
+
+ ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev);
+ if (ret) {
+ ath10k_err(ar, "could not attach device: %d\n", ret);
+ goto err_iommu_free;
+ }
+
+ ar_snoc->fw.iommu_domain = iommu_dom;
+ ar_snoc->fw.fw_start_addr = ar->msa.paddr;
+
+ ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
+ ar->msa.paddr, ar->msa.mem_size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ ath10k_err(ar, "failed to map firmware region: %d\n", ret);
+ goto err_iommu_detach;
+ }
+
+ of_node_put(node);
+
+ return 0;
+
+err_iommu_detach:
+ iommu_detach_device(iommu_dom, ar_snoc->fw.dev);
+
+err_iommu_free:
+ iommu_domain_free(iommu_dom);
+
+err_unregister:
+ platform_device_unregister(pdev);
+ of_node_put(node);
+
+ return ret;
+}
+
+static int ath10k_fw_deinit(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ const size_t mapped_size = ar_snoc->fw.mapped_mem_size;
+ struct iommu_domain *iommu;
+ size_t unmapped_size;
+
+ if (ar_snoc->use_tz)
+ return 0;
+
+ iommu = ar_snoc->fw.iommu_domain;
+
+ unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr,
+ mapped_size);
+ if (unmapped_size != mapped_size)
+ ath10k_err(ar, "failed to unmap firmware: %zu\n",
+ unmapped_size);
+
+ iommu_detach_device(iommu, ar_snoc->fw.dev);
+ iommu_domain_free(iommu);
+
+ platform_device_unregister(to_platform_device(ar_snoc->fw.dev));
+
+ return 0;
+}
+
static const struct of_device_id ath10k_snoc_dt_match[] = {
{ .compatible = "qcom,wcn3990-wifi",
.data = &drv_priv,
@@ -1557,16 +1707,31 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_free_irq;
}
+ ret = ath10k_setup_msa_resources(ar, msa_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
+ goto err_power_off;
+ }
+
+ ret = ath10k_fw_init(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
+ goto err_power_off;
+ }
+
ret = ath10k_qmi_init(ar, msa_size);
if (ret) {
ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
- goto err_power_off;
+ goto err_fw_deinit;
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
return 0;
+err_fw_deinit:
+ ath10k_fw_deinit(ar);
+
err_power_off:
ath10k_hw_power_off(ar);
@@ -1598,6 +1763,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_core_unregister(ar);
ath10k_hw_power_off(ar);
+ ath10k_fw_deinit(ar);
ath10k_snoc_free_irq(ar);
ath10k_snoc_release_resource(ar);
ath10k_qmi_deinit(ar);
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index c05df45a3945..a3dd06f6ac62 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -55,6 +55,13 @@ struct regulator_bulk_data;
struct ath10k_snoc {
struct platform_device *dev;
struct ath10k *ar;
+ unsigned int use_tz;
+ struct ath10k_firmware {
+ struct device *dev;
+ dma_addr_t fw_start_addr;
+ struct iommu_domain *iommu_domain;
+ size_t mapped_mem_size;
+ } fw;
void __iomem *mem;
dma_addr_t mem_pa;
struct ath10k_snoc_target_info target_info;
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 39abf8b12903..f46b9083bbf1 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -84,9 +84,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
+ rcu_read_lock();
if (txq && txq->sta && skb_cb->airtime_est)
ieee80211_sta_register_airtime(txq->sta, txq->tid,
skb_cb->airtime_est, 0);
+ rcu_read_unlock();
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index 1e0343081be9..b7daf344d012 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -693,17 +693,6 @@ static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id,
return 0;
}
-/* This op is currently only used by htc_wait_target if the HTC ready
- * message times out. It is not applicable for USB since there is nothing
- * we can do if the HTC ready message does not arrive in time.
- * TODO: Make this op non mandatory by introducing a NULL check in the
- * hif op wrapper.
- */
-static void ath10k_usb_hif_send_complete_check(struct ath10k *ar,
- u8 pipe, int force)
-{
-}
-
static int ath10k_usb_hif_power_up(struct ath10k *ar,
enum ath10k_firmware_mode fw_mode)
{
@@ -737,7 +726,6 @@ static const struct ath10k_hif_ops ath10k_usb_hif_ops = {
.stop = ath10k_usb_hif_stop,
.map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe,
.get_default_pipe = ath10k_usb_hif_get_default_pipe,
- .send_complete_check = ath10k_usb_hif_send_complete_check,
.get_free_queue_number = ath10k_usb_hif_get_free_queue_number,
.power_up = ath10k_usb_hif_power_up,
.power_down = ath10k_usb_hif_power_down,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 1491c25518bb..6b730f59fd5b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -126,6 +126,13 @@ struct wmi_ops {
struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
+ struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_peer_stats_info_request_type type,
+ u8 *addr,
+ u32 reset);
struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
enum wmi_force_fw_hang_type type,
u32 delay_ms);
@@ -1065,6 +1072,29 @@ ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
}
static inline int
+ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_peer_stats_info_request_type type,
+ u8 *addr,
+ u32 reset)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_request_peer_stats_info)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
+ vdev_id,
+ type,
+ addr,
+ reset);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
+}
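
The wrapper above only encodes and queues the WMI command. A minimal caller sketch, assuming the usual ath10k context of an arvif and a mac80211 sta (both names are placeholders for whatever the real call site has in scope):

	int ret;

	ret = ath10k_wmi_request_peer_stats_info(ar, arvif->vdev_id,
						 WMI_REQUEST_ONE_PEER_STATS_INFO,
						 sta->addr, 1 /* reset after read */);
	if (ret && ret != -EOPNOTSUPP)
		ath10k_warn(ar, "failed to request peer stats info: %d\n", ret);

The ar->peer_stats_info_complete completion, signalled by the TLV event handler further down in this diff, is presumably what such a caller would wait on before reading the cached rates.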
+
+static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms)
{
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 4e68debda9bf..9187b62b331c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -219,6 +219,91 @@ static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
complete(&ar->vdev_delete_done);
}
+static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_tlv_peer_stats_info *stat = ptr;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+
+ if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
+ return -EPROTO;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
+ stat->peer_macaddr.addr,
+ __le32_to_cpu(stat->last_rx_rate_code),
+ __le32_to_cpu(stat->last_rx_bitrate_kbps));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
+ __le32_to_cpu(stat->last_tx_rate_code),
+ __le32_to_cpu(stat->last_tx_bitrate_kbps));
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
+ if (!sta) {
+ ath10k_warn(ar, "not found station for peer stats\n");
+ return -EINVAL;
+ }
+
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
+ arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
+ arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
+ arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_peer_stats_info_ev *ev;
+ const void *data;
+ u32 num_peer_stats;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
+ data = tb[WMI_TLV_TAG_ARRAY_STRUCT];
+
+ if (!ev || !data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ num_peer_stats = __le32_to_cpu(ev->num_peers);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
+ __le32_to_cpu(ev->vdev_id),
+ num_peer_stats,
+ __le32_to_cpu(ev->more_data));
+
+ ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
+ ath10k_wmi_tlv_parse_peer_stats_info, NULL);
+ if (ret)
+ ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
+ ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
+ complete(&ar->peer_stats_info_complete);
+}
+
static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
struct sk_buff *skb)
{
@@ -576,6 +661,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_UPDATE_STATS_EVENTID:
ath10k_wmi_event_update_stats(ar, skb);
break;
+ case WMI_TLV_PEER_STATS_INFO_EVENTID:
+ ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
+ break;
case WMI_TLV_VDEV_START_RESP_EVENTID:
ath10k_wmi_event_vdev_start_resp(ar, skb);
break;
@@ -2123,7 +2211,7 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
tlv->len = __cpu_to_le16(sizeof(*ch));
ch = (void *)tlv->value;
- ath10k_wmi_put_wmi_channel(ch, &arg->channel);
+ ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);
ptr += sizeof(*tlv);
ptr += sizeof(*ch);
@@ -2763,7 +2851,7 @@ ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
tlv->len = __cpu_to_le16(sizeof(*ci));
ci = (void *)tlv->value;
- ath10k_wmi_put_wmi_channel(ci, ch);
+ ath10k_wmi_put_wmi_channel(ar, ci, ch);
chans += sizeof(*tlv);
chans += sizeof(*ci);
@@ -2897,6 +2985,36 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
return skb;
}
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_peer_stats_info_request_type type,
+ u8 *addr,
+ u32 reset)
+{
+ struct wmi_tlv_request_peer_stats_info *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->request_type = __cpu_to_le32(type);
+
+ if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
+ ether_addr_copy(cmd->peer_macaddr.addr, addr);
+
+ cmd->reset_after_request = __cpu_to_le32(reset);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
+ return skb;
+}
+
static int
ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
dma_addr_t paddr)
@@ -3450,7 +3568,7 @@ ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
tlv->len = __cpu_to_le16(sizeof(*chan));
chan = (void *)tlv->value;
- ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+ ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
ptr += sizeof(*tlv);
ptr += sizeof(*chan);
@@ -4113,6 +4231,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
+ .request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
.network_list_offload_config_cmdid =
WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
@@ -4269,6 +4388,7 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
.rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
+ .peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
};
static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
@@ -4416,6 +4536,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
+ .gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info,
.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
/* .gen_mgmt_tx = not implemented; HTT is used */
.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 4972dc12991c..6e0537dabd1d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -198,6 +198,12 @@ enum wmi_tlv_cmd_id {
WMI_TLV_REQUEST_LINK_STATS_CMDID,
WMI_TLV_START_LINK_STATS_CMDID,
WMI_TLV_CLEAR_LINK_STATS_CMDID,
+ WMI_TLV_GET_FW_MEM_DUMP_CMDID,
+ WMI_TLV_DEBUG_MESG_FLUSH_CMDID,
+ WMI_TLV_DIAG_EVENT_LOG_CONFIG_CMDID,
+ WMI_TLV_REQUEST_WLAN_STATS_CMDID,
+ WMI_TLV_REQUEST_RCPI_CMDID,
+ WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
@@ -338,6 +344,13 @@ enum wmi_tlv_event_id {
WMI_TLV_IFACE_LINK_STATS_EVENTID,
WMI_TLV_PEER_LINK_STATS_EVENTID,
WMI_TLV_RADIO_LINK_STATS_EVENTID,
+ WMI_TLV_UPDATE_FW_MEM_DUMP_EVENTID,
+ WMI_TLV_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+ WMI_TLV_INST_RSSI_STATS_EVENTID,
+ WMI_TLV_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ WMI_TLV_REPORT_STATS_EVENTID,
+ WMI_TLV_UPDATE_RCPI_EVENTID,
+ WMI_TLV_PEER_STATS_INFO_EVENTID,
WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
WMI_TLV_APFIND_EVENTID,
@@ -451,6 +464,7 @@ enum wmi_tlv_pdev_param {
WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
+ WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE = 0x8b,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
};
@@ -2081,6 +2095,94 @@ struct wmi_tlv_stats_ev {
__le32 num_peer_stats_extd;
} __packed;
+struct wmi_tlv_peer_stats_info_ev {
+ __le32 vdev_id;
+ __le32 num_peers;
+ __le32 more_data;
+} __packed;
+
+#define WMI_TLV_MAX_CHAINS 8
+
+struct wmi_tlv_peer_stats_info {
+ struct wmi_mac_addr peer_macaddr;
+ struct {
+ /* lower 32 bits of the tx_bytes value */
+ __le32 low_32;
+ /* upper 32 bits of the tx_bytes value */
+ __le32 high_32;
+ } __packed tx_bytes;
+ struct {
+ /* lower 32 bits of the tx_packets value */
+ __le32 low_32;
+ /* upper 32 bits of the tx_packets value */
+ __le32 high_32;
+ } __packed tx_packets;
+ struct {
+ /* lower 32 bits of the rx_bytes value */
+ __le32 low_32;
+ /* upper 32 bits of the rx_bytes value */
+ __le32 high_32;
+ } __packed rx_bytes;
+ struct {
+ /* lower 32 bits of the rx_packets value */
+ __le32 low_32;
+ /* upper 32 bits of the rx_packets value */
+ __le32 high_32;
+ } __packed rx_packets;
+ __le32 tx_retries;
+ __le32 tx_failed;
+
+ /* rate information, it is output of WMI_ASSEMBLE_RATECODE_V1
+ * (in format of 0x1000RRRR)
+ * The rate-code is a 4-bytes field in which,
+ * for given rate, nss and preamble
+ *
+ * b'31-b'29 unused / reserved
+ * b'28 indicate the version of rate-code (1 = RATECODE_V1)
+ * b'27-b'11 unused / reserved
+ * b'10-b'8 indicate the preamble (0 OFDM, 1 CCK, 2 HT, 3 VHT)
+ * b'7-b'5 indicate the NSS (0 - 1x1, 1 - 2x2, 2 - 3x3, 3 - 4x4)
+ * b'4-b'0 indicate the rate, which is indicated as follows:
+ * OFDM : 0: OFDM 48 Mbps
+ * 1: OFDM 24 Mbps
+ * 2: OFDM 12 Mbps
+ * 3: OFDM 6 Mbps
+ * 4: OFDM 54 Mbps
+ * 5: OFDM 36 Mbps
+ * 6: OFDM 18 Mbps
+ * 7: OFDM 9 Mbps
+ * CCK (pream == 1)
+ * 0: CCK 11 Mbps Long
+ * 1: CCK 5.5 Mbps Long
+ * 2: CCK 2 Mbps Long
+ * 3: CCK 1 Mbps Long
+ * 4: CCK 11 Mbps Short
+ * 5: CCK 5.5 Mbps Short
+ * 6: CCK 2 Mbps Short
+ * HT/VHT (pream == 2/3)
+ * 0..7: MCS0..MCS7 (HT)
+ * 0..9: MCS0..MCS9 (11AC VHT)
+ * 0..11: MCS0..MCS11 (11AX HE)
+ * rate-code of the last transmission
+ */
+ __le32 last_tx_rate_code;
+ __le32 last_rx_rate_code;
+ __le32 last_tx_bitrate_kbps;
+ __le32 last_rx_bitrate_kbps;
+ __le32 peer_rssi;
+ __le32 tx_succeed;
+ __le32 peer_rssi_per_chain[WMI_TLV_MAX_CHAINS];
+} __packed;
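
The byte and packet counters above travel as two independent little-endian 32-bit halves rather than a single __le64. A small sketch of reassembling one of them on the host (stat points at the structure above):

	u64 tx_bytes;

	tx_bytes = ((u64)__le32_to_cpu(stat->tx_bytes.high_32) << 32) |
		   __le32_to_cpu(stat->tx_bytes.low_32);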
+
+#define HW_RATECODE_PREAM_V1_MASK GENMASK(10, 8)
+#define WMI_TLV_GET_HW_RC_PREAM_V1(rc) FIELD_GET(HW_RATECODE_PREAM_V1_MASK, rc)
+
+#define HW_RATECODE_NSS_V1_MASK GENMASK(7, 5)
+#define WMI_TLV_GET_HW_RC_NSS_V1(rc) FIELD_GET(HW_RATECODE_NSS_V1_MASK, rc)
+
+#define HW_RATECODE_RATE_V1_MASK GENMASK(4, 0)
+#define WMI_TLV_GET_HW_RC_RATE_V1(rc) FIELD_GET(HW_RATECODE_RATE_V1_MASK, rc)
+
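
Decoding a cached V1 rate code with the helper macros above is straightforward; the example value is illustrative, and arsta is assumed to be the ath10k_sta whose fields are filled in by the peer-stats parser in wmi-tlv.c:

	u32 rc = arsta->rx_rate_code;			/* e.g. 0x10000329 */
	u8 preamble = WMI_TLV_GET_HW_RC_PREAM_V1(rc);	/* 3 = VHT */
	u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rc) + 1;	/* field stores NSS - 1, so 2x2 here */
	u8 mcs = WMI_TLV_GET_HW_RC_RATE_V1(rc);		/* MCS 9 */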
struct wmi_tlv_p2p_noa_ev {
__le32 vdev_id;
} __packed;
@@ -2097,6 +2199,14 @@ struct wmi_tlv_wow_add_del_event_cmd {
__le32 event_bitmap;
} __packed;
+struct wmi_tlv_request_peer_stats_info {
+ __le32 request_type;
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ __le32 reset_after_request;
+} __packed;
+
/* Command to set/unset chip in quiet mode */
struct wmi_tlv_set_quiet_cmd {
__le32 vdev_id;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 2ea77bb880b1..a81a1ab2de19 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1694,10 +1694,11 @@ static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
.bw160 = WMI_10_2_PEER_160MHZ,
};
-void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
const struct wmi_channel_arg *arg)
{
u32 flags = 0;
+ struct ieee80211_channel *chan = NULL;
memset(ch, 0, sizeof(*ch));
@@ -1714,12 +1715,39 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
if (arg->chan_radar)
flags |= WMI_CHAN_FLAG_DFS;
+ ch->band_center_freq2 = 0;
ch->mhz = __cpu_to_le32(arg->freq);
ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
- if (arg->mode == MODE_11AC_VHT80_80)
+ if (arg->mode == MODE_11AC_VHT80_80) {
ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
- else
- ch->band_center_freq2 = 0;
+ chan = ieee80211_get_channel(ar->hw->wiphy,
+ arg->band_center_freq2 - 10);
+ }
+
+ if (arg->mode == MODE_11AC_VHT160) {
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+
+ if (arg->freq > arg->band_center_freq1) {
+ band_center_freq1 = arg->band_center_freq1 + 40;
+ band_center_freq2 = arg->band_center_freq1 - 40;
+ } else {
+ band_center_freq1 = arg->band_center_freq1 - 40;
+ band_center_freq2 = arg->band_center_freq1 + 40;
+ }
+
+ ch->band_center_freq1 =
+ __cpu_to_le32(band_center_freq1);
+ /* Minus 10 to get a defined 5 GHz channel frequency */
+ chan = ieee80211_get_channel(ar->hw->wiphy,
+ band_center_freq2 - 10);
+ /* The center frequency of the entire VHT160 */
+ ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq1);
+ }
+
+ if (chan && chan->flags & IEEE80211_CHAN_RADAR)
+ flags |= WMI_CHAN_FLAG_DFS_CFREQ2;
+
ch->min_power = arg->min_power;
ch->max_power = arg->max_power;
ch->reg_power = arg->max_reg_power;
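
A worked example of the VHT160 handling above, with illustrative numbers that are not taken from the patch: a VHT160 BSS whose primary 20 MHz channel sits at 5180 MHz and whose 160 MHz center (band_center_freq1) is 5250 MHz.

	/* freq (5180) < band_center_freq1 (5250), so: */
	band_center_freq1 = 5250 - 40;	/* 5210 MHz, center of the primary 80 MHz half */
	band_center_freq2 = 5250 + 40;	/* 5290 MHz, center of the secondary 80 MHz half */

	/* The radar lookup uses 5290 - 10 = 5280 MHz, a defined 20 MHz
	 * channel, while ch->band_center_freq2 keeps the full 160 MHz
	 * center (5250 MHz) for the firmware.
	 */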
@@ -7165,7 +7193,7 @@ ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
}
- ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
+ ath10k_wmi_put_wmi_channel(ar, &cmd->chan, &arg->channel);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
@@ -7537,7 +7565,7 @@ ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
ch = &arg->channels[i];
ci = &cmd->chan_info[i];
- ath10k_wmi_put_wmi_channel(ci, ch);
+ ath10k_wmi_put_wmi_channel(ar, ci, ch);
}
return skb;
@@ -7628,12 +7656,8 @@ ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
- if (arg->peer_bw_rxnss_override)
- cmd->peer_bw_rxnss_override =
- __cpu_to_le32((arg->peer_bw_rxnss_override - 1) |
- BIT(PEER_BW_RXNSS_OVERRIDE_OFFSET));
- else
- cmd->peer_bw_rxnss_override = 0;
+ cmd->peer_bw_rxnss_override =
+ __cpu_to_le32(arg->peer_bw_rxnss_override);
}
static int
@@ -8312,7 +8336,7 @@ ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs delivered to stack", pdev->loc_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Oversized AMSUs", pdev->oversize_amsdu);
+ "Oversized AMSDUs", pdev->oversize_amsdu);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY errors", pdev->phy_errs);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
@@ -8945,7 +8969,7 @@ ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
for (i = 0; i < cap->peer_chan_len; i++) {
chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
- ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+ ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
}
ath10k_dbg(ar, ATH10K_DBG_WMI,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 6df415778374..0f05405bebc0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -940,6 +940,7 @@ struct wmi_cmd_map {
u32 vdev_spectral_scan_configure_cmdid;
u32 vdev_spectral_scan_enable_cmdid;
u32 request_stats_cmdid;
+ u32 request_peer_stats_info_cmdid;
u32 set_arp_ns_offload_cmdid;
u32 network_list_offload_config_cmdid;
u32 gtk_offload_cmdid;
@@ -2094,7 +2095,8 @@ enum wmi_channel_change_cause {
/* Indicate reason for channel switch */
#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
-
+/* DFS required on the channel for the 2nd segment of VHT160 and VHT80+80 */
+#define WMI_CHAN_FLAG_DFS_CFREQ2 (1 << 15)
#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */
/* HT Capabilities*/
@@ -3797,6 +3799,7 @@ struct wmi_pdev_param_map {
u32 enable_btcoex;
u32 rfkill_config;
u32 rfkill_enable;
+ u32 peer_stats_info_enable;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -4577,6 +4580,13 @@ struct wmi_request_stats_cmd {
struct wlan_inst_rssi_args inst_rssi_args;
} __packed;
+enum wmi_peer_stats_info_request_type {
+ /* request stats of one specified peer */
+ WMI_REQUEST_ONE_PEER_STATS_INFO = 0x01,
+ /* request stats of all peers belonging to the specified VDEV */
+ WMI_REQUEST_VDEV_ALL_PEER_STATS_INFO = 0x02,
+};
+
/* Suspend option */
enum {
/* suspend */
@@ -6508,7 +6518,10 @@ struct wmi_10_2_peer_assoc_complete_cmd {
__le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
} __packed;
-#define PEER_BW_RXNSS_OVERRIDE_OFFSET 31
+/* NSS Mapping to FW */
+#define WMI_PEER_NSS_MAP_ENABLE BIT(31)
+#define WMI_PEER_NSS_160MHZ_MASK GENMASK(2, 0)
+#define WMI_PEER_NSS_80_80MHZ_MASK GENMASK(5, 3)
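
With the pass-through change to ath10k_wmi_peer_assoc_fill_10_4 shown earlier in this diff, peer_bw_rxnss_override is expected to arrive pre-encoded. A hedged sketch of how a caller might build it with these masks; the nss_160 variable and the NSS-minus-one convention are assumptions carried over from the old encoding, not something this hunk shows:

	u32 override = 0;

	if (nss_160)	/* supported spatial streams at 160 MHz */
		override = WMI_PEER_NSS_MAP_ENABLE |
			   FIELD_PREP(WMI_PEER_NSS_160MHZ_MASK, nss_160 - 1) |
			   FIELD_PREP(WMI_PEER_NSS_80_80MHZ_MASK, nss_160 - 1);

	arg->peer_bw_rxnss_override = override;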
struct wmi_10_4_peer_assoc_complete_cmd {
struct wmi_10_2_peer_assoc_complete_cmd cmd;
@@ -7348,7 +7361,7 @@ void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
const struct wmi_start_scan_arg *arg);
void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
const struct wmi_wmm_params_arg *arg);
-void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
const struct wmi_channel_arg *arg);
int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 59342d2797ca..3b2b76d602f2 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -788,7 +788,7 @@ static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
irq = platform_get_irq_byname(ab->pdev,
irq_name[irq_idx]);
ab->irq_num[irq_idx] = irq;
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
IRQF_TRIGGER_RISING,
irq_name[irq_idx], irq_grp);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 6e7b8ecd09a6..70ec544eee67 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -60,9 +60,14 @@ static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
WME_AC_VO);
}
+enum ath11k_skb_flags {
+ ATH11K_SKB_HW_80211_ENCAP = BIT(0),
+};
+
struct ath11k_skb_cb {
dma_addr_t paddr;
u8 eid;
+ u8 flags;
struct ath11k *ar;
struct ieee80211_vif *vif;
} __packed;
@@ -341,6 +346,11 @@ struct ath11k_sta {
u8 rssi_comb;
struct ath11k_htt_tx_stats *tx_stats;
struct ath11k_rx_peer_stats *rx_stats;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* protected by conf_mutex */
+ bool aggr_mode;
+#endif
};
#define ATH11K_NUM_CHANS 41
@@ -387,6 +397,7 @@ struct ath11k_debug {
u32 pktlog_mode;
u32 pktlog_peer_valid;
u8 pktlog_peer_addr[ETH_ALEN];
+ u32 rx_filter;
};
struct ath11k_per_peer_tx_stats {
@@ -650,6 +661,10 @@ struct ath11k_base {
/* protected by data_lock */
u32 fw_crash_counter;
} stats;
+ u32 pktlog_defs_checksum;
+
+ /* Round-robin based TCL ring selector */
+ atomic_t tcl_ring_selector;
};
struct ath11k_fw_stats_pdev {
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
index 8d485171b0b3..3fd6b5af073b 100644
--- a/drivers/net/wireless/ath/ath11k/debug.c
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -195,7 +195,7 @@ void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
total_vdevs_started += ar->num_started_vdevs;
}
- is_end = ((++num_vdev) == total_vdevs_started ? true : false);
+ is_end = ((++num_vdev) == total_vdevs_started);
list_splice_tail_init(&stats.vdevs,
&ar->debug.fw_stats.vdevs);
@@ -215,7 +215,7 @@ void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
/* Mark end until we reached the count of all started VDEVs
* within the PDEV
*/
- is_end = ((++num_bcn) == ar->num_started_vdevs ? true : false);
+ is_end = ((++num_bcn) == ar->num_started_vdevs);
list_splice_tail_init(&stats.bcn,
&ar->debug.fw_stats.bcn);
@@ -698,6 +698,8 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
tlv_filter = ath11k_mac_mon_status_filter_default;
}
+ ar->debug.rx_filter = tlv_filter.rx_filter;
+
ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS,
@@ -803,6 +805,9 @@ static const struct file_operations fops_soc_rx_stats = {
int ath11k_debug_pdev_create(struct ath11k_base *ab)
{
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index 97e7306c506d..c30085406bfb 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -67,7 +67,7 @@ struct debug_htt_stats_req {
u8 peer_addr[ETH_ALEN];
struct completion cmpln;
u32 buf_len;
- u8 buf[0];
+ u8 buf[];
};
struct ath_pktlog_hdr {
@@ -77,9 +77,11 @@ struct ath_pktlog_hdr {
u16 size;
u32 timestamp;
u32 type_specific_data;
- u8 payload[0];
+ u8 payload[];
};
+#define ATH11K_HTT_PEER_STATS_RESET BIT(16)
+
#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
@@ -112,6 +114,12 @@ enum ath11k_pktlog_enum {
ATH11K_PKTLOG_TYPE_LITE_RX = 24,
};
+enum ath11k_dbg_aggr_mode {
+ ATH11K_DBG_AGGR_MODE_AUTO,
+ ATH11K_DBG_AGGR_MODE_MANUAL,
+ ATH11K_DBG_AGGR_MODE_MAX,
+};
+
__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);
@@ -182,6 +190,11 @@ static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
return ar->debug.extd_rx_stats;
}
+static inline int ath11k_debug_rx_filter(struct ath11k *ar)
+{
+ return ar->debug.rx_filter;
+}
+
void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
void
@@ -263,6 +276,11 @@ static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr
return false;
}
+static inline int ath11k_debug_rx_filter(struct ath11k *ar)
+{
+ return 0;
+}
+
static inline void
ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
struct ath11k_per_peer_tx_stats *peer_stats,
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
index 23a6baa9e95a..682a6ff222bd 100644
--- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
@@ -239,7 +239,7 @@ struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v {
*/
struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v {
u32 hist_bin_size;
- u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
+ u32 tried_mpdu_cnt_hist[]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
};
/* == SOC ERROR STATS == */
@@ -550,7 +550,7 @@ struct htt_tx_hwq_stats_cmn_tlv {
struct htt_tx_hwq_difs_latency_stats_tlv_v {
u32 hist_intvl;
/* histogram of ppdu post to hwsch - > cmd status received */
- u32 difs_latency_hist[0]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
+ u32 difs_latency_hist[]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
};
/* NOTE: Variable length TLV, use length spec to infer array size */
@@ -586,7 +586,7 @@ struct htt_tx_hwq_fes_result_stats_tlv_v {
struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
u32 hist_bin_size;
/* Histogram of number of mpdus on tried mpdu */
- u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
+ u32 tried_mpdu_cnt_hist[]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
};
/* NOTE: Variable length TLV, use length spec to infer array size
@@ -1584,7 +1584,7 @@ struct htt_pdev_stats_twt_session_tlv {
struct htt_pdev_stats_twt_sessions_tlv {
u32 pdev_id;
u32 num_sessions;
- struct htt_pdev_stats_twt_session_tlv twt_session[0];
+ struct htt_pdev_stats_twt_session_tlv twt_session[];
};
enum htt_rx_reo_resource_sample_id_enum {
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 389dac219238..7308ed254232 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -8,6 +8,8 @@
#include "core.h"
#include "peer.h"
#include "debug.h"
+#include "dp_tx.h"
+#include "debug_htt_stats.h"
void
ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
@@ -435,13 +437,22 @@ ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
return 0;
out:
vfree(stats_req);
+ ar->debug.htt_stats.stats_req = NULL;
return ret;
}
static int
ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
{
+ struct ieee80211_sta *sta = inode->i_private;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
vfree(file->private_data);
+ ar->debug.htt_stats.stats_req = NULL;
+ mutex_unlock(&ar->conf_mutex);
+
return 0;
}
@@ -533,6 +544,282 @@ static const struct file_operations fops_peer_pktlog = {
.llseek = default_llseek,
};
+static ssize_t ath11k_dbg_sta_write_delba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, initiator, reason;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
+ if (ret != 3)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, initiator, reason);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, initiator,
+ reason);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_delba = {
+ .write = ath11k_dbg_sta_write_delba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, status;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &status);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, status);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, status);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba_resp = {
+ .write = ath11k_dbg_sta_write_addba_resp,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_addba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, buf_size;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &buf_size);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, buf_size);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, buf_size);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba = {
+ .write = ath11k_dbg_sta_write_addba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_aggr_mode(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[64];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len,
+ "aggregation mode: %s\n\n%s\n%s\n",
+ (arsta->aggr_mode == ATH11K_DBG_AGGR_MODE_AUTO) ?
+ "auto" : "manual", "auto = 0", "manual = 1");
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_dbg_sta_write_aggr_mode(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 aggr_mode;
+ int ret;
+
+ if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
+ return -EINVAL;
+
+ if (aggr_mode >= ATH11K_DBG_AGGR_MODE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ aggr_mode == arsta->aggr_mode) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear addba session ret: %d\n",
+ ret);
+ goto out;
+ }
+
+ arsta->aggr_mode = aggr_mode;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_aggr_mode = {
+ .read = ath11k_dbg_sta_read_aggr_mode,
+ .write = ath11k_dbg_sta_write_aggr_mode,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+ath11k_write_htt_peer_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ int ret;
+ u8 type;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (!type)
+ return ret;
+
+ mutex_lock(&ar->conf_mutex);
+ cfg_params.cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params.cfg0 |= FIELD_PREP(GENMASK(15, 1),
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
+
+ cfg_params.cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
+
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(7, 0), sta->addr[0]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(15, 8), sta->addr[1]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(23, 16), sta->addr[2]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(31, 24), sta->addr[3]);
+
+ cfg_params.cfg3 |= FIELD_PREP(GENMASK(7, 0), sta->addr[4]);
+ cfg_params.cfg3 |= FIELD_PREP(GENMASK(15, 8), sta->addr[5]);
+
+ cfg_params.cfg3 |= ATH11K_HTT_PEER_STATS_RESET;
+
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_INFO,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt peer stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ ret = count;
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_peer_stats_reset = {
+ .write = ath11k_write_htt_peer_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
@@ -550,4 +837,14 @@ void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
debugfs_create_file("peer_pktlog", 0644, dir, sta,
&fops_peer_pktlog);
+
+ debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
+ debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
+ debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
+ debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
+
+ if (test_bit(WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET,
+ ar->ab->wmi_ab.svc_map))
+ debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
+ &fops_htt_peer_stats_reset);
}
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 50350f77b309..145015d2f49c 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -880,6 +880,8 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
spin_lock_init(&dp->reo_cmd_lock);
+ dp->reo_cmd_cache_flush_count = 0;
+
ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
@@ -909,8 +911,10 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
dp->tx_ring[i].tx_status_head = 0;
dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
- if (!dp->tx_ring[i].tx_status)
+ if (!dp->tx_ring[i].tx_status) {
+ ret = -ENOMEM;
goto fail_cmn_srng_cleanup;
+ }
}
for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 551f9c9fb847..222de10e4b93 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -36,6 +36,7 @@ struct dp_rx_tid {
struct ath11k_base *ab;
};
+#define DP_REO_DESC_FREE_THRESHOLD 64
#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
struct dp_reo_cache_flush_elem {
@@ -169,8 +170,8 @@ struct ath11k_pdev_dp {
#define DP_WBM_RELEASE_RING_SIZE 64
#define DP_TCL_DATA_RING_SIZE 512
-#define DP_TX_COMP_RING_SIZE 8192
-#define DP_TX_IDR_SIZE (DP_TX_COMP_RING_SIZE << 1)
+#define DP_TX_COMP_RING_SIZE 32768
+#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
#define DP_TCL_CMD_RING_SIZE 32
#define DP_TCL_STATUS_RING_SIZE 32
#define DP_REO_DST_RING_MAX 4
@@ -222,7 +223,13 @@ struct ath11k_dp {
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
struct list_head reo_cmd_list;
struct list_head reo_cmd_cache_flush_list;
- /* protects access to reo_cmd_list and reo_cmd_cache_flush_list */
+ u32 reo_cmd_cache_flush_count;
+ /*
+ * Protects access to the fields below:
+ * - reo_cmd_list
+ * - reo_cmd_cache_flush_list
+ * - reo_cmd_cache_flush_count
+ */
spinlock_t reo_cmd_lock;
};
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index f74a0e74bf3e..85670608c3e2 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -252,7 +252,7 @@ static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
__le32_to_cpu(rx_desc->mpdu_start_tag));
- return tlv_tag == HAL_RX_MPDU_START ? true : false;
+ return tlv_tag == HAL_RX_MPDU_START;
}
static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
@@ -565,6 +565,7 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_for_each_entry_safe(cmd_cache, tmp_cache,
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
+ dp->reo_cmd_cache_flush_count--;
dma_unmap_single(ab->dev, cmd_cache->data.paddr,
cmd_cache->data.size, DMA_BIDIRECTIONAL);
kfree(cmd_cache->data.vaddr);
@@ -651,15 +652,18 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
spin_lock_bh(&dp->reo_cmd_lock);
list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
+ dp->reo_cmd_cache_flush_count++;
spin_unlock_bh(&dp->reo_cmd_lock);
/* Flush and invalidate aged REO desc from HW cache */
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
list) {
- if (time_after(jiffies, elem->ts +
+ if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
+ time_after(jiffies, elem->ts +
msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
list_del(&elem->list);
+ dp->reo_cmd_cache_flush_count--;
spin_unlock_bh(&dp->reo_cmd_lock);
ath11k_dp_reo_cache_flush(ab, &elem->data);
@@ -892,7 +896,7 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
else
hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
- vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
if (!vaddr) {
spin_unlock_bh(&ab->base_lock);
return -ENOMEM;
@@ -1491,7 +1495,8 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
return;
}
- trace_ath11k_htt_pktlog(ar, data->payload, hdr->size);
+ trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
+ ar->ab->pktlog_defs_checksum);
}
static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
@@ -2265,6 +2270,7 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
struct ieee80211_hdr *hdr;
struct sk_buff *last_buf;
u8 l3_pad_bytes;
+ u8 *hdr_status;
u16 msdu_len;
int ret;
@@ -2293,8 +2299,13 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
skb_pull(msdu, HAL_RX_DESC_SIZE);
} else if (!rxcb->is_continuation) {
if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+ hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
ret = -EINVAL;
ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
+ sizeof(struct ieee80211_hdr));
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(struct hal_rx_desc));
goto free_out;
}
skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
@@ -2402,12 +2413,12 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
try_again:
while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
- struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
+ struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
- desc->buf_addr_info.info1);
+ desc.buf_addr_info.info1);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
@@ -2435,7 +2446,7 @@ try_again:
total_msdu_reaped++;
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
- desc->info0);
+ desc.info0);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
@@ -2443,15 +2454,15 @@ try_again:
continue;
}
- rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
+ rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
+ rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
+ rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->mac_id = mac_id;
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
- desc->info0);
+ desc.info0);
__skb_queue_tail(&msdu_list, msdu);
@@ -2960,8 +2971,8 @@ static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer
return 0;
mic_fail:
- (ATH11K_SKB_RXCB(msdu))->is_first_msdu = 1;
- (ATH11K_SKB_RXCB(msdu))->is_last_msdu = 1;
+ (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
+ (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
@@ -3389,6 +3400,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool
struct sk_buff *msdu;
struct ath11k_skb_rxcb *rxcb;
struct hal_rx_desc *rx_desc;
+ u8 *hdr_status;
u16 msdu_len;
spin_lock_bh(&rx_ring->idr_lock);
@@ -3426,6 +3438,17 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool
rx_desc = (struct hal_rx_desc *)msdu->data;
msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
+ if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+ hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
+ ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
+ sizeof(struct ieee80211_hdr));
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(struct hal_rx_desc));
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 7aac4b0eea0c..41c990aec6b7 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -9,14 +9,14 @@
#include "hw.h"
#include "peer.h"
-/* NOTE: Any of the mapped ring id value must not exceed DP_TCL_NUM_RING_MAX */
-static const u8
-ath11k_txq_tcl_ring_map[ATH11K_HW_MAX_QUEUES] = { 0x0, 0x1, 0x2, 0x2 };
-
static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
{
- /* TODO: Determine encap type based on vif_type and configuration */
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
+ return HAL_TCL_ENCAP_TYPE_ETHERNET;
+
return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}
@@ -40,8 +40,11 @@ static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);
- if (!ieee80211_is_data_qos(hdr->frame_control))
+ if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ else if (!ieee80211_is_data_qos(hdr->frame_control))
return HAL_DESC_REO_NON_QOS_TID;
else
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
@@ -84,15 +87,31 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
u8 pool_id;
u8 hal_ring_id;
int ret;
+ u8 ring_selector = 0, ring_map = 0;
+ bool tcl_ring_retry;
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
- if (!ieee80211_is_data(hdr->frame_control))
+ if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
+ !ieee80211_is_data(hdr->frame_control))
return -ENOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
- ti.ring_id = ath11k_txq_tcl_ring_map[pool_id];
+
+	/* By default, select one of the TCL rings in a round-robin
+	 * fashion based on the tcl_ring_selector counter. If that ring
+	 * is full/busy, fall back to one of the other available rings.
+	 * If all rings are full, the packet is dropped.
+	 * TODO: Add throttling logic when all rings are full.
+	 */
+ ring_selector = atomic_inc_return(&ab->tcl_ring_selector);
+
+tcl_ring_sel:
+ tcl_ring_retry = false;
+ ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;
+ ring_map |= BIT(ti.ring_id);
tx_ring = &dp->tx_ring[ti.ring_id];
@@ -101,8 +120,14 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
spin_unlock_bh(&tx_ring->tx_idr_lock);
- if (ret < 0)
- return -ENOSPC;
+ if (ret < 0) {
+ if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1))
+ return -ENOSPC;
+
+ /* Check if the next ring is available */
+ ring_selector++;
+ goto tcl_ring_sel;
+ }
ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
@@ -149,7 +174,10 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
* skb_checksum_help() is needed
*/
case HAL_TCL_ENCAP_TYPE_ETHERNET:
+ /* no need to encap */
+ break;
case HAL_TCL_ENCAP_TYPE_802_3:
+ default:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
goto fail_remove_idr;
@@ -178,11 +206,21 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
if (!hal_tcl_desc) {
/* NOTE: It is highly unlikely we'll be running out of tcl_ring
* desc because the desc is directly enqueued onto hw queue.
- * So add tx packet throttling logic in future if required.
*/
ath11k_hal_srng_access_end(ab, tcl_ring);
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;
+
+	/* Checking another ring for available TCL descriptors on
+	 * failure because this ring is full is better than checking
+	 * this ring earlier for every packet tx.
+	 * Restart ring selection if some rings have not been checked yet.
+	 */
+ if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
+ tcl_ring_retry = true;
+ ring_selector++;
+ }
+
goto fail_unmap_dma;
}
@@ -206,6 +244,9 @@ fail_remove_idr:
FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
spin_unlock_bh(&tx_ring->tx_idr_lock);
+ if (tcl_ring_retry)
+ goto tcl_ring_sel;
+
return ret;
}
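
The comment block above describes the new TCL ring selection: pick a ring round-robin from a counter, track the rings already tried in a bitmap, and give up only once every ring has been tried. A minimal stand-alone C sketch of that strategy (the names and the ring-full predicate are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 3

static bool ring_full(int ring_id)
{
	return ring_id == 0;	/* pretend ring 0 is busy */
}

static int pick_ring(unsigned int *selector)
{
	unsigned int tried = 0;
	int ring_id;

	while (tried != (1U << NUM_RINGS) - 1) {
		ring_id = (*selector)++ % NUM_RINGS;
		tried |= 1U << ring_id;
		if (!ring_full(ring_id))
			return ring_id;
	}
	return -1;		/* all rings full: caller drops the packet */
}

int main(void)
{
	unsigned int selector = 0;
	int i;

	for (i = 0; i < 4; i++)
		printf("tx %d -> ring %d\n", i, pick_ring(&selector));
	return 0;
}
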
@@ -543,8 +584,12 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
+	/* cmd_num starts from 1; on failure the error code is returned */
+ if (cmd_num < 0)
+ return cmd_num;
+
/* reo cmd ring descriptors has cmd_num starting from 1 */
- if (cmd_num <= 0)
+ if (cmd_num == 0)
return -EINVAL;
if (!cb)
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 7722822a0456..780a3e11b609 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -599,7 +599,7 @@ struct hal_srng {
/* Interrupt mitigation - timer threshold in us */
#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
-#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 1000
+#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 256
/* HW SRNG configuration table */
struct hal_srng_config {
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index 5e200380cca4..a1f747c1c44d 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -477,7 +477,7 @@ enum hal_tlv_tag {
struct hal_tlv_hdr {
u32 tl;
- u8 value[0];
+ u8 value[];
} __packed;
#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
@@ -1972,7 +1972,7 @@ struct hal_rx_reo_queue {
u32 processed_total_bytes;
u32 info5;
u32 rsvd[3];
- struct hal_rx_reo_queue_ext ext_desc[0];
+ struct hal_rx_reo_queue_ext ext_desc[];
} __packed;
/* hal_rx_reo_queue
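
Several headers in this series replace the old zero-length-array idiom (u8 value[0]) with a C99 flexible array member (u8 value[]). A small stand-alone sketch of how such a struct is allocated and filled (plain malloc here instead of the kernel's allocators):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tlv {
	uint32_t header;	/* e.g. an encoded tag/length word */
	uint8_t value[];	/* flexible array member, must be last */
};

int main(void)
{
	const char payload[] = "hello";
	/* One allocation covers the fixed header and the trailing payload. */
	struct tlv *t = malloc(sizeof(*t) + sizeof(payload));

	if (!t)
		return 1;
	t->header = sizeof(payload);
	memcpy(t->value, payload, sizeof(payload));
	printf("len %u value %s\n", (unsigned int)t->header, (char *)t->value);
	free(t);
	return 0;
}
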
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index e863e4abfcc1..c436191ae1e8 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -23,7 +23,7 @@ struct hal_rx_wbm_rel_info {
struct hal_rx_mon_status_tlv_hdr {
u32 hdr;
- u8 value[0];
+ u8 value[];
};
enum hal_rx_su_mu_coding {
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 9973477ae373..cdec95644758 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -111,7 +111,7 @@ struct ath11k_hw_params {
struct ath11k_fw_ie {
__le32 id;
__le32 len;
- u8 data[0];
+ u8 data[];
};
enum ath11k_bd_ie_board_type {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 9f8bc19cc5ae..5ffe55801ca4 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -33,6 +33,12 @@
.max_power = 30, \
}
+/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
+static unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
+module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
+MODULE_PARM_DESC(frame_mode,
+ "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
+
static const struct ieee80211_channel ath11k_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
@@ -1142,6 +1148,10 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
+ if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
+ IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
/* TODO: Check */
arg->tx_max_mcs_nss = 0xFF;
@@ -3682,10 +3692,10 @@ static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
{
+ struct sk_buff *msdu = skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct ath11k *ar = ctx;
struct ath11k_base *ab = ar->ab;
- struct sk_buff *msdu = skb;
- struct ieee80211_tx_info *info;
spin_lock_bh(&ar->txmgmt_idr_lock);
idr_remove(&ar->txmgmt_idr, buf_id);
@@ -3725,6 +3735,7 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
{
struct ath11k_base *ab = ar->ab;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info;
dma_addr_t paddr;
int buf_id;
int ret;
@@ -3736,11 +3747,14 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
if (buf_id < 0)
return -ENOSPC;
- if ((ieee80211_is_action(hdr->frame_control) ||
- ieee80211_is_deauth(hdr->frame_control) ||
- ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ info = IEEE80211_SKB_CB(skb);
+ if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)) {
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ }
}
paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
@@ -3789,15 +3803,30 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
info = IEEE80211_SKB_CB(skb);
- arvif = ath11k_vif_to_arvif(info->control.vif);
-
- ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
- if (ret) {
- ath11k_warn(ar->ab, "failed to transmit management frame %d\n",
- ret);
+ if (!info->control.vif) {
+ ath11k_warn(ar->ab, "no vif found for mgmt frame, flags 0x%x\n",
+ info->control.flags);
ieee80211_free_txskb(ar->hw, skb);
+ continue;
+ }
+
+ arvif = ath11k_vif_to_arvif(info->control.vif);
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
+ arvif->is_started) {
+ ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
+ arvif->vdev_id, ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ } else {
+ atomic_inc(&ar->num_pending_mgmt_tx);
+ }
} else {
- atomic_inc(&ar->num_pending_mgmt_tx);
+ ath11k_warn(ar->ab,
+ "dropping mgmt frame for vdev %d, flags 0x%x is_started %d\n",
+ arvif->vdev_id, info->control.flags,
+ arvif->is_started);
+ ieee80211_free_txskb(ar->hw, skb);
}
}
}
@@ -3837,6 +3866,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct ath11k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
@@ -3845,7 +3875,9 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
bool is_prb_rsp;
int ret;
- if (ieee80211_is_mgmt(hdr->frame_control)) {
+ if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
+ skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
+ } else if (ieee80211_is_mgmt(hdr->frame_control)) {
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp);
if (ret) {
@@ -3877,8 +3909,10 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
- if (enable)
+ if (enable) {
tlv_filter = ath11k_mac_mon_status_filter_default;
+ tlv_filter.rx_filter = ath11k_debug_rx_filter(ar);
+ }
ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
@@ -4124,6 +4158,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct vdev_create_params vdev_param = {0};
struct peer_create_params peer_param;
u32 param_id, param_value;
+ int hw_encap = 0;
u16 nss;
int i;
int ret;
@@ -4208,6 +4243,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
ar->num_created_vdevs++;
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
@@ -4216,7 +4253,22 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
- param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+ if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET)
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_AP:
+ hw_encap = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (ieee80211_set_hw_80211_encap(vif, hw_encap))
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
@@ -4378,6 +4430,8 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
ar->num_created_vdevs--;
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ab->free_vdev_map |= 1LL << (arvif->vdev_id);
@@ -4643,6 +4697,8 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
}
ar->num_started_vdevs++;
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
/* Enable CAC Flag in the driver by checking the channel DFS cac time,
* i.e dfs_cac_ms value which will be valid only for radar channels
@@ -4701,6 +4757,8 @@ static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
WARN_ON(ar->num_started_vdevs == 0);
ar->num_started_vdevs--;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
@@ -5891,6 +5949,9 @@ int ath11k_mac_register(struct ath11k_base *ab)
int i;
int ret;
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index f43deacc01bd..297172538620 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -17,7 +17,26 @@ struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
list_for_each_entry(peer, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
- if (memcmp(peer->addr, addr, ETH_ALEN))
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+static struct ath11k_peer *ath11k_peer_find_by_pdev_idx(struct ath11k_base *ab,
+ u8 pdev_idx, const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->pdev_idx != pdev_idx)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
@@ -34,7 +53,7 @@ struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
- if (memcmp(peer->addr, addr, ETH_ALEN))
+ if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
@@ -200,6 +219,17 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
return -ENOBUFS;
}
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, param->peer_addr);
+ if (peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath11k_info(ar->ab,
+ "ignoring the peer %pM creation on same pdev idx %d\n",
+ param->peer_addr, ar->pdev_idx);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+
ret = ath11k_wmi_send_peer_create_cmd(ar, param);
if (ret) {
ath11k_warn(ar->ab,
@@ -225,6 +255,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
return -ENOENT;
}
+ peer->pdev_idx = ar->pdev_idx;
peer->sta = sta;
arvif->ast_hash = peer->ast_hash;
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index ccca1523a6ea..5d125ce8984e 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -13,6 +13,7 @@ struct ath11k_peer {
u8 addr[ETH_ALEN];
int peer_id;
u16 ast_hash;
+ u8 pdev_idx;
/* protected by ab->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index 8700a622be7b..66d0aae7816c 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -21,14 +21,16 @@ static inline void trace_ ## name(proto) {}
#define TRACE_SYSTEM ath11k
TRACE_EVENT(ath11k_htt_pktlog,
- TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len),
+ TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len,
+ u32 pktlog_checksum),
- TP_ARGS(ar, buf, buf_len),
+ TP_ARGS(ar, buf, buf_len, pktlog_checksum),
TP_STRUCT__entry(
__string(device, dev_name(ar->ab->dev))
__string(driver, dev_driver_string(ar->ab->dev))
__field(u16, buf_len)
+ __field(u32, pktlog_checksum)
__dynamic_array(u8, pktlog, buf_len)
),
@@ -36,14 +38,16 @@ TRACE_EVENT(ath11k_htt_pktlog,
__assign_str(device, dev_name(ar->ab->dev));
__assign_str(driver, dev_driver_string(ar->ab->dev));
__entry->buf_len = buf_len;
+ __entry->pktlog_checksum = pktlog_checksum;
memcpy(__get_dynamic_array(pktlog), buf, buf_len);
),
TP_printk(
- "%s %s size %hu",
+ "%s %s size %hu pktlog_checksum %d",
__get_str(driver),
__get_str(device),
- __entry->buf_len
+ __entry->buf_len,
+ __entry->pktlog_checksum
)
);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index e7ce36966d6a..c2a972377687 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -87,8 +87,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
= { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
- [WMI_TAG_READY_EVENT]
- = {.min_len = sizeof(struct wmi_ready_event) },
+ [WMI_TAG_READY_EVENT] = {
+ .min_len = sizeof(struct wmi_ready_event_min) },
[WMI_TAG_SERVICE_AVAILABLE_EVENT]
= {.min_len = sizeof(struct wmi_service_available_event) },
[WMI_TAG_PEER_ASSOC_CONF_EVENT]
@@ -2368,6 +2368,146 @@ int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
return ret;
}
+int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_delba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delba_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->initiator = initiator;
+ cmd->reasoncode = reason;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
+ vdev_id, mac, tid, initiator, reason);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_DELBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_setresponse_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->statuscode = status;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
+ vdev_id, mac, tid, status);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->buffersize = buf_size;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
+ vdev_id, mac, tid, buf_size);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_clear_resp_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
+ vdev_id, mac);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
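
The new ADDBA/DELBA helpers above all build their TLV header from FIELD_PREP(WMI_TLV_TAG, ...) | FIELD_PREP(WMI_TLV_LEN, ...). A tiny sketch of that packing with plain shifts and masks; the tag-in-upper-16/length-in-lower-16 split is assumed here, since only WMI_TLV_LEN's mask is visible in this diff:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: length in bits 15..0, tag in bits 31..16. */
static uint32_t tlv_header(uint16_t tag, uint16_t len)
{
	return ((uint32_t)tag << 16) | len;
}

int main(void)
{
	uint32_t hdr = tlv_header(0x1234, 24);

	printf("tag 0x%x len %u\n",
	       (unsigned int)(hdr >> 16), (unsigned int)(hdr & 0xffff));
	return 0;
}
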
@@ -2779,7 +2919,7 @@ int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
if (ret) {
- ath11k_warn(ab, "Failed to send WMI_TWT_DIeABLE_CMDID");
+ ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
dev_kfree_skb(skb);
}
return ret;
@@ -3105,7 +3245,7 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab)
config.beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
config.rx_batchmode = TARGET_RX_BATCHMODE;
config.peer_map_unmap_v2_support = 1;
- config.twt_ap_pdev_count = 2;
+ config.twt_ap_pdev_count = ab->num_radios;
config.twt_ap_sta_count = 1000;
memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
@@ -3740,8 +3880,9 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
ieee80211_tx_status_irqsafe(ar->hw, msdu);
- WARN_ON_ONCE(atomic_read(&ar->num_pending_mgmt_tx) == 0);
- atomic_dec(&ar->num_pending_mgmt_tx);
+	/* WARN if we receive this event without having done any mgmt tx */
+ if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
+ WARN_ON_ONCE(1);
return 0;
}
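
The completion path above switches from an unconditional atomic_dec() to atomic_dec_if_positive(), so an unexpected completion can no longer drive the pending-mgmt-tx counter below zero. A stand-alone sketch of that semantic using C11 atomics in place of the kernel primitive:

#include <stdatomic.h>
#include <stdio.h>

static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0 &&
	       !atomic_compare_exchange_weak(v, &old, old - 1))
		;	/* old is reloaded by the failed CAS */
	return old - 1;	/* < 0 means the counter was already zero */
}

int main(void)
{
	atomic_int pending = 1;

	printf("%d\n", dec_if_positive(&pending));	/* 0: decremented */
	printf("%d\n", dec_if_positive(&pending));	/* -1: was already 0 */
	return 0;
}
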
@@ -4851,7 +4992,7 @@ static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_rdy_parse *rdy_parse = data;
- struct wmi_ready_event *fixed_param;
+ struct wmi_ready_event fixed_param;
struct wmi_mac_addr *addr_list;
struct ath11k_pdev *pdev;
u32 num_mac_addr;
@@ -4859,11 +5000,16 @@ static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
switch (tag) {
case WMI_TAG_READY_EVENT:
- fixed_param = (struct wmi_ready_event *)ptr;
- ab->wlan_init_status = fixed_param->status;
- rdy_parse->num_extra_mac_addr = fixed_param->num_extra_mac_addr;
-
- ether_addr_copy(ab->mac_addr, fixed_param->mac_addr.addr);
+ memset(&fixed_param, 0, sizeof(fixed_param));
+ memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
+ min_t(u16, sizeof(fixed_param), len));
+ ab->wlan_init_status = fixed_param.ready_event_min.status;
+ rdy_parse->num_extra_mac_addr =
+ fixed_param.ready_event_min.num_extra_mac_addr;
+
+ ether_addr_copy(ab->mac_addr,
+ fixed_param.ready_event_min.mac_addr.addr);
+ ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
ab->wmi_ready = true;
break;
case WMI_TAG_ARRAY_FIXED_STRUCT:
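
The READY event parser above now copies at most min(len, sizeof(fixed_param)), so firmware that sends only the minimal event layout cannot cause an out-of-bounds read while newer, larger layouts are still handled. A stand-alone sketch of that bounded-copy pattern with an invented event layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ready_min {
	uint32_t status;
};

struct ready {
	struct ready_min min;
	uint32_t pktlog_checksum;	/* only present in newer layouts */
};

static void parse_ready(const void *ptr, size_t len)
{
	struct ready ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(&ev, ptr, len < sizeof(ev) ? len : sizeof(ev));
	printf("status %u checksum %u\n",
	       (unsigned int)ev.min.status, (unsigned int)ev.pktlog_checksum);
}

int main(void)
{
	struct ready_min old_fw = { .status = 1 };
	struct ready new_fw = { .min = { .status = 1 }, .pktlog_checksum = 42 };

	parse_ready(&old_fw, sizeof(old_fw));	/* checksum stays 0 */
	parse_ready(&new_fw, sizeof(new_fw));
	return 0;
}
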
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 510f9c6bc1d7..b9f3e559ced7 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -39,7 +39,7 @@ struct wmi_cmd_hdr {
struct wmi_tlv {
u32 header;
- u8 value[0];
+ u8 value[];
} __packed;
#define WMI_TLV_LEN GENMASK(15, 0)
@@ -1976,6 +1976,43 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+ WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+ WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+ WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
+ WMI_TLV_SERVICE_BEACON_RECEPTION_STATS = 180,
+ WMI_TLV_SERVICE_FETCH_TX_PN = 181,
+ WMI_TLV_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT = 182,
+ WMI_TLV_SERVICE_TX_PER_PEER_AMPDU_SIZE = 183,
+ WMI_TLV_SERVICE_BSS_COLOR_SWITCH_COUNT = 184,
+ WMI_TLV_SERVICE_HTT_PEER_STATS_SUPPORT = 185,
+ WMI_TLV_SERVICE_UL_RU26_ALLOWED = 186,
+ WMI_TLV_SERVICE_GET_MWS_COEX_STATE = 187,
+ WMI_TLV_SERVICE_GET_MWS_DPWB_STATE = 188,
+ WMI_TLV_SERVICE_GET_MWS_TDM_STATE = 189,
+ WMI_TLV_SERVICE_GET_MWS_IDRX_STATE = 190,
+ WMI_TLV_SERVICE_GET_MWS_ANTENNA_SHARING_STATE = 191,
+ WMI_TLV_SERVICE_ENHANCED_TPC_CONFIG_EVENT = 192,
+ WMI_TLV_SERVICE_WLM_STATS_REQUEST = 193,
+ WMI_TLV_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT = 194,
+ WMI_TLV_SERVICE_WPA3_FT_SAE_SUPPORT = 195,
+ WMI_TLV_SERVICE_WPA3_FT_SUITE_B_SUPPORT = 196,
+ WMI_TLV_SERVICE_VOW_ENABLE = 197,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_EVT_TYPE_1 = 198,
+ WMI_TLV_SERVICE_BROADCAST_TWT = 199,
+ WMI_TLV_SERVICE_RAP_DETECTION_SUPPORT = 200,
+ WMI_TLV_SERVICE_PS_TDCC = 201,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY = 202,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_OVERRIDE = 203,
+ WMI_TLV_SERVICE_TX_PWR_PER_PEER = 204,
+ WMI_TLV_SERVICE_STA_PLUS_STA_SUPPORT = 205,
+ WMI_TLV_SERVICE_WPA3_FT_FILS = 206,
+ WMI_TLV_SERVICE_ADAPTIVE_11R_ROAM = 207,
+ WMI_TLV_SERVICE_CHAN_RF_CHARACTERIZATION_INFO = 208,
+ WMI_TLV_SERVICE_FW_IFACE_COMBINATION_SUPPORT = 209,
+ WMI_TLV_SERVICE_TX_COMPL_TSF64 = 210,
+ WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211,
+ WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212,
+ WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_MAX_EXT_SERVICE
@@ -2345,7 +2382,7 @@ struct wmi_mac_addr {
} __packed;
} __packed;
-struct wmi_ready_event {
+struct wmi_ready_event_min {
struct wmi_abi_version fw_abi_vers;
struct wmi_mac_addr mac_addr;
u32 status;
@@ -2355,6 +2392,12 @@ struct wmi_ready_event {
u32 num_extra_peers;
} __packed;
+struct wmi_ready_event {
+ struct wmi_ready_event_min ready_event_min;
+ u32 max_ast_index;
+ u32 pktlog_defs_checksum;
+} __packed;
+
struct wmi_service_available_event {
u32 wmi_service_segment_offset;
u32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
@@ -3649,6 +3692,37 @@ struct wmi_therm_throt_level_config_info {
u32 prio;
} __packed;
+struct wmi_delba_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 initiator;
+ u32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 statuscode;
+} __packed;
+
+struct wmi_addba_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 buffersize;
+} __packed;
+
+struct wmi_addba_clear_resp_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
struct wmi_pdev_pktlog_filter_info {
u32 tlv_header;
struct wmi_mac_addr peer_macaddr;
@@ -4531,6 +4605,9 @@ enum wmi_sta_ps_param_rx_wake_policy {
WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
};
+/* Do not change existing values! Used by the ath11k_frame_mode module
+ * parameter.
+ */
enum ath11k_hw_txrx_mode {
ATH11K_HW_TXRX_RAW = 0,
ATH11K_HW_TXRX_NATIVE_WIFI = 1,
@@ -4822,6 +4899,13 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
struct scan_chan_list_params *chan_list);
int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
u32 pdev_id);
+int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac);
+int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size);
+int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status);
+int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason);
int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
u32 vdev_id, u32 bcn_ctrl_op);
int
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 0624333f5430..850c608b43a3 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -501,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
/* too many PHY errors - we have to raise immunity */
- bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
+ bool ofdm_flag = as->ofdm_errors > ofdm_high;
ath5k_ani_raise_immunity(ah, as, ofdm_flag);
ath5k_ani_period_restart(as);
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 0d30e762c090..77e052336eb5 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -160,7 +160,7 @@ enum ath6kl_fw_capability {
struct ath6kl_fw_ie {
__le32 id;
__le32 len;
- u8 data[0];
+ u8 data[];
};
enum ath6kl_hw_flags {
@@ -406,7 +406,7 @@ struct ath6kl_mgmt_buff {
u32 id;
bool no_cck;
size_t len;
- u8 buf[0];
+ u8 buf[];
};
struct ath6kl_sta {
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 54337d60f288..7506cea46f58 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -30,7 +30,7 @@ struct ath6kl_fwlog_slot {
__le32 length;
/* max ATH6KL_FWLOG_PAYLOAD_SIZE bytes */
- u8 payload[0];
+ u8 payload[];
};
#define ATH6KL_FWLOG_MAX_ENTRIES 20
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index dc6bd8cd9b83..aea7fea2a81e 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -199,7 +199,7 @@ struct hif_scatter_req {
u32 scat_q_depth;
- struct hif_scatter_item scat_list[0];
+ struct hif_scatter_item scat_list[];
};
struct ath6kl_irq_proc_registers {
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index fd9db8ca99d7..fd53b5f9e9b5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -19,6 +19,8 @@
#include "ar9002_phy.h"
#define AR9285_CLCAL_REDO_THRESH 1
+/* AGC & I/Q calibration time limit, ms */
+#define AR9002_CAL_MAX_TIME 30000
enum ar9002_cal_types {
ADC_GAIN_CAL = BIT(0),
@@ -37,9 +39,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
break;
case ADC_GAIN_CAL:
case ADC_DC_CAL:
- /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
- if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
- IS_CHAN_HT20(chan)))
+ /* Run even/odd ADCs calibrations for HT40 channels only */
+ if (IS_CHAN_HT40(chan))
supported = true;
break;
}
@@ -105,6 +106,14 @@ static bool ar9002_hw_per_calibration(struct ath_hw *ah,
} else {
ar9002_hw_setup_calibration(ah, currCal);
}
+ } else if (time_after(jiffies, ah->cal_start_time +
+ msecs_to_jiffies(AR9002_CAL_MAX_TIME))) {
+ REG_CLR_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_DO_CAL);
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "calibration timeout\n");
+ currCal->calState = CAL_WAITING; /* Try later */
+ iscaldone = true;
}
} else if (!(caldata->CalValid & currCal->calData->calType)) {
ath9k_hw_reset_calibration(ah, currCal);
@@ -664,8 +673,13 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
int ret;
nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
- if (ah->caldata)
+ if (ah->caldata) {
nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
+		if (longcal) /* Remember it so it is not missed */
+			set_bit(LONGCAL_PENDING, &ah->caldata->cal_flags);
+		else if (test_bit(LONGCAL_PENDING, &ah->caldata->cal_flags))
+			longcal = true; /* Re-run a previously pending one */
+ }
percal_pending = (currCal &&
(currCal->calState == CAL_RUNNING ||
@@ -675,9 +689,24 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ar9002_hw_per_calibration(ah, chan, rxchainmask, currCal))
return 0;
- ah->cal_list_curr = currCal = currCal->calNext;
- if (currCal->calState == CAL_WAITING)
- ath9k_hw_reset_calibration(ah, currCal);
+ /* Looking for next waiting calibration if any */
+ for (currCal = currCal->calNext; currCal != ah->cal_list_curr;
+ currCal = currCal->calNext) {
+ if (currCal->calState == CAL_WAITING)
+ break;
+ }
+ if (currCal->calState == CAL_WAITING) {
+ percal_pending = true;
+ ah->cal_list_curr = currCal;
+ } else {
+ percal_pending = false;
+ ah->cal_list_curr = ah->cal_list;
+ }
+ }
+
+	/* Do not start the next calibration if a long calibration is in progress */
+ if (percal_pending && !nfcal && !longcal) {
+ ath9k_hw_reset_calibration(ah, currCal);
return 0;
}
@@ -701,6 +730,9 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
}
if (longcal) {
+ if (ah->caldata)
+ clear_bit(LONGCAL_PENDING,
+ &ah->caldata->cal_flags);
ath9k_hw_start_nfcal(ah, false);
/* Do periodic PAOffset Cal */
ar9002_hw_pa_cal(ah, false);
@@ -858,9 +890,6 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
- if (ah->caldata)
- set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
-
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
/* Enable IQ, ADC Gain and ADC DC offset CALs */
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 695c779ae8cf..0422a33395b7 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -176,6 +176,7 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
ath9k_hw_setup_calibration(ah, currCal);
+ ah->cal_start_time = jiffies;
currCal->calState = CAL_RUNNING;
for (i = 0; i < AR5416_MAX_CHAINS; i++) {
@@ -209,14 +210,17 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
return true;
}
- if (!(ah->supp_cals & currCal->calData->calType))
- return true;
+ currCal = ah->cal_list;
+ do {
+ ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
+ currCal->calData->calType,
+ ah->curchan->chan->center_freq);
- ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
- currCal->calData->calType, ah->curchan->chan->center_freq);
+ ah->caldata->CalValid &= ~currCal->calData->calType;
+ currCal->calState = CAL_WAITING;
- ah->caldata->CalValid &= ~currCal->calData->calType;
- currCal->calState = CAL_WAITING;
+ currCal = currCal->calNext;
+ } while (currCal != ah->cal_list);
return false;
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index dd0c32379375..4ed21dad6a8e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -612,6 +612,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
hif_dev->remain_skb = nskb;
spin_unlock(&hif_dev->rx_lock);
} else {
+ if (pool_index == MAX_PKT_NUM_IN_TRANSFER) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: over RX MAX_PKT_NUM\n");
+ goto err;
+ }
nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
if (!nskb) {
dev_err(&hif_dev->udev->dev,
@@ -638,9 +643,9 @@ err:
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
- struct sk_buff *skb = (struct sk_buff *) urb->context;
- struct hif_device_usb *hif_dev =
- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
+ struct hif_device_usb *hif_dev = rx_buf->hif_dev;
+ struct sk_buff *skb = rx_buf->skb;
int ret;
if (!skb)
@@ -680,14 +685,15 @@ resubmit:
return;
free:
kfree_skb(skb);
+ kfree(rx_buf);
}
static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
- struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
+ struct hif_device_usb *hif_dev = rx_buf->hif_dev;
+ struct sk_buff *skb = rx_buf->skb;
struct sk_buff *nskb;
- struct hif_device_usb *hif_dev =
- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
if (!skb)
@@ -745,6 +751,7 @@ resubmit:
return;
free:
kfree_skb(skb);
+ kfree(rx_buf);
urb->context = NULL;
}
@@ -790,7 +797,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
init_usb_anchor(&hif_dev->mgmt_submitted);
for (i = 0; i < MAX_TX_URB_NUM; i++) {
- tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
+ tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL);
if (!tx_buf)
goto err;
@@ -827,8 +834,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
- struct urb *urb = NULL;
+ struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
+ struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->rx_submitted);
@@ -836,6 +844,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
for (i = 0; i < MAX_RX_URB_NUM; i++) {
+ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
+ if (!rx_buf) {
+ ret = -ENOMEM;
+ goto err_rxb;
+ }
+
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@@ -850,11 +864,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
+ rx_buf->hif_dev = hif_dev;
+ rx_buf->skb = skb;
+
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_WLAN_RX_PIPE),
skb->data, MAX_RX_BUF_SIZE,
- ath9k_hif_usb_rx_cb, skb);
+ ath9k_hif_usb_rx_cb, rx_buf);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->rx_submitted);
@@ -880,6 +897,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
+ kfree(rx_buf);
+err_rxb:
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
return ret;
}
@@ -891,14 +910,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
{
- struct urb *urb = NULL;
+ struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
+ struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->reg_in_submitted);
for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
+ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
+ if (!rx_buf) {
+ ret = -ENOMEM;
+ goto err_rxb;
+ }
+
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@@ -913,11 +939,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
+ rx_buf->hif_dev = hif_dev;
+ rx_buf->skb = skb;
+
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, skb, 1);
+ ath9k_hif_usb_reg_in_cb, rx_buf, 1);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@@ -943,6 +972,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
+ kfree(rx_buf);
+err_rxb:
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
return ret;
}
@@ -973,7 +1004,7 @@ err:
return -ENOMEM;
}
-static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
+void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->regout_submitted);
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
@@ -1341,8 +1372,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
if (hif_dev->flags & HIF_USB_READY) {
ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
- ath9k_htc_hw_free(hif_dev->htc_handle);
ath9k_hif_usb_dev_deinit(hif_dev);
+ ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv);
+ ath9k_htc_hw_free(hif_dev->htc_handle);
}
usb_set_intfdata(interface, NULL);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 7846916aa01d..5985aa15ca93 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -86,6 +86,11 @@ struct tx_buf {
struct list_head list;
};
+struct rx_buf {
+ struct sk_buff *skb;
+ struct hif_device_usb *hif_dev;
+};
+
#define HIF_USB_TX_STOP BIT(0)
#define HIF_USB_TX_FLUSH BIT(1)
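
The new struct rx_buf bundles the skb and the hif device into one object passed as the URB context, instead of re-deriving the device inside the completion handler. A stand-alone sketch of that callback-context pattern (no real USB API is involved; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct device { const char *name; };
struct buffer { int len; };

struct rx_ctx {
	struct device *dev;
	struct buffer *buf;
};

static void rx_complete(void *context)
{
	struct rx_ctx *ctx = context;

	printf("%s: completed buffer of %d bytes\n",
	       ctx->dev->name, ctx->buf->len);
	free(ctx->buf);
	free(ctx);
}

int main(void)
{
	struct device dev = { .name = "demo0" };
	struct rx_ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return 1;
	ctx->dev = &dev;
	ctx->buf = malloc(sizeof(*ctx->buf));
	if (!ctx->buf) {
		free(ctx);
		return 1;
	}
	ctx->buf->len = 1500;
	rx_complete(ctx);	/* the "URB" hands the context back */
	return 0;
}
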
@@ -133,5 +138,6 @@ struct hif_device_usb {
int ath9k_hif_usb_init(void);
void ath9k_hif_usb_exit(void);
+void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev);
#endif /* HTC_USB_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index d961095ab01f..40a065028ebe 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -931,8 +931,9 @@ err_init:
int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
u16 devid, char *product, u32 drv_info)
{
- struct ieee80211_hw *hw;
+ struct hif_device_usb *hif_dev;
struct ath9k_htc_priv *priv;
+ struct ieee80211_hw *hw;
int ret;
hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
@@ -967,7 +968,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
return 0;
err_init:
- ath9k_deinit_wmi(priv);
+ ath9k_stop_wmi(priv);
+ hif_dev = (struct hif_device_usb *)htc_handle->hif_dev;
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+ ath9k_destoy_wmi(priv);
err_free:
ieee80211_free_hw(hw);
return ret;
@@ -982,7 +986,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED;
ath9k_deinit_device(htc_handle->drv_priv);
- ath9k_deinit_wmi(htc_handle->drv_priv);
+ ath9k_stop_wmi(htc_handle->drv_priv);
ieee80211_free_hw(htc_handle->drv_priv->hw);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 9cec5c216e1f..118e5550b10c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -999,9 +999,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* which are not PHY_ERROR (short radar pulses have a length of 3)
*/
if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
- ath_warn(common,
- "Short RX data len, dropping (dlen: %d)\n",
- rs_datalen);
+ ath_dbg(common, ANY,
+ "Short RX data len, dropping (dlen: %d)\n",
+ rs_datalen);
goto rx_next;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index d091c8ebdcf0..d2e062eaf561 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -113,6 +113,9 @@ static void htc_process_conn_rsp(struct htc_target *target,
if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
epid = svc_rspmsg->endpoint_id;
+ if (epid < 0 || epid >= ENDPOINT_MAX)
+ return;
+
service_id = be16_to_cpu(svc_rspmsg->service_id);
max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
endpoint = &target->endpoint[epid];
@@ -170,7 +173,6 @@ static int htc_config_pipe_credits(struct htc_target *target)
time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
if (!time_left) {
dev_err(target->dev, "HTC credit config timeout\n");
- kfree_skb(skb);
return -ETIMEDOUT;
}
@@ -206,7 +208,6 @@ static int htc_setup_complete(struct htc_target *target)
time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
if (!time_left) {
dev_err(target->dev, "HTC start timeout\n");
- kfree_skb(skb);
return -ETIMEDOUT;
}
@@ -279,7 +280,6 @@ int htc_connect_service(struct htc_target *target,
if (!time_left) {
dev_err(target->dev, "Service connection timeout for: %d\n",
service_connreq->service_id);
- kfree_skb(skb);
return -ETIMEDOUT;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 2e4489700a85..023599e10dd5 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -427,6 +427,7 @@ enum ath9k_cal_flags {
TXIQCAL_DONE,
TXCLCAL_DONE,
SW_PKDET_DONE,
+ LONGCAL_PENDING,
};
struct ath9k_hw_cal_data {
@@ -833,6 +834,7 @@ struct ath_hw {
/* Calibration */
u32 supp_cals;
+ unsigned long cal_start_time;
struct ath9k_cal_list iq_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index cdc146091194..e7a3127395be 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
return wmi;
}
-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
+void ath9k_stop_wmi(struct ath9k_htc_priv *priv)
{
struct wmi *wmi = priv->wmi;
mutex_lock(&wmi->op_mutex);
wmi->stopped = true;
mutex_unlock(&wmi->op_mutex);
+}
+void ath9k_destoy_wmi(struct ath9k_htc_priv *priv)
+{
kfree(priv->wmi);
}
@@ -336,7 +339,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
- kfree_skb(skb);
return -ETIMEDOUT;
}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index 380175d5ecd7..d8b912206232 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -179,7 +179,6 @@ struct wmi {
};
struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
enum htc_endpoint_id *wmi_ctrl_epid);
int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
@@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
void ath9k_wmi_event_tasklet(unsigned long data);
void ath9k_fatal_work(struct work_struct *work);
void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv);
+void ath9k_stop_wmi(struct ath9k_htc_priv *priv);
+void ath9k_destoy_wmi(struct ath9k_htc_priv *priv);
#define WMI_CMD(_wmi_cmd) \
do { \
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 51934d191f33..1ab09e1c9ec5 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -338,9 +338,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
if (SUPP(CARL9170FW_WLANTX_CAB)) {
- if_comb_types |=
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ if_comb_types |= BIT(NL80211_IFTYPE_AP);
#ifdef CONFIG_MAC80211_MESH
if_comb_types |=
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 5914926a5c5b..816929fb5b14 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar,
ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
(vif->type != NL80211_IFTYPE_AP));
- /* While the driver supports HW offload in a single
- * P2P client configuration, it doesn't support HW
- * offload in the favourit, concurrent P2P GO+CLIENT
- * configuration. Hence, HW offload will always be
- * disabled for P2P.
+	/* The driver used to have P2P GO+CLIENT support, but since
+	 * that was dropped and we don't know whether any gremlins are
+	 * lurking in the shadows, it is best to keep HW offload
+	 * disabled for P2P.
*/
ar->disable_offload |= vif->p2p;
@@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_STATION)
break;
- /* P2P GO [master] use-case
- * Because the P2P GO station is selected dynamically
- * by all participating peers of a WIFI Direct network,
- * the driver has be able to change the main interface
- * operating mode on the fly.
- */
- if (main_vif->p2p && vif->p2p &&
- vif->type == NL80211_IFTYPE_AP) {
- old_main = main_vif;
- break;
- }
-
err = -EBUSY;
rcu_read_unlock();
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index d3c001fa8eb4..c33b4235839d 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -5507,7 +5507,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
core = (cmd & 0x3000) >> 12;
type = (cmd & 0x0F00) >> 8;
- if (phy6or5x && updated[core] == 0) {
+ if (phy6or5x && !updated[core]) {
b43_nphy_update_tx_cal_ladder(dev, core);
updated[core] = true;
}
diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
index 69f8b46c9015..1a11c5dfb8d9 100644
--- a/drivers/net/wireless/broadcom/b43/pio.c
+++ b/drivers/net/wireless/broadcom/b43/pio.c
@@ -765,7 +765,7 @@ void b43_pio_rx(struct b43_pio_rxqueue *q)
bool stop;
while (1) {
- stop = (pio_rx_frame(q) == 0);
+ stop = !pio_rx_frame(q);
if (stop)
break;
cond_resched();
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index 058745219516..55babc6d1091 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -629,19 +629,6 @@ static s8 b43_rssi_postprocess(struct b43_wldev *dev,
return (s8) tmp;
}
-//TODO
-#if 0
-static s8 b43_rssinoise_postprocess(struct b43_wldev *dev, u8 in_rssi)
-{
- struct b43_phy *phy = &dev->phy;
- s8 ret;
-
- ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1);
-
- return ret;
-}
-#endif
-
void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
{
struct ieee80211_rx_status status;
diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c
index e9b23c2e5bd4..efd63f4ce74f 100644
--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c
+++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c
@@ -558,6 +558,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
default:
b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n",
chanstat);
+ goto drop;
}
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index b684a5b6d904..22a17ae09e94 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -961,7 +961,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
- BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 2ba165330038..f2f84af923a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4449,6 +4449,11 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
mgmt_ie_len = &saved_ie->assoc_req_ie_len;
mgmt_ie_buf_len = sizeof(saved_ie->assoc_req_ie);
break;
+ case BRCMF_VNDR_IE_ASSOCRSP_FLAG:
+ mgmt_ie_buf = saved_ie->assoc_res_ie;
+ mgmt_ie_len = &saved_ie->assoc_res_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->assoc_res_ie);
+ break;
default:
err = -EPERM;
bphy_err(drvr, "not suitable type\n");
@@ -4595,6 +4600,15 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
else
brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
+ /* Set Assoc Response IEs to FW */
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_ASSOCRSP_FLAG,
+ beacon->assocresp_ies,
+ beacon->assocresp_ies_len);
+ if (err)
+ brcmf_err("Set Assoc Resp IE Failed\n");
+ else
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc Resp\n");
+
return err;
}
@@ -4727,7 +4741,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
if ((dev_role == NL80211_IFTYPE_AP) &&
((ifp->ifidx == 0) ||
- !brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) {
+ (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB) &&
+ !brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN)))) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
if (err < 0) {
bphy_err(drvr, "BRCMF_C_DOWN error %d\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 6ce48f6275a4..3ca8c07d6370 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -153,19 +153,23 @@ enum brcmf_vif_status {
* @probe_req_ie: IE info for probe request.
* @probe_res_ie: IE info for probe response.
* @beacon_ie: IE info for beacon frame.
+ * @assoc_res_ie: IE info for association response frame.
* @probe_req_ie_len: IE info length for probe request.
* @probe_res_ie_len: IE info length for probe response.
* @beacon_ie_len: IE info length for beacon frame.
+ * @assoc_res_ie_len: IE info length for association response frame.
*/
struct vif_saved_ie {
u8 probe_req_ie[IE_MAX_LEN];
u8 probe_res_ie[IE_MAX_LEN];
u8 beacon_ie[IE_MAX_LEN];
u8 assoc_req_ie[IE_MAX_LEN];
+ u8 assoc_res_ie[IE_MAX_LEN];
u32 probe_req_ie_len;
u32 probe_res_ie_len;
u32 beacon_ie_len;
u32 assoc_req_ie_len;
+ u32 assoc_res_ie_len;
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c
index 49db54d23e03..e44236cb210e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c
@@ -180,14 +180,8 @@ again:
int brcmf_commonring_write_complete(struct brcmf_commonring *commonring)
{
- void *address;
-
- address = commonring->buf_addr;
- address += (commonring->f_ptr * commonring->item_len);
- if (commonring->f_ptr > commonring->w_ptr) {
- address = commonring->buf_addr;
+ if (commonring->f_ptr > commonring->w_ptr)
commonring->f_ptr = 0;
- }
commonring->f_ptr = commonring->w_ptr;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 436f501be937..c88655acc78c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -579,9 +579,6 @@ static int brcmf_netdev_stop(struct net_device *ndev)
brcmf_cfg80211_down(ndev);
- if (ifp->drvr->bus_if->state == BRCMF_BUS_UP)
- brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0);
-
brcmf_net_setcarrier(ifp, false);
return 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
index 120515fe8250..eecf8a38d94a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
@@ -47,13 +47,10 @@ struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
return drvr->wiphy->debugfsdir;
}
-int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+void brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data))
{
- struct dentry *e;
-
WARN(!drvr->wiphy->debugfsdir, "wiphy not (yet) registered\n");
- e = debugfs_create_devm_seqfile(drvr->bus_if->dev, fn,
- drvr->wiphy->debugfsdir, read_fn);
- return PTR_ERR_OR_ZERO(e);
+ debugfs_create_devm_seqfile(drvr->bus_if->dev, fn,
+ drvr->wiphy->debugfsdir, read_fn);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
index 9b221b509ade..4146faeed344 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
@@ -116,8 +116,8 @@ struct brcmf_bus;
struct brcmf_pub;
#ifdef DEBUG
struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
-int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
- int (*read_fn)(struct seq_file *seq, void *data));
+void brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+ int (*read_fn)(struct seq_file *seq, void *data));
int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
size_t len);
#else
@@ -126,11 +126,9 @@ static inline struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
return ERR_PTR(-ENOENT);
}
static inline
-int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
- int (*read_fn)(struct seq_file *seq, void *data))
-{
- return 0;
-}
+void brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+ int (*read_fn)(struct seq_file *seq, void *data))
+{ }
static inline
int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
size_t len)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 5da0dda0d899..0dcefbd0c000 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -285,13 +285,14 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
if (!err)
ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC);
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
+
if (drvr->settings->feature_disable) {
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
ifp->drvr->feat_flags,
drvr->settings->feature_disable);
ifp->drvr->feat_flags &= ~drvr->settings->feature_disable;
}
- brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
brcmf_feat_firmware_overrides(drvr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 8cc52935fd41..2b7837887c0b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2356,7 +2356,7 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
fws->drvr = drvr;
fws->fcmode = drvr->settings->fcmode;
- if ((drvr->bus_if->always_use_fws_queue == false) &&
+ if (!drvr->bus_if->always_use_fws_queue &&
(fws->fcmode == BRCMF_FWS_FCMODE_NONE)) {
fws->avoid_queueing = true;
brcmf_dbg(INFO, "FWS queueing will be avoided\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 1f5deea5a288..e32c24a2670d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -17,6 +17,7 @@
#include "fwil_types.h"
#include "p2p.h"
#include "cfg80211.h"
+#include "feature.h"
/* parameters used for p2p escan */
#define P2PAPI_SCAN_NPROBES 1
@@ -59,12 +60,13 @@
#define P2P_AF_MIN_DWELL_TIME 100
#define P2P_AF_MED_DWELL_TIME 400
#define P2P_AF_LONG_DWELL_TIME 1000
-#define P2P_AF_TX_MAX_RETRY 1
+#define P2P_AF_TX_MAX_RETRY 5
#define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000)
#define P2P_INVALID_CHANNEL -1
#define P2P_CHANNEL_SYNC_RETRY 5
#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(450)
#define P2P_DEFAULT_SLEEP_TIME_VSDB 200
+#define P2P_AF_RETRY_DELAY_TIME 40
/* WiFi P2P Public Action Frame OUI Subtypes */
#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */
@@ -92,6 +94,9 @@
#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comback Response AF */
#define BRCMF_P2P_DISABLE_TIMEOUT msecs_to_jiffies(500)
+
+/* Mask for retry counter of custom dwell time */
+#define CUSTOM_RETRY_MASK 0xff000000
/**
* struct brcmf_p2p_disc_st_le - set discovery state in firmware.
*
@@ -457,10 +462,21 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
*/
static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
{
+ struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
bool random_addr = false;
+ bool local_admin = false;
- if (!dev_addr || is_zero_ether_addr(dev_addr))
- random_addr = true;
+ if (!dev_addr || is_zero_ether_addr(dev_addr)) {
+ /* If the primary interface address is already locally
+ * administered, create a new random address.
+ */
+ if (pri_ifp->mac_addr[0] & 0x02) {
+ random_addr = true;
+ } else {
+ dev_addr = pri_ifp->mac_addr;
+ local_admin = true;
+ }
+ }
/* Generate the P2P Device Address obtaining a random ethernet
* address with the locally administered bit set.
@@ -470,13 +486,20 @@ static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
else
memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
+ if (local_admin)
+ p2p->dev_addr[0] |= 0x02;
+
/* Generate the P2P Interface Address. If the discovery and connection
* BSSCFGs need to simultaneously co-exist, then this address must be
* different from the P2P Device Address, but also locally administered.
*/
- memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN);
- p2p->int_addr[0] |= 0x02;
- p2p->int_addr[4] ^= 0x80;
+ memcpy(p2p->conn_int_addr, p2p->dev_addr, ETH_ALEN);
+ p2p->conn_int_addr[0] |= 0x02;
+ p2p->conn_int_addr[4] ^= 0x80;
+
+ memcpy(p2p->conn2_int_addr, p2p->dev_addr, ETH_ALEN);
+ p2p->conn2_int_addr[0] |= 0x02;
+ p2p->conn2_int_addr[4] ^= 0x90;
}
/**
@@ -1491,6 +1514,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
{
struct brcmf_pub *drvr = p2p->cfg->pub;
struct brcmf_cfg80211_vif *vif;
+ struct brcmf_p2p_action_frame *p2p_af;
s32 err = 0;
s32 timeout = 0;
@@ -1500,7 +1524,13 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
- vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ /* check if it is a p2p_presence response */
+ p2p_af = (struct brcmf_p2p_action_frame *)af_params->action_frame.data;
+ if (p2p_af->subtype == P2P_AF_PRESENCE_RSP)
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+ else
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+
err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
sizeof(*af_params));
if (err) {
@@ -1640,6 +1670,17 @@ static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
return err;
}
+static bool brcmf_p2p_check_dwell_overflow(s32 requested_dwell,
+ unsigned long dwell_jiffies)
+{
+ if ((requested_dwell & CUSTOM_RETRY_MASK) &&
+ (jiffies_to_msecs(jiffies - dwell_jiffies) >
+ (requested_dwell & ~CUSTOM_RETRY_MASK))) {
+ brcmf_err("Action frame TX retry time over dwell time!\n");
+ return true;
+ }
+ return false;
+}
/**
* brcmf_p2p_send_action_frame() - send action frame .
*
@@ -1664,6 +1705,10 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
s32 tx_retry;
s32 extra_listen_time;
uint delta_ms;
+ unsigned long dwell_jiffies = 0;
+ bool dwell_overflow = false;
+
+ s32 requested_dwell = af_params->dwell_time;
action_frame = &af_params->action_frame;
action_frame_len = le16_to_cpu(action_frame->len);
@@ -1775,12 +1820,21 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
/* update channel */
af_params->channel = cpu_to_le32(afx_hdl->peer_chan);
}
+ dwell_jiffies = jiffies;
+ dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
+ dwell_jiffies);
tx_retry = 0;
while (!p2p->block_gon_req_tx &&
- (ack == false) && (tx_retry < P2P_AF_TX_MAX_RETRY)) {
+ (!ack) && (tx_retry < P2P_AF_TX_MAX_RETRY) &&
+ !dwell_overflow) {
+ if (af_params->channel)
+ msleep(P2P_AF_RETRY_DELAY_TIME);
+
ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
tx_retry++;
+ dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
+ dwell_jiffies);
}
if (ack == false) {
bphy_err(drvr, "Failed to send Action Frame(retry %d)\n",
@@ -1994,7 +2048,7 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
if_request.type = cpu_to_le16((u16)if_type);
if_request.chspec = cpu_to_le16(chanspec);
- memcpy(if_request.addr, p2p->int_addr, sizeof(if_request.addr));
+ memcpy(if_request.addr, p2p->conn_int_addr, sizeof(if_request.addr));
brcmf_cfg80211_arm_vif_event(cfg, vif);
err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request,
@@ -2149,6 +2203,27 @@ fail:
return ERR_PTR(err);
}
+int brcmf_p2p_get_conn_idx(struct brcmf_cfg80211_info *cfg)
+{
+ int i;
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+
+ if (!ifp)
+ return -ENODEV;
+
+ for (i = P2PAPI_BSSCFG_CONNECTION; i < P2PAPI_BSSCFG_MAX; i++) {
+ if (!cfg->p2p.bss_idx[i].vif) {
+ if (i == P2PAPI_BSSCFG_CONNECTION2 &&
+ !(brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) {
+ brcmf_err("Multi p2p not supported");
+ return -EIO;
+ }
+ return i;
+ }
+ }
+ return -EIO;
+}
+
/**
* brcmf_p2p_add_vif() - create a new P2P virtual interface.
*
@@ -2168,7 +2243,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_vif *vif;
enum brcmf_fil_p2p_if_types iftype;
- int err;
+ int err = 0;
+ int connidx;
+ u8 *p2p_intf_addr;
if (brcmf_cfg80211_vif_event_armed(cfg))
return ERR_PTR(-EBUSY);
@@ -2194,9 +2271,21 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
return (struct wireless_dev *)vif;
brcmf_cfg80211_arm_vif_event(cfg, vif);
- err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp, cfg->p2p.int_addr,
- iftype);
+ connidx = brcmf_p2p_get_conn_idx(cfg);
+
+ if (connidx == P2PAPI_BSSCFG_CONNECTION)
+ p2p_intf_addr = cfg->p2p.conn_int_addr;
+ else if (connidx == P2PAPI_BSSCFG_CONNECTION2)
+ p2p_intf_addr = cfg->p2p.conn2_int_addr;
+ else
+ err = -EINVAL;
+
+ if (!err)
+ err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp,
+ p2p_intf_addr, iftype);
+
if (err) {
+ brcmf_err("request p2p interface failed\n");
brcmf_cfg80211_arm_vif_event(cfg, NULL);
goto fail;
}
@@ -2228,7 +2317,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
goto fail;
}
- cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
+ cfg->p2p.bss_idx[connidx].vif = vif;
/* Disable firmware roaming for P2P interface */
brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
if (iftype == BRCMF_FIL_P2P_IF_GO) {
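
Note: a stand-alone illustration (user-space C, not driver code) of the dwell-time check added above. The requested dwell value is assumed to carry a retry marker in its top byte (CUSTOM_RETRY_MASK = 0xff000000) and the dwell budget in milliseconds in the remaining bits; the driver compares the elapsed time since the first transmit attempt against that budget:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define CUSTOM_RETRY_MASK 0xff000000u

	static bool dwell_overflow(uint32_t requested_dwell, uint32_t elapsed_ms)
	{
		return (requested_dwell & CUSTOM_RETRY_MASK) &&
		       elapsed_ms > (requested_dwell & ~CUSTOM_RETRY_MASK);
	}

	int main(void)
	{
		uint32_t requested = 0x01000000u | 200;	/* marker set, 200 ms budget */

		printf("%d\n", dwell_overflow(requested, 150));	/* 0: still inside budget */
		printf("%d\n", dwell_overflow(requested, 250));	/* 1: retries exceeded dwell */
		return 0;
	}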
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index 64ab9b6a677d..d2ecee565bf2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -14,13 +14,15 @@ struct brcmf_cfg80211_info;
*
* @P2PAPI_BSSCFG_PRIMARY: maps to driver's primary bsscfg.
* @P2PAPI_BSSCFG_DEVICE: maps to driver's P2P device discovery bsscfg.
- * @P2PAPI_BSSCFG_CONNECTION: maps to driver's P2P connection bsscfg.
+ * @P2PAPI_BSSCFG_CONNECTION: maps to driver's 1st P2P connection bsscfg.
+ * @P2PAPI_BSSCFG_CONNECTION2: maps to driver's 2nd P2P connection bsscfg.
* @P2PAPI_BSSCFG_MAX: used for range checking.
*/
enum p2p_bss_type {
P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
- P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+ P2PAPI_BSSCFG_CONNECTION, /* driver's 1st P2P connection bsscfg */
+ P2PAPI_BSSCFG_CONNECTION2, /* driver's 2nd P2P connection bsscfg */
P2PAPI_BSSCFG_MAX
};
@@ -119,7 +121,8 @@ struct brcmf_p2p_info {
struct brcmf_cfg80211_info *cfg;
unsigned long status;
u8 dev_addr[ETH_ALEN];
- u8 int_addr[ETH_ALEN];
+ u8 conn_int_addr[ETH_ALEN];
+ u8 conn2_int_addr[ETH_ALEN];
struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
struct timer_list listen_timer;
u8 listen_channel;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 8e8b685cfe09..648efcbc819f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -1431,6 +1431,7 @@ int brcms_up(struct brcms_info *wl)
* precondition: perimeter lock has been acquired
*/
void brcms_down(struct brcms_info *wl)
+ __must_hold(&wl->lock)
{
uint callbacks, ret_val = 0;
@@ -1717,6 +1718,7 @@ int brcms_check_firmwares(struct brcms_info *wl)
* precondition: perimeter lock has been acquired
*/
bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
+ __must_hold(&wl->lock)
{
bool blocked = brcms_c_check_radio_disabled(wl->wlc);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 7f2c15c799d2..77494fc30c2c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -1057,7 +1057,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs->lasttxtime = 0;
*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
- if (*fatal == true)
+ if (*fatal)
return false;
n++;
}
@@ -3768,17 +3768,14 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
* Write this BSS config's MAC address to core.
* Updates RXE match engine.
*/
-static int brcms_c_set_mac(struct brcms_bss_cfg *bsscfg)
+static void brcms_c_set_mac(struct brcms_bss_cfg *bsscfg)
{
- int err = 0;
struct brcms_c_info *wlc = bsscfg->wlc;
/* enter the MAC addr into the RXE match registers */
brcms_c_set_addrmatch(wlc, RCM_MAC_OFFSET, wlc->pub->cur_etheraddr);
brcms_c_ampdu_macaddr_upd(wlc);
-
- return err;
}
/* Write the BSS config's BSSID address to core (set_bssid in d11procs.tcl).
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c
index 0ab865de1491..79d4a7a4da8b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c
@@ -304,9 +304,8 @@ int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force)
* update wlc->stf->ss_opmode which represents the operational stf_ss mode
* we're using
*/
-int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
+void brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
{
- int ret_code = 0;
u8 prev_stf_ss;
u8 upd_stf_ss;
@@ -325,7 +324,7 @@ int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
PHY_TXC1_MODE_SISO : PHY_TXC1_MODE_CDD;
} else {
if (wlc->band != band)
- return ret_code;
+ return;
upd_stf_ss = (wlc->stf->txstreams == 1) ?
PHY_TXC1_MODE_SISO : band->band_stf_ss_mode;
}
@@ -333,8 +332,6 @@ int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
wlc->stf->ss_opmode = upd_stf_ss;
brcms_b_band_stf_ss_set(wlc->hw, upd_stf_ss);
}
-
- return ret_code;
}
int brcms_c_stf_attach(struct brcms_c_info *wlc)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
index ba9493009a33..aa4ab53bf634 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
@@ -25,7 +25,7 @@ void brcms_c_stf_detach(struct brcms_c_info *wlc);
void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
u16 *ss_algo_channel, u16 chanspec);
-int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
+void brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
diff --git a/drivers/net/wireless/intel/ipw2x00/Kconfig b/drivers/net/wireless/intel/ipw2x00/Kconfig
index ab17903ba9f8..f42b3cdce611 100644
--- a/drivers/net/wireless/intel/ipw2x00/Kconfig
+++ b/drivers/net/wireless/intel/ipw2x00/Kconfig
@@ -16,7 +16,7 @@ config IPW2100
A driver for the Intel PRO/Wireless 2100 Network
Connection 802.11b wireless network adapter.
- See <file:Documentation/networking/device_drivers/intel/ipw2100.txt>
+ See <file:Documentation/networking/device_drivers/intel/ipw2100.rst>
for information on the capabilities currently enabled in this driver
and for tips for debugging issues and problems.
@@ -78,7 +78,7 @@ config IPW2200
A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
Connection adapters.
- See <file:Documentation/networking/device_drivers/intel/ipw2200.txt>
+ See <file:Documentation/networking/device_drivers/intel/ipw2200.rst>
for information on the capabilities currently enabled in this
driver and for tips for debugging issues and problems.
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 97ea6e2035e6..624fe721e2b5 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -8352,7 +8352,7 @@ static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) {
printk(KERN_WARNING DRV_NAME ": Firmware image not compatible "
"(detected version id of %u). "
- "See Documentation/networking/device_drivers/intel/ipw2100.txt\n",
+ "See Documentation/networking/device_drivers/intel/ipw2100.rst\n",
h->version);
return 1;
}
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 60b5e08dd6df..e9e686ad57b1 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -3770,10 +3770,8 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
struct pci_dev *dev = priv->pci_dev;
q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
- if (!q->txb) {
- IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
+ if (!q->txb)
return -ENOMEM;
- }
q->bd =
pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
@@ -7042,23 +7040,22 @@ static int ipw_qos_association(struct ipw_priv *priv,
* off the network from the associated setting, adjust the QoS
* setting
*/
-static int ipw_qos_association_resp(struct ipw_priv *priv,
+static void ipw_qos_association_resp(struct ipw_priv *priv,
struct libipw_network *network)
{
- int ret = 0;
unsigned long flags;
u32 size = sizeof(struct libipw_qos_parameters);
int set_qos_param = 0;
if ((priv == NULL) || (network == NULL) ||
(priv->assoc_network == NULL))
- return ret;
+ return;
if (!(priv->status & STATUS_ASSOCIATED))
- return ret;
+ return;
if ((priv->ieee->iw_mode != IW_MODE_INFRA))
- return ret;
+ return;
spin_lock_irqsave(&priv->ieee->lock, flags);
if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
@@ -7088,8 +7085,6 @@ static int ipw_qos_association_resp(struct ipw_priv *priv,
if (set_qos_param == 1)
schedule_work(&priv->qos_activate);
-
- return ret;
}
static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
@@ -10643,10 +10638,8 @@ static void ipw_bg_link_down(struct work_struct *work)
mutex_unlock(&priv->mutex);
}
-static int ipw_setup_deferred_work(struct ipw_priv *priv)
+static void ipw_setup_deferred_work(struct ipw_priv *priv)
{
- int ret = 0;
-
init_waitqueue_head(&priv->wait_command_queue);
init_waitqueue_head(&priv->wait_state);
@@ -10680,8 +10673,6 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
tasklet_init(&priv->irq_tasklet,
ipw_irq_tasklet, (unsigned long)priv);
-
- return ret;
}
static void shim__set_security(struct net_device *dev,
@@ -11662,11 +11653,7 @@ static int ipw_pci_probe(struct pci_dev *pdev,
IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
- err = ipw_setup_deferred_work(priv);
- if (err) {
- IPW_ERROR("Unable to setup deferred work\n");
- goto out_iounmap;
- }
+ ipw_setup_deferred_work(priv);
ipw_sw_reset(priv, 1);
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 0aae3fa4128c..fbcd1405aeea 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -13,7 +13,8 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
-iwlwifi-objs += fw/notif-wait.o
+
+iwlwifi-objs += fw/img.o fw/notif-wait.o
iwlwifi-objs += fw/dbg.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index bc49cdd819df..2f741f5e3a7d 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -90,7 +90,8 @@
#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
-#define IWL_22000_SOSNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
+#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
+#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@@ -120,6 +121,10 @@
IWL_22000_SO_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(api) \
IWL_22000_TY_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(api) \
+ IWL_SNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -229,6 +234,15 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
+const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_22000,
+ .base_params = &iwl_22000_base_params,
+};
+
const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
@@ -238,9 +252,10 @@ const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
.base_params = &iwl_22000_base_params,
.integrated = true,
.xtal_latency = 5000,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
};
-const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
+const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
@@ -248,17 +263,21 @@ const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
.integrated = true,
- .xtal_latency = 12000,
- .low_latency_xtal = true,
+ .xtal_latency = 1820,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_1820US,
};
-const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
+const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
+ .integrated = true,
+ .xtal_latency = 12000,
+ .low_latency_xtal = true,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
/*
@@ -522,22 +541,32 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
};
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
+ .name = "Intel(R) Wi-Fi 6 AX210 160MHz",
.fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX211 160MHz",
+ .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = {
+ .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
+ .fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+ .trans.xtal_latency = 12000,
+ .trans.low_latency_xtal = true,
+};
+
const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
+ .name = "Intel(R) Wi-Fi 6 AX210 160MHz",
.fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@@ -545,16 +574,34 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX411 160MHz",
+ .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
+ .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
+ .fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+ .trans.xtal_latency = 12000,
+ .trans.low_latency_xtal = true,
+};
+
const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX411 160MHz",
- .fw_name_pre = IWL_22000_SOSNJ_A_GF4_A_FW_PRE,
+ .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
+ .fw_name_pre = IWL_SNJ_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = {
+ .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
+ .fw_name_pre = IWL_SNJ_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
@@ -573,3 +620,5 @@ MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
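
Note: the MODULE_FIRMWARE names above are assembled at preprocessing time from the FW_PRE prefix plus the stringified ucode API level. A small illustration of the resulting file name follows; the API level 59 is only an example value, not necessarily IWL_22000_UCODE_API_MAX:

	#include <stdio.h>

	#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"

	int main(void)
	{
		char fw_name[64];

		/* mirrors IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode" */
		snprintf(fw_name, sizeof(fw_name), "%s%d.ucode",
			 IWL_SNJ_A_GF_A_FW_PRE, 59);
		printf("%s\n", fw_name);	/* iwlwifi-SoSnj-a0-gf-a0-59.ucode */
		return 0;
	}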
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index e3a33388be70..e2184ba4d8b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -151,6 +151,82 @@ found:
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg);
+int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
+ __le32 *black_list_array,
+ int *black_list_size)
+{
+ union acpi_object *wifi_pkg, *data;
+ int ret, tbl_rev, i;
+ bool enabled;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WTAS_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!wifi_pkg->package.elements[0].integer.value;
+
+ if (!enabled) {
+ *black_list_size = -1;
+ IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
+ ret = 0;
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[1].integer.value >
+ APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
+ wifi_pkg->package.elements[1].integer.value);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ *black_list_size = wifi_pkg->package.elements[1].integer.value;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *black_list_size);
+ if (*black_list_size > APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
+ *black_list_size);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ for (i = 0; i < *black_list_size; i++) {
+ u32 country;
+
+ if (wifi_pkg->package.elements[2 + i].type !=
+ ACPI_TYPE_INTEGER) {
+ IWL_DEBUG_RADIO(fwrt,
+ "TAS invalid array elem %d\n", 2 + i);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ country = wifi_pkg->package.elements[2 + i].integer.value;
+ black_list_array[i] = cpu_to_le32(country);
+ IWL_DEBUG_RADIO(fwrt, "TAS black list country %d\n", country);
+ }
+
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
+
int iwl_acpi_get_mcc(struct device *dev, char *mcc)
{
union acpi_object *wifi_pkg, *data;
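
Note: a stand-alone model (not ACPI code) of the WTAS package layout that iwl_acpi_get_tas() expects: element 0 is the enabled flag, element 1 the black list size, elements 2..17 the country entries. The validation below mirrors the checks in the new function; the example country values are arbitrary:

	#include <stdint.h>
	#include <stdio.h>

	#define WTAS_BLACK_LIST_MAX 16

	static int parse_wtas(const uint64_t *pkg, int pkg_len,
			      uint32_t *black_list, int *black_list_size)
	{
		int i;

		if (pkg_len < 2)
			return -1;

		if (!pkg[0]) {			/* TAS not enabled */
			*black_list_size = -1;
			return 0;
		}

		if (pkg[1] > WTAS_BLACK_LIST_MAX || pkg_len < 2 + (int)pkg[1])
			return -1;

		*black_list_size = (int)pkg[1];
		for (i = 0; i < *black_list_size; i++)
			black_list[i] = (uint32_t)pkg[2 + i];

		return 0;
	}

	int main(void)
	{
		const uint64_t pkg[] = { 1, 2, 0x3030, 0x3531 };	/* example entries */
		uint32_t list[WTAS_BLACK_LIST_MAX];
		int n, i;

		if (!parse_wtas(pkg, 4, list, &n))
			for (i = 0; i < n; i++)
				printf("country %u\n", list[i]);
		return 0;
	}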
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 5590e5cc8fbb..6a646dc524e1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -64,6 +64,7 @@
#include "fw/api/commands.h"
#include "fw/api/power.h"
#include "fw/api/phy.h"
+#include "fw/api/nvm-reg.h"
#include "fw/img.h"
#include "iwl-trans.h"
@@ -75,6 +76,7 @@
#define ACPI_SPLC_METHOD "SPLC"
#define ACPI_ECKV_METHOD "ECKV"
#define ACPI_PPAG_METHOD "PPAG"
+#define ACPI_WTAS_METHOD "WTAS"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -96,6 +98,12 @@
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
+/*
+ * 1 type, 1 enabled, 1 black list size, 16 black list array
+ */
+#define APCI_WTAS_BLACK_LIST_MAX 16
+#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
+
#define ACPI_WGDS_NUM_BANDS 2
#define ACPI_WGDS_TABLE_SIZE 3
@@ -174,6 +182,9 @@ int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset_group *table);
+int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *black_list_array,
+ int *black_list_size);
+
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -250,5 +261,11 @@ static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
return -ENOENT;
}
+static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
+ __le32 *black_list_array,
+ int *black_list_size)
+{
+ return -ENOENT;
+}
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
index 5e88fa2e6fb7..546fa60ed9fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -120,15 +120,48 @@ enum iwl_calib_cfg {
};
/**
+ * struct iwl_phy_specific_cfg - specific PHY filter configuration
+ *
+ * Sent as part of the phy configuration command (v3) to configure specific FW
+ * defined PHY filters that can be applied to each antenna.
+ *
+ * @filter_cfg_chain_a: filter config id for LMAC1 chain A
+ * @filter_cfg_chain_b: filter config id for LMAC1 chain B
+ * @filter_cfg_chain_c: filter config id for LMAC2 chain A
+ * @filter_cfg_chain_d: filter config id for LMAC2 chain B
+ * values: 0 - no filter; 0xffffffff - reserved; otherwise - filter id
+ */
+struct iwl_phy_specific_cfg {
+ __le32 filter_cfg_chain_a;
+ __le32 filter_cfg_chain_b;
+ __le32 filter_cfg_chain_c;
+ __le32 filter_cfg_chain_d;
+} __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/
+
+/**
* struct iwl_phy_cfg_cmd - Phy configuration command
+ *
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
* @calib_control: calibration control data
*/
-struct iwl_phy_cfg_cmd {
+struct iwl_phy_cfg_cmd_v1 {
__le32 phy_cfg;
struct iwl_calib_ctrl calib_control;
} __packed;
+/**
+ * struct iwl_phy_cfg_cmd_v3 - Phy configuration command (v3)
+ *
+ * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
+ * @calib_control: calibration control data
+ * @phy_specific_cfg: configure predefined PHY filters
+ */
+struct iwl_phy_cfg_cmd_v3 {
+ __le32 phy_cfg;
+ struct iwl_calib_ctrl calib_control;
+ struct iwl_phy_specific_cfg phy_specific_cfg;
+} __packed; /* PHY_CONFIGURATION_CMD_API_S_VER_3 */
+
/*
* enum iwl_dc2dc_config_id - flag ids
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 3643b6ba6385..c4562e1f8d18 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -618,7 +618,7 @@ struct iwl_wowlan_status_v6 {
* @wake_packet_bufsize: wakeup packet buffer size
* @wake_packet: wakeup packet
*/
-struct iwl_wowlan_status {
+struct iwl_wowlan_status_v7 {
struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
@@ -634,6 +634,43 @@ struct iwl_wowlan_status {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
+/**
+ * struct iwl_wowlan_status - WoWLAN status
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @reserved: unused
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status {
+ struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 tid_tear_down;
+ u8 reserved[3];
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_9 */
+
static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
{
return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 97b49843e318..2d230a7893c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -80,6 +80,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
* response is &struct iwl_nvm_get_info_rsp
*/
NVM_GET_INFO = 0x2,
+
+ /**
+ * @TAS_CONFIG: &struct iwl_tas_config_cmd
+ */
+ TAS_CONFIG = 0x3,
};
/**
@@ -431,4 +436,14 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
+#define IWL_TAS_BLACK_LIST_MAX 16
+/**
+ * struct iwl_tas_config_cmd - configures the TAS
+ * @black_list_size: size of relevant field in black_list_array
+ * @black_list_array: black list countries (without TAS)
+ */
+struct iwl_tas_config_cmd {
+ __le32 black_list_size;
+ __le32 black_list_array[IWL_TAS_BLACK_LIST_MAX];
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
#endif /* __iwl_fw_api_nvm_reg_h__ */
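
Note: a hypothetical helper (not part of this patch) sketching how the black list gathered by iwl_acpi_get_tas() would be copied into the TAS_CONFIG payload defined above; the ACPI code already stores the entries as __le32:

	static void iwl_tas_fill_cmd(struct iwl_tas_config_cmd *cmd,
				     const __le32 *black_list_array,
				     int black_list_size)
	{
		int i;

		cmd->black_list_size = cpu_to_le32(black_list_size);
		for (i = 0; i < black_list_size && i < IWL_TAS_BLACK_LIST_MAX; i++)
			cmd->black_list_array[i] = black_list_array[i];
	}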
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 3d770f406c38..5cc33a1b7172 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -1051,20 +1051,6 @@ struct iwl_scan_req_params_v12 {
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
/**
- * struct iwl_scan_req_params_v13
- * @general_params: &struct iwl_scan_general_params_v10
- * @channel_params: &struct iwl_scan_channel_params_v4
- * @periodic_params: &struct iwl_scan_periodic_parms_v1
- * @probe_params: &struct iwl_scan_probe_params_v4
- */
-struct iwl_scan_req_params_v13 {
- struct iwl_scan_general_params_v10 general_params;
- struct iwl_scan_channel_params_v4 channel_params;
- struct iwl_scan_periodic_parms_v1 periodic_params;
- struct iwl_scan_probe_params_v4 probe_params;
-} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */
-
-/**
* struct iwl_scan_req_params_v14
* @general_params: &struct iwl_scan_general_params_v10
* @channel_params: &struct iwl_scan_channel_params_v6
@@ -1091,18 +1077,6 @@ struct iwl_scan_req_umac_v12 {
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
/**
- * struct iwl_scan_req_umac_v13
- * @uid: scan id, &enum iwl_umac_scan_uid_offsets
- * @ooc_priority: out of channel priority - &enum iwl_scan_priority
- * @scan_params: scan parameters
- */
-struct iwl_scan_req_umac_v13 {
- __le32 uid;
- __le32 ooc_priority;
- struct iwl_scan_req_params_v13 scan_params;
-} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_13 */
-
-/**
* struct iwl_scan_req_umac_v14
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h b/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h
index aadca78e9846..0c6d7b3e1324 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Deutschland GmbH
+ * Copyright(c) 2012 - 2014, 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Deutschland GmbH
+ * Copyright(c) 2012 - 2014, 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,6 +66,12 @@
#define SOC_CONFIG_CMD_FLAGS_DISCRETE BIT(0)
#define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY BIT(1)
+#define SOC_FLAGS_LTR_APPLY_DELAY_MASK 0xc
+#define SOC_FLAGS_LTR_APPLY_DELAY_NONE 0
+#define SOC_FLAGS_LTR_APPLY_DELAY_200 1
+#define SOC_FLAGS_LTR_APPLY_DELAY_2500 2
+#define SOC_FLAGS_LTR_APPLY_DELAY_1820 3
+
/**
* struct iwl_soc_configuration_cmd - Set device stabilization latency
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index 970e9e508ad0..c010e6febbf4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -245,32 +245,6 @@ enum iwl_sta_sleep_flag {
#define STA_KEY_LEN_WEP40 (5)
#define STA_KEY_LEN_WEP104 (13)
-/**
- * struct iwl_mvm_keyinfo - key information
- * @key_flags: type &enum iwl_sta_key_flag
- * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
- * @reserved1: reserved
- * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
- * @key_offset: key offset in the fw's key table
- * @reserved2: reserved
- * @key: 16-byte unicast decryption key
- * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
- * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
- * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
- */
-struct iwl_mvm_keyinfo {
- __le16 key_flags;
- u8 tkip_rx_tsc_byte2;
- u8 reserved1;
- __le16 tkip_rx_ttak[5];
- u8 key_offset;
- u8 reserved2;
- u8 key[16];
- __le64 tx_secur_seq_cnt;
- __le64 hw_tkip_mic_rx_key;
- __le64 hw_tkip_mic_tx_key;
-} __packed;
-
#define IWL_ADD_STA_STATUS_MASK 0xFF
#define IWL_ADD_STA_BAID_VALID_MASK 0x8000
#define IWL_ADD_STA_BAID_MASK 0x7F00
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 14ac7153a3e7..39c8332be3ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -818,7 +818,8 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
static struct iwl_fw_error_dump_file *
iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_dump_ptrs *fw_error_dump)
+ struct iwl_fw_dump_ptrs *fw_error_dump,
+ struct iwl_fwrt_dump_data *data)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
@@ -900,15 +901,15 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
}
/* If we only want a monitor dump, reset the file length */
- if (fwrt->dump.monitor_only) {
+ if (data->monitor_only) {
file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
- fwrt->dump.desc)
+ data->desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
- fwrt->dump.desc->len;
+ data->desc->len;
dump_file = vzalloc(file_len);
if (!dump_file)
@@ -984,19 +985,19 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
iwl_read_radio_regs(fwrt, &dump_data);
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
- fwrt->dump.desc) {
+ data->desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
- fwrt->dump.desc->len);
+ data->desc->len);
dump_trig = (void *)dump_data->data;
- memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
- sizeof(*dump_trig) + fwrt->dump.desc->len);
+ memcpy(dump_trig, &data->desc->trig_desc,
+ sizeof(*dump_trig) + data->desc->len);
dump_data = iwl_fw_error_next_data(dump_data);
}
/* In case we only want monitor dump, skip to dump trasport data */
- if (fwrt->dump.monitor_only)
+ if (data->monitor_only)
goto out;
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
@@ -2172,7 +2173,21 @@ static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt,
return le32_to_cpu(hdr->file_len);
}
-static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
+static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt,
+ const struct iwl_fw_dump_desc **desc)
+{
+ if (desc && *desc != &iwl_dump_desc_assert)
+ kfree(*desc);
+
+ *desc = NULL;
+ fwrt->dump.lmac_err_id[0] = 0;
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ fwrt->dump.lmac_err_id[1] = 0;
+ fwrt->dump.umac_err_id = 0;
+}
+
+static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data)
{
struct iwl_fw_dump_ptrs fw_error_dump = {};
struct iwl_fw_error_dump_file *dump_file;
@@ -2180,11 +2195,11 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
u32 file_len;
u32 dump_mask = fwrt->fw->dbg.dump_mask;
- dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump);
+ dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump, dump_data);
if (!dump_file)
- goto out;
+ return;
- if (fwrt->dump.monitor_only)
+ if (dump_data->monitor_only)
dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
@@ -2213,9 +2228,6 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
vfree(fw_error_dump.fwrt_ptr);
vfree(fw_error_dump.trans_ptr);
-
-out:
- iwl_fw_free_dump_desc(fwrt);
}
static void iwl_dump_ini_list_free(struct list_head *list)
@@ -2244,7 +2256,7 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
if (!file_len)
- goto out;
+ return;
sg_dump_data = alloc_sgtable(file_len);
if (sg_dump_data) {
@@ -2261,9 +2273,6 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
GFP_KERNEL);
}
iwl_dump_ini_list_free(&dump_list);
-
-out:
- iwl_fw_error_dump_data_free(dump_data);
}
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
@@ -2278,27 +2287,40 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only,
unsigned int delay)
{
+ struct iwl_fwrt_wk_data *wk_data;
+ unsigned long idx;
+
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
- iwl_fw_free_dump_desc(fwrt);
+ iwl_fw_free_dump_desc(fwrt, &desc);
return 0;
}
- /* use wks[0] since dump flow prior to ini does not need to support
- * consecutive triggers collection
+ /*
+ * Check there is an available worker.
+ * ffz return value is undefined if no zero exists,
+ * so check against ~0UL first.
*/
- if (test_and_set_bit(fwrt->dump.wks[0].idx, &fwrt->dump.active_wks))
+ if (fwrt->dump.active_wks == ~0UL)
+ return -EBUSY;
+
+ idx = ffz(fwrt->dump.active_wks);
+
+ if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM ||
+ test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
return -EBUSY;
- if (WARN_ON(fwrt->dump.desc))
- iwl_fw_free_dump_desc(fwrt);
+ wk_data = &fwrt->dump.wks[idx];
+
+ if (WARN_ON(wk_data->dump_data.desc))
+ iwl_fw_free_dump_desc(fwrt, &wk_data->dump_data.desc);
+
+ wk_data->dump_data.desc = desc;
+ wk_data->dump_data.monitor_only = monitor_only;
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
- fwrt->dump.desc = desc;
- fwrt->dump.monitor_only = monitor_only;
-
- schedule_delayed_work(&fwrt->dump.wks[0].wk, usecs_to_jiffies(delay));
+ schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));
return 0;
}
@@ -2307,26 +2329,40 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type)
{
- int ret;
- struct iwl_fw_dump_desc *iwl_dump_error_desc;
-
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status))
return -EIO;
- iwl_dump_error_desc = kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
- if (!iwl_dump_error_desc)
- return -ENOMEM;
+ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT)
+ return -EIO;
- iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
- iwl_dump_error_desc->len = 0;
+ iwl_dbg_tlv_time_point(fwrt,
+ IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT,
+ NULL);
+ } else {
+ struct iwl_fw_dump_desc *iwl_dump_error_desc;
+ int ret;
- ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
- if (ret)
- kfree(iwl_dump_error_desc);
- else
- iwl_trans_sync_nmi(fwrt->trans);
+ iwl_dump_error_desc =
+ kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
- return ret;
+ if (!iwl_dump_error_desc)
+ return -ENOMEM;
+
+ iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
+ iwl_dump_error_desc->len = 0;
+
+ ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc,
+ false, 0);
+ if (ret) {
+ kfree(iwl_dump_error_desc);
+ return ret;
+ }
+ }
+
+ iwl_trans_sync_nmi(fwrt->trans);
+
+ return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect);
@@ -2504,14 +2540,14 @@ IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
{
struct iwl_fw_dbg_params params = {0};
+ struct iwl_fwrt_dump_data *dump_data =
+ &fwrt->dump.wks[wk_idx].dump_data;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
- if (fwrt->ops && fwrt->ops->fw_running &&
- !fwrt->ops->fw_running(fwrt->ops_ctx)) {
- IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
- iwl_fw_free_dump_desc(fwrt);
+ if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
+ IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
goto out;
}
@@ -2527,12 +2563,17 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
if (iwl_trans_dbg_ini_valid(fwrt->trans))
iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
else
- iwl_fw_error_dump(fwrt);
+ iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
out:
+ if (iwl_trans_dbg_ini_valid(fwrt->trans))
+ iwl_fw_error_dump_data_free(dump_data);
+ else
+ iwl_fw_free_dump_desc(fwrt, &dump_data->desc);
+
clear_bit(wk_idx, &fwrt->dump.active_wks);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 9d3513213f5f..11558df36b94 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -98,17 +98,6 @@ struct iwl_fw_dbg_params {
extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
-static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
-{
- if (fwrt->dump.desc != &iwl_dump_desc_assert)
- kfree(fwrt->dump.desc);
- fwrt->dump.desc = NULL;
- fwrt->dump.lmac_err_id[0] = 0;
- if (fwrt->smem_cfg.num_lmacs > 1)
- fwrt->dump.lmac_err_id[1] = 0;
- fwrt->dump.umac_err_id = 0;
-}
-
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
bool monitor_only, unsigned int delay);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 35f42e529a6d..1fb45fd30ffa 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -449,6 +449,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50,
IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52,
+ IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
/* set 2 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
new file mode 100644
index 000000000000..de8cff463dbe
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -0,0 +1,99 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#include "img.h"
+
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+
+ if (!fw->ucode_capa.cmd_versions ||
+ !fw->ucode_capa.n_cmd_versions)
+ return IWL_FW_CMD_VER_UNKNOWN;
+
+ entry = fw->ucode_capa.cmd_versions;
+ for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd)
+ return entry->cmd_ver;
+ }
+
+ return IWL_FW_CMD_VER_UNKNOWN;
+}
+EXPORT_SYMBOL_GPL(iwl_fw_lookup_cmd_ver);
+
+u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+
+ if (!fw->ucode_capa.cmd_versions ||
+ !fw->ucode_capa.n_cmd_versions)
+ return def;
+
+ entry = fw->ucode_capa.cmd_versions;
+ for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd) {
+ if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return def;
+ return entry->notif_ver;
+ }
+ }
+
+ return def;
+}
+EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver);
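
Note: a usage sketch (hypothetical group/command values) for the lookup helpers moved into this file; callers typically fall back to legacy behaviour when the firmware does not advertise a command version:

	static bool example_supports_v2(const struct iwl_fw *fw, u8 grp, u8 cmd)
	{
		u8 ver = iwl_fw_lookup_cmd_ver(fw, grp, cmd);

		return ver != IWL_FW_CMD_VER_UNKNOWN && ver >= 2;
	}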
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 90ca5f929cf9..a8630bf90b63 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -313,22 +313,7 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
-static inline u8 iwl_mvm_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
-{
- const struct iwl_fw_cmd_version *entry;
- unsigned int i;
-
- if (!fw->ucode_capa.cmd_versions ||
- !fw->ucode_capa.n_cmd_versions)
- return IWL_FW_CMD_VER_UNKNOWN;
-
- entry = fw->ucode_capa.cmd_versions;
- for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
- if (entry->group == grp && entry->cmd == cmd)
- return entry->cmd_ver;
- }
-
- return IWL_FW_CMD_VER_UNKNOWN;
-}
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd);
+u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
#endif /* __iwl_fw_img_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index da0d90e2b537..9906d9b9bdd5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -98,8 +98,16 @@ struct iwl_fwrt_shared_mem_cfg {
* @fw_pkt: packet received from FW
*/
struct iwl_fwrt_dump_data {
- struct iwl_fw_ini_trigger_tlv *trig;
- struct iwl_rx_packet *fw_pkt;
+ union {
+ struct {
+ struct iwl_fw_ini_trigger_tlv *trig;
+ struct iwl_rx_packet *fw_pkt;
+ };
+ struct {
+ const struct iwl_fw_dump_desc *desc;
+ bool monitor_only;
+ };
+ };
};
/**
@@ -162,8 +170,6 @@ struct iwl_fw_runtime {
/* debug */
struct {
- const struct iwl_fw_dump_desc *desc;
- bool monitor_only;
struct iwl_fwrt_wk_data wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
unsigned long active_wks;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index d5d984d7ce83..3a9a33851793 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -5,9 +5,8 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,9 +26,8 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -284,6 +282,13 @@ struct iwl_pwr_tx_backoff {
u32 backoff;
};
+enum iwl_cfg_trans_ltr_delay {
+ IWL_CFG_TRANS_LTR_DELAY_NONE = 0,
+ IWL_CFG_TRANS_LTR_DELAY_200US = 1,
+ IWL_CFG_TRANS_LTR_DELAY_2500US = 2,
+ IWL_CFG_TRANS_LTR_DELAY_1820US = 3,
+};
+
/**
* struct iwl_cfg_trans - information needed to start the trans
*
@@ -304,6 +309,7 @@ struct iwl_pwr_tx_backoff {
* @mq_rx_supported: multi-queue rx support
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
+ * @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
@@ -317,7 +323,8 @@ struct iwl_cfg_trans_params {
mq_rx_supported:1,
integrated:1,
low_latency_xtal:1,
- bisr_workaround:1;
+ bisr_workaround:1,
+ ltr_delay:2;
};
/**
@@ -506,9 +513,10 @@ struct iwl_dev_info {
extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
extern const struct iwl_cfg_trans_params iwl9560_trans_cfg;
extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_qnj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_qnj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
@@ -622,9 +630,12 @@ extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
+extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
+extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
+extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */
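
Note: a sketch of how the new 2-bit ltr_delay field could be folded into the SoC configuration flags using the SOC_FLAGS_LTR_APPLY_DELAY_* mask from fw/api/soc.h; the exact wiring in the mvm code is not part of these hunks and is an assumption here:

	#include <stdint.h>

	#define SOC_FLAGS_LTR_APPLY_DELAY_MASK 0xc

	static uint32_t soc_flags_with_ltr(uint32_t flags, uint32_t ltr_delay)
	{
		/* ltr_delay is one of IWL_CFG_TRANS_LTR_DELAY_* (0..3),
		 * placed into bits 2-3 of the flags word */
		return (flags & ~SOC_FLAGS_LTR_APPLY_DELAY_MASK) |
		       ((ltr_delay << 2) & SOC_FLAGS_LTR_APPLY_DELAY_MASK);
	}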
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index bf2f00b89214..9eb8fbfaa2a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,7 +28,7 @@
*
* BSD LICENSE
*
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -170,14 +170,24 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) != sizeof(*alloc) ||
(buf_location != IWL_FW_INI_LOCATION_SRAM_PATH &&
- buf_location != IWL_FW_INI_LOCATION_DRAM_PATH))
+ buf_location != IWL_FW_INI_LOCATION_DRAM_PATH &&
+ buf_location != IWL_FW_INI_LOCATION_NPK_PATH)) {
+ IWL_ERR(trans,
+ "WRT: Invalid allocation TLV\n");
+ return -EINVAL;
+ }
+
+ if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH ||
+ buf_location == IWL_FW_INI_LOCATION_NPK_PATH) &&
+ alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) {
+ IWL_ERR(trans,
+ "WRT: Allocation TLV for SMEM/NPK path must have id %u (current: %u)\n",
+ IWL_FW_INI_ALLOCATION_ID_DBGC1, alloc_id);
return -EINVAL;
+ }
- if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
- alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) ||
- (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
- (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
- alloc_id >= IWL_FW_INI_ALLOCATION_NUM))) {
+ if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM) {
IWL_ERR(trans,
"WRT: Invalid allocation id %u for allocation TLV\n",
alloc_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index eeb750bdbda1..a483d389d9c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1872,10 +1872,6 @@ module_param_named(power_level, iwlwifi_mod_params.power_level, int, 0444);
MODULE_PARM_DESC(power_level,
"default power save level (range from 1 - 5, default: 1)");
-module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, 0444);
-MODULE_PARM_DESC(fw_monitor,
- "firmware monitor - to debug FW (default: false - needs lots of memory)");
-
module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444);
MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 82e5cac23d8d..b094cc1e9be0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -115,7 +115,6 @@ enum iwl_uapsd_disable {
* @nvm_file: specifies an external NVM file
* @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
* IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
- * @fw_monitor: allow to use firmware monitor
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
* @enable_ini: enable new FW debug infrastructure (INI TLVs)
@@ -135,7 +134,6 @@ struct iwl_mod_params {
int antenna_coupling;
char *nvm_file;
u32 uapsd_disable;
- bool fw_monitor;
bool disable_11ac;
/**
* @disable_11ax: disable HE capabilities, default = false
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index ccf0bc16465d..d91a8e8349e6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -1166,8 +1166,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
- band = (ch_idx < NUM_2GHZ_CHANNELS) ?
- NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ band = iwl_nl80211_band_from_channel_idx(ch_idx);
center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
band);
new_rule = false;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 58df25e2fb32..b0268f44b2ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -155,5 +155,9 @@
#define IWL_MVM_USE_TWT false
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
#define IWL_MVM_USE_NSSN_SYNC 0
+#define IWL_MVM_PHY_FILTER_CHAIN_A 0
+#define IWL_MVM_PHY_FILTER_CHAIN_B 0
+#define IWL_MVM_PHY_FILTER_CHAIN_C 0
+#define IWL_MVM_PHY_FILTER_CHAIN_D 0
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 122ca7624073..222775714859 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1517,12 +1517,14 @@ out:
struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
{
- struct iwl_wowlan_status *v7, *status;
+ struct iwl_wowlan_status_v7 *v7;
+ struct iwl_wowlan_status *status;
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_WANT_SKB,
};
- int ret, len, status_size;
+ int ret, len, status_size, data_size;
+ u8 notif_ver;
lockdep_assert_held(&mvm->mutex);
@@ -1532,13 +1534,12 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
return ERR_PTR(ret);
}
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
- int data_size;
status_size = sizeof(*v6);
- len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
@@ -1593,23 +1594,33 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
}
v7 = (void *)cmd.resp_pkt->data;
- status_size = sizeof(*v7);
- len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ WOWLAN_GET_STATUSES, 0);
+
+ status_size = sizeof(*status);
+
+ if (notif_ver == IWL_FW_CMD_VER_UNKNOWN || notif_ver < 9)
+ status_size = sizeof(*v7);
if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
status = ERR_PTR(-EIO);
goto out_free_resp;
}
+ data_size = ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4);
- if (len != (status_size +
- ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4))) {
+ if (len != (status_size + data_size)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
status = ERR_PTR(-EIO);
goto out_free_resp;
}
- status = kmemdup(v7, len, GFP_KERNEL);
+ status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
+ if (!status)
+ goto out_free_resp;
+
+ memcpy(status, v7, status_size);
+ memcpy(status->wake_packet, (u8 *)v7 + status_size, data_size);
out_free_resp:
iwl_free_resp(&cmd);
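
The d3.c hunk above stops kmemdup()ing the raw notification and instead allocates the current struct iwl_wowlan_status, copying the fixed part and the variable-length wake packet separately so that an older, shorter v7 response can still populate the newer layout. A minimal sketch of that copy pattern, with hypothetical struct and field names standing in for the driver's real types:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* hypothetical layouts: the old fixed part is a prefix of the new one */
struct old_status { __le32 fixed[4]; u8 wake_packet[]; };
struct new_status { __le32 fixed[6]; u8 wake_packet[]; };

static struct new_status *copy_status(const void *resp, size_t fixed_len,
				      size_t data_len)
{
	struct new_status *s;

	/* allocate the newest layout plus the variable-length tail;
	 * kzalloc() leaves fields the old firmware didn't send as zero
	 */
	s = kzalloc(sizeof(*s) + data_len, GFP_KERNEL);
	if (!s)
		return NULL;

	/* copy only as many fixed bytes as this firmware version provides */
	memcpy(s, resp, fixed_len);
	/* the wake packet always follows the fixed part in the response */
	memcpy(s->wake_packet, (const u8 *)resp + fixed_len, data_len);
	return s;
}
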
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 9e21f5e5d364..cdb87139100d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -508,8 +508,8 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
if (new_api) {
- u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RANGE_REQ_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD);
if (cmd_ver == 8)
err = iwl_mvm_ftm_start_v8(mvm, vif, req);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 834564198409..0b6c32098b5a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -136,8 +136,8 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
.sta_id = mvmvif->bcast_sta.sta_id,
};
- u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RESPONDER_CONFIG_CMD);
int err;
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index e67c452fa92c..164fc9e98c86 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -102,9 +102,23 @@ static int iwl_set_soc_latency(struct iwl_mvm *mvm)
if (!mvm->trans->trans_cfg->integrated)
cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
- if (iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC) >= 2 &&
- (mvm->trans->trans_cfg->low_latency_xtal))
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE !=
+ SOC_FLAGS_LTR_APPLY_DELAY_NONE);
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_200US !=
+ SOC_FLAGS_LTR_APPLY_DELAY_200);
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_2500US !=
+ SOC_FLAGS_LTR_APPLY_DELAY_2500);
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US !=
+ SOC_FLAGS_LTR_APPLY_DELAY_1820);
+
+ if (mvm->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
+ !WARN_ON(!mvm->trans->trans_cfg->integrated))
+ cmd.flags |= le32_encode_bits(mvm->trans->trans_cfg->ltr_delay,
+ SOC_FLAGS_LTR_APPLY_DELAY_MASK);
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC) >= 2 &&
+ mvm->trans->trans_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
cmd.latency = cpu_to_le32(mvm->trans->trans_cfg->xtal_latency);
@@ -550,10 +564,49 @@ error:
return ret;
}
+#ifdef CONFIG_ACPI
+static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
+ struct iwl_phy_specific_cfg *phy_filters)
+{
+ /*
+ * TODO: read specific phy config from BIOS
+ * ACPI table for this feature has not been defined yet,
+ * so for now we use hardcoded values.
+ */
+
+ if (IWL_MVM_PHY_FILTER_CHAIN_A) {
+ phy_filters->filter_cfg_chain_a =
+ cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_A);
+ }
+ if (IWL_MVM_PHY_FILTER_CHAIN_B) {
+ phy_filters->filter_cfg_chain_b =
+ cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_B);
+ }
+ if (IWL_MVM_PHY_FILTER_CHAIN_C) {
+ phy_filters->filter_cfg_chain_c =
+ cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_C);
+ }
+ if (IWL_MVM_PHY_FILTER_CHAIN_D) {
+ phy_filters->filter_cfg_chain_d =
+ cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D);
+ }
+}
+
+#else /* CONFIG_ACPI */
+
+static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
+ struct iwl_phy_specific_cfg *phy_filters)
+{
+}
+#endif /* CONFIG_ACPI */
+
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
- struct iwl_phy_cfg_cmd phy_cfg_cmd;
+ struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
+ struct iwl_phy_specific_cfg phy_filters = {};
+ u8 cmd_ver;
+ size_t cmd_size;
if (iwl_mvm_has_unified_ucode(mvm) &&
!mvm->trans->cfg->tx_with_siso_diversity)
@@ -580,11 +633,20 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.calib_control.flow_trigger =
mvm->fw->default_calib[ucode_type].flow_trigger;
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ PHY_CONFIGURATION_CMD);
+ if (cmd_ver == 3) {
+ iwl_mvm_phy_filter_init(mvm, &phy_filters);
+ memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters,
+ sizeof(struct iwl_phy_specific_cfg));
+ }
+
IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
phy_cfg_cmd.phy_cfg);
-
+ cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
+ sizeof(struct iwl_phy_cfg_cmd_v1);
return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
- sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+ cmd_size, &phy_cfg_cmd);
}
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
@@ -937,6 +999,40 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
return iwl_mvm_ppag_send_cmd(mvm);
}
+static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
+{
+ int ret;
+ struct iwl_tas_config_cmd cmd = {};
+ int list_size;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.black_list_array) <
+ APCI_WTAS_BLACK_LIST_MAX);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
+ IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
+ return;
+ }
+
+ ret = iwl_acpi_get_tas(&mvm->fwrt, cmd.black_list_array, &list_size);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm,
+ "TAS table invalid or unavailable. (%d)\n",
+ ret);
+ return;
+ }
+
+ if (list_size < 0)
+ return;
+
+ /* list size if TAS enabled can only be non-negative */
+ cmd.black_list_size = cpu_to_le32((u32)list_size);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ TAS_CONFIG),
+ 0, sizeof(cmd), &cmd);
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
+}
#else /* CONFIG_ACPI */
inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
@@ -964,6 +1060,10 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
return 0;
}
+
+static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
+{
+}
#endif /* CONFIG_ACPI */
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
@@ -1282,6 +1382,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret < 0)
goto error;
+ iwl_mvm_tas_init(mvm);
iwl_mvm_leds_sync(mvm);
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
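
The iwl_set_soc_latency() hunk above feeds the new two-bit ltr_delay transport-config field straight into the SoC configuration command flags, with BUILD_BUG_ON() pinning the config enum to the firmware API values so no translation table is needed. A stripped-down sketch of that encode pattern, using hypothetical CFG_*/CMD_* names and a hypothetical mask rather than the driver's real symbols:

#include <linux/bitfield.h>
#include <linux/build_bug.h>
#include <linux/types.h>

/* hypothetical firmware-API values and the matching driver-config enum */
enum cmd_ltr_delay { CMD_LTR_DELAY_NONE = 0, CMD_LTR_DELAY_200US = 1 };
enum cfg_ltr_delay { CFG_LTR_DELAY_NONE = 0, CFG_LTR_DELAY_200US = 1 };

#define CMD_FLAGS_LTR_DELAY_MASK 0x0000000c /* hypothetical two-bit field */

static __le32 encode_soc_flags(enum cfg_ltr_delay delay)
{
	/* the config enum must stay in lockstep with the firmware values,
	 * because the field is copied into the command without translation
	 */
	BUILD_BUG_ON(CFG_LTR_DELAY_NONE != CMD_LTR_DELAY_NONE);
	BUILD_BUG_ON(CFG_LTR_DELAY_200US != CMD_LTR_DELAY_200US);

	/* place the two-bit value into its field of the little-endian flags */
	return le32_encode_bits(delay, CMD_FLAGS_LTR_DELAY_MASK);
}
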
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 7aa1350b093e..853ba7b8bf3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1264,7 +1264,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
- iwl_fw_free_dump_desc(&mvm->fwrt);
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index afcf2b98a9cb..9e2a0858108c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -2149,8 +2149,8 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
{
- u8 ver = iwl_mvm_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_OFFLOAD_UPDATE_PROFILES_CMD);
+ u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD);
return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index dfe02440d474..d0afc806706d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -505,6 +505,7 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
HCMD_NAME(NVM_ACCESS_COMPLETE),
HCMD_NAME(NVM_GET_INFO),
+ HCMD_NAME(TAS_CONFIG),
};
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
@@ -612,27 +613,6 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
-static u8 iwl_mvm_lookup_notif_ver(struct iwl_mvm *mvm, u8 grp, u8 cmd, u8 def)
-{
- const struct iwl_fw_cmd_version *entry;
- unsigned int i;
-
- if (!mvm->fw->ucode_capa.cmd_versions ||
- !mvm->fw->ucode_capa.n_cmd_versions)
- return def;
-
- entry = mvm->fw->ucode_capa.cmd_versions;
- for (i = 0; i < mvm->fw->ucode_capa.n_cmd_versions; i++, entry++) {
- if (entry->group == grp && entry->cmd == cmd) {
- if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
- return def;
- return entry->notif_ver;
- }
- }
-
- return def;
-}
-
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -745,7 +725,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
mvm->cmd_ver.d0i3_resp =
- iwl_mvm_lookup_notif_ver(mvm, LEGACY_GROUP, D0I3_END_CMD, 0);
+ iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD,
+ 0);
/* we only support version 1 */
if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
goto out_free;
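
The ops.c hunk above removes the mvm-local iwl_mvm_lookup_notif_ver() in favour of a shared firmware helper. Judging only from the deleted body, the lookup is a linear walk of the firmware's cmd_versions table; a standalone sketch of that logic (the shared helper's exact name and signature belong to the driver, the form below is illustrative):

/* sketch of the table walk the removed helper performed; assumes the
 * driver's struct iwl_fw_cmd_version and IWL_FW_CMD_VER_UNKNOWN
 */
static u8 lookup_notif_ver(const struct iwl_fw_cmd_version *vers,
			   unsigned int n_vers, u8 grp, u8 cmd, u8 def)
{
	unsigned int i;

	if (!vers || !n_vers)
		return def;

	for (i = 0; i < n_vers; i++) {
		if (vers[i].group != grp || vers[i].cmd != cmd)
			continue;
		/* an unknown entry in the table falls back to the default */
		if (vers[i].notif_ver == IWL_FW_CMD_VER_UNKNOWN)
			return def;
		return vers[i].notif_ver;
	}

	return def;
}
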
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 00e7fdbaeb7f..246265904b98 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -3740,11 +3740,12 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
}
return scnprintf(buf, bufsz,
- "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s",
+ "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s",
rate, type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
(rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
(rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+ (rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "",
(rate & RATE_MCS_BF_MSK) ? "BF " : "");
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 7a6ad1ff7055..51a061b138ba 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2051,40 +2051,6 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
-static int iwl_mvm_scan_umac_v13(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_mvm_scan_params *params, int type,
- int uid)
-{
- struct iwl_scan_req_umac_v13 *cmd = mvm->scan_cmd;
- struct iwl_scan_req_params_v13 *scan_p = &cmd->scan_params;
- int ret;
- u16 gen_flags;
- u32 bitmap_ssid = 0;
-
- mvm->scan_uid_status[uid] = type;
-
- cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
- cmd->uid = cpu_to_le32(uid);
-
- gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
- iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
- &scan_p->general_params,
- gen_flags);
-
- ret = iwl_mvm_fill_scan_sched_params(params,
- scan_p->periodic_params.schedule,
- &scan_p->periodic_params.delay);
- if (ret)
- return ret;
-
- iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
- &bitmap_ssid);
- iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
- &scan_p->channel_params, bitmap_ssid);
-
- return 0;
-}
-
static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
int uid)
@@ -2235,7 +2201,6 @@ struct iwl_scan_umac_handler {
static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */
IWL_SCAN_UMAC_HANDLER(14),
- IWL_SCAN_UMAC_HANDLER(13),
IWL_SCAN_UMAC_HANDLER(12),
};
@@ -2263,8 +2228,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
- scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC);
+ scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC);
for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
const struct iwl_scan_umac_handler *ver_handler =
@@ -2594,7 +2559,6 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
{
switch (scan_ver) {
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(14);
- IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
}
@@ -2604,8 +2568,8 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
- u8 scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC);
+ u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC);
base_size = iwl_scan_req_umac_get_size(scan_ver);
if (base_size)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 9d5b1e51b50d..b6a5921a63c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -84,32 +84,35 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
- if (le32_to_cpu(fw_mon_cfg->buf_location) ==
- IWL_FW_INI_LOCATION_SRAM_PATH) {
+ switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
+ case IWL_FW_INI_LOCATION_SRAM_PATH:
dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
-
IWL_DEBUG_FW(trans,
- "WRT: Applying SMEM buffer destination\n");
-
- goto out;
- }
-
- if (le32_to_cpu(fw_mon_cfg->buf_location) ==
- IWL_FW_INI_LOCATION_DRAM_PATH &&
- trans->dbg.fw_mon_ini[alloc_id].num_frags) {
- struct iwl_dram_data *frag =
- &trans->dbg.fw_mon_ini[alloc_id].frags[0];
-
- dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+ "WRT: Applying SMEM buffer destination\n");
+ break;
+ case IWL_FW_INI_LOCATION_NPK_PATH:
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
IWL_DEBUG_FW(trans,
- "WRT: Applying DRAM destination (alloc_id=%u)\n",
- alloc_id);
+ "WRT: Applying NPK buffer destination\n");
+ break;
- dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
- dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ case IWL_FW_INI_LOCATION_DRAM_PATH:
+ if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
+ struct iwl_dram_data *frag =
+ &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+ dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
+ alloc_id,
+ trans->dbg.fw_mon_ini[alloc_id].num_frags);
+ }
+ break;
+ default:
+ IWL_ERR(trans, "WRT: Invalid buffer destination\n");
}
-
out:
if (dbg_flags)
*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index acd01d86f101..b65405009d02 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -93,6 +93,21 @@ static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
}
+static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const struct fw_desc *sec,
+ struct iwl_dram_data *dram)
+{
+ dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, sec->len,
+ &dram->physical);
+ if (!dram->block)
+ return -ENOMEM;
+
+ dram->size = sec->len;
+ memcpy(dram->block, sec->data, sec->len);
+
+ return 0;
+}
+
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 6744c0281ffb..2083eb4f2f15 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -524,8 +524,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Qu devices */
{IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
{IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
- {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+
+ {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
@@ -539,12 +541,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
@@ -657,6 +664,19 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
+
IWL_DEV_INFO(0x2720, 0x0000, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0040, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0044, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 595e6873d56e..abe649af689c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -792,22 +792,6 @@ static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
return i;
}
-static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
- const struct fw_desc *sec,
- struct iwl_dram_data *dram)
-{
- dram->block = dma_alloc_coherent(trans->dev, sec->len,
- &dram->physical,
- GFP_KERNEL);
- if (!dram->block)
- return -ENOMEM;
-
- dram->size = sec->len;
- memcpy(dram->block, sec->data, sec->len);
-
- return 0;
-}
-
static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index e4cbd8daa7c6..a0daae058c1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -70,6 +70,7 @@
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
+#include <linux/seq_file.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
@@ -1018,21 +1019,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
return ret;
}
- /* supported for 7000 only for the moment */
- if (iwlwifi_mod_params.fw_monitor &&
- trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
-
- iwl_pcie_alloc_fw_monitor(trans, 0);
- if (fw_mon->size) {
- iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
- fw_mon->physical >> 4);
- iwl_write_prph(trans, MON_BUFF_END_ADDR,
- (fw_mon->physical + fw_mon->size) >> 4);
- }
- } else if (iwl_pcie_dbg_on(trans)) {
+ if (iwl_pcie_dbg_on(trans))
iwl_pcie_apply_destination(trans);
- }
iwl_enable_interrupts(trans);
@@ -2544,44 +2532,95 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
.llseek = generic_file_llseek, \
};
-static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+struct iwl_dbgfs_tx_queue_priv {
+ struct iwl_trans *trans;
+};
+
+struct iwl_dbgfs_tx_queue_state {
+ loff_t pos;
+};
+
+static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct iwl_trans *trans = file->private_data;
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state;
+
+ if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+ state->pos = *pos;
+ return state;
+}
+
+static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
+ void *v, loff_t *pos)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state = v;
+
+ *pos = ++state->pos;
+
+ if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
+ return NULL;
+
+ return state;
+}
+
+static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
+{
+ kfree(v);
+}
+
+static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state = v;
+ struct iwl_trans *trans = priv->trans;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq;
- char *buf;
- int pos = 0;
- int cnt;
- int ret;
- size_t bufsz;
+ struct iwl_txq *txq = trans_pcie->txq[state->pos];
+
+ seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
+ (unsigned int)state->pos,
+ !!test_bit(state->pos, trans_pcie->queue_used),
+ !!test_bit(state->pos, trans_pcie->queue_stopped));
+ if (txq)
+ seq_printf(seq,
+ "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
+ txq->read_ptr, txq->write_ptr,
+ txq->need_update, txq->frozen,
+ txq->n_window, txq->ampdu);
+ else
+ seq_puts(seq, "(unallocated)");
- bufsz = sizeof(char) * 75 *
- trans->trans_cfg->base_params->num_of_queues;
+ if (state->pos == trans_pcie->cmd_queue)
+ seq_puts(seq, " (HCMD)");
+ seq_puts(seq, "\n");
- if (!trans_pcie->txq_memory)
- return -EAGAIN;
+ return 0;
+}
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
+static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
+ .start = iwl_dbgfs_tx_queue_seq_start,
+ .next = iwl_dbgfs_tx_queue_seq_next,
+ .stop = iwl_dbgfs_tx_queue_seq_stop,
+ .show = iwl_dbgfs_tx_queue_seq_show,
+};
+
+static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv;
+
+ priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
+ sizeof(*priv));
+
+ if (!priv)
return -ENOMEM;
- for (cnt = 0;
- cnt < trans->trans_cfg->base_params->num_of_queues;
- cnt++) {
- txq = trans_pcie->txq[cnt];
- pos += scnprintf(buf + pos, bufsz - pos,
- "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
- cnt, txq->read_ptr, txq->write_ptr,
- !!test_bit(cnt, trans_pcie->queue_used),
- !!test_bit(cnt, trans_pcie->queue_stopped),
- txq->need_update, txq->frozen,
- (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ priv->trans = inode->i_private;
+ return 0;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
@@ -2914,9 +2953,15 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
+static const struct file_operations iwl_dbgfs_tx_queue_ops = {
+ .owner = THIS_MODULE,
+ .open = iwl_dbgfs_tx_queue_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
static const struct file_operations iwl_dbgfs_monitor_data_ops = {
.read = iwl_dbgfs_monitor_data_read,
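
The trans.c hunk above converts the tx_queue debugfs file from a single bounded kzalloc'd buffer to the seq_file iterator, so output can no longer be truncated as the number of hardware queues grows and each queue is formatted by its own ->show() call. For reference, a generic, self-contained skeleton of that seq_file pattern (illustrative names; the driver additionally uses __seq_open_private()/seq_release_private() to carry per-open state):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#define EX_N_ENTRIES 4 /* hypothetical number of records to dump */

static void *ex_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos < EX_N_ENTRIES ? pos : NULL;
}

static void *ex_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return ++(*pos) < EX_N_ENTRIES ? pos : NULL;
}

static void ex_seq_stop(struct seq_file *seq, void *v)
{
}

static int ex_seq_show(struct seq_file *seq, void *v)
{
	/* one record per ->show() call; seq_file handles buffering/seeking */
	seq_printf(seq, "entry %lld\n", *(loff_t *)v);
	return 0;
}

static const struct seq_operations ex_seq_ops = {
	.start = ex_seq_start,
	.next  = ex_seq_next,
	.stop  = ex_seq_stop,
	.show  = ex_seq_show,
};

static int ex_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &ex_seq_ops);
}

static const struct file_operations ex_fops = {
	.owner   = THIS_MODULE,
	.open    = ex_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
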
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 9664dbc70ef1..53747ac945b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -90,9 +90,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
- struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
@@ -102,7 +100,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
return;
filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
- num_tbs * sizeof(struct iwl_tfh_tb);
+ num_tbs * sizeof(struct iwl_tfh_tb);
/*
* filled_tfd_size contains the number of filled bytes in the TFD.
* Dividing it by 64 will give the number of chunks to fetch
@@ -114,12 +112,16 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+
/* Starting from AX210, the HW expects bytes */
WARN_ON(trans_pcie->bc_table_dword);
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
} else {
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+
/* Before AX210, the HW expects DW */
WARN_ON(!trans_pcie->bc_table_dword);
len = DIV_ROUND_UP(len, 4);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 58212c532c90..aadf3dec5bf3 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -3041,6 +3041,27 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
}
}
+
+/*
+ * HostAP uses two layers of net devices, where the inner
+ * layer gets called all the time from the outer layer.
+ * This is a natural nesting, which needs a split lock type.
+ */
+static struct lock_class_key hostap_netdev_xmit_lock_key;
+
+static void prism2_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock,
+ &hostap_netdev_xmit_lock_key);
+}
+
+static void prism2_set_lockdep_class(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
+}
+
static struct net_device *
prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
struct device *sdev)
@@ -3199,6 +3220,7 @@ while (0)
if (ret >= 0)
ret = register_netdevice(dev);
+ prism2_set_lockdep_class(dev);
rtnl_unlock();
if (ret < 0) {
printk(KERN_WARNING "%s: register netdevice failed!\n",
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index a2ee4693eaed..97c270845fd1 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -149,6 +149,7 @@ static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
}
static void *prism2_bss_list_proc_start(struct seq_file *m, loff_t *_pos)
+ __acquires(&local->lock)
{
local_info_t *local = PDE_DATA(file_inode(m->file));
spin_lock_bh(&local->lock);
@@ -162,6 +163,7 @@ static void *prism2_bss_list_proc_next(struct seq_file *m, void *v, loff_t *_pos
}
static void prism2_bss_list_proc_stop(struct seq_file *m, void *v)
+ __releases(&local->lock)
{
local_info_t *local = PDE_DATA(file_inode(m->file));
spin_unlock_bh(&local->lock);
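
The hostap_proc.c hunk above only adds sparse annotations: the proc BSS list takes local->lock in the seq_file ->start callback and drops it in ->stop, and __acquires()/__releases() tell static checkers that this cross-callback imbalance is intentional. A minimal illustration of the same annotation pattern on a hypothetical lock:

#include <linux/seq_file.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ex_lock); /* hypothetical lock guarding the records */

static void *ex_start(struct seq_file *m, loff_t *pos)
	__acquires(&ex_lock)
{
	/* taken here and released in ->stop(); without the annotations,
	 * sparse warns about a lock held across the function boundary
	 */
	spin_lock_bh(&ex_lock);
	return *pos == 0 ? pos : NULL;
}

static void ex_stop(struct seq_file *m, void *v)
	__releases(&ex_lock)
{
	spin_unlock_bh(&ex_lock);
}
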
diff --git a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
index b60048c95e0a..291ef97ed45e 100644
--- a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
@@ -278,12 +278,11 @@ static int
spectrum_cs_suspend(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- int err = 0;
/* Mark the device as stopped, to block IO until later */
orinoco_down(priv);
- return err;
+ return 0;
}
static int
diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c
index b94764c88750..ff0e30c0c14c 100644
--- a/drivers/net/wireless/intersil/p54/p54usb.c
+++ b/drivers/net/wireless/intersil/p54/p54usb.c
@@ -61,6 +61,7 @@ static const struct usb_device_id p54u_table[] = {
{USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */
{USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
{USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
+ {USB_DEVICE(0x124a, 0x4026)}, /* AirVasT USB wireless device */
{USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */
{USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */
{USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */
diff --git a/drivers/net/wireless/marvell/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h
index 80878561cb90..3c193074662b 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.h
+++ b/drivers/net/wireless/marvell/libertas/cmd.h
@@ -76,7 +76,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv,
/* Events */
-int lbs_process_event(struct lbs_private *priv, u32 event);
+void lbs_process_event(struct lbs_private *priv, u32 event);
/* Actual commands */
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index b73d08381398..cb515c5584c1 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -220,9 +220,8 @@ done:
return ret;
}
-int lbs_process_event(struct lbs_private *priv, u32 event)
+void lbs_process_event(struct lbs_private *priv, u32 event)
{
- int ret = 0;
struct cmd_header cmd;
switch (event) {
@@ -351,6 +350,4 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
netdev_alert(priv->dev, "EVENT: unknown event id %d\n", event);
break;
}
-
- return ret;
}
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index 44c8a550da4c..f5b78257d551 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -828,10 +828,8 @@ static void lbs_persist_config_remove(struct net_device *dev)
* Check mesh FW version and appropriately send the mesh start
* command
*/
-int lbs_init_mesh(struct lbs_private *priv)
+void lbs_init_mesh(struct lbs_private *priv)
{
- int ret = 0;
-
/* Determine mesh_fw_ver from fwrelease and fwcapinfo */
/* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
/* 5.110.22 have mesh command with 0xa3 command id */
@@ -870,8 +868,6 @@ int lbs_init_mesh(struct lbs_private *priv)
/* Stop meshing until interface is brought up */
lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, 1);
-
- return ret;
}
void lbs_start_mesh(struct lbs_private *priv)
diff --git a/drivers/net/wireless/marvell/libertas/mesh.h b/drivers/net/wireless/marvell/libertas/mesh.h
index 1561018f226f..d49717b20c09 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.h
+++ b/drivers/net/wireless/marvell/libertas/mesh.h
@@ -16,7 +16,7 @@
struct net_device;
-int lbs_init_mesh(struct lbs_private *priv);
+void lbs_init_mesh(struct lbs_private *priv);
void lbs_start_mesh(struct lbs_private *priv);
int lbs_deinit_mesh(struct lbs_private *priv);
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 25ac9db35dbf..bedc09215088 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -247,10 +247,10 @@ static void if_usb_disconnect(struct usb_interface *intf)
lbtf_deb_enter(LBTF_DEB_MAIN);
- if_usb_reset_device(priv);
-
- if (priv)
+ if (priv) {
+ if_usb_reset_device(priv);
lbtf_remove_card(priv);
+ }
/* Unlink and free urb */
if_usb_free(cardp);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 0bd93f26bd7f..8bd355d7974e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -853,43 +853,36 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
memset(&key_material->key_param_set, 0,
sizeof(struct mwifiex_ie_type_key_param_set));
if (enc_key->is_wapi_key) {
+ struct mwifiex_ie_type_key_param_set *set;
+
mwifiex_dbg(priv->adapter, INFO, "info: Set WAPI Key\n");
- key_material->key_param_set.key_type_id =
- cpu_to_le16(KEY_TYPE_ID_WAPI);
+ set = &key_material->key_param_set;
+ set->key_type_id = cpu_to_le16(KEY_TYPE_ID_WAPI);
if (cmd_oid == KEY_INFO_ENABLED)
- key_material->key_param_set.key_info =
- cpu_to_le16(KEY_ENABLED);
+ set->key_info = cpu_to_le16(KEY_ENABLED);
else
- key_material->key_param_set.key_info =
- cpu_to_le16(!KEY_ENABLED);
+ set->key_info = cpu_to_le16(!KEY_ENABLED);
- key_material->key_param_set.key[0] = enc_key->key_index;
+ set->key[0] = enc_key->key_index;
if (!priv->sec_info.wapi_key_on)
- key_material->key_param_set.key[1] = 1;
+ set->key[1] = 1;
else
/* set 0 when re-key */
- key_material->key_param_set.key[1] = 0;
+ set->key[1] = 0;
if (!is_broadcast_ether_addr(enc_key->mac_addr)) {
/* WAPI pairwise key: unicast */
- key_material->key_param_set.key_info |=
- cpu_to_le16(KEY_UNICAST);
+ set->key_info |= cpu_to_le16(KEY_UNICAST);
} else { /* WAPI group key: multicast */
- key_material->key_param_set.key_info |=
- cpu_to_le16(KEY_MCAST);
+ set->key_info |= cpu_to_le16(KEY_MCAST);
priv->sec_info.wapi_key_on = true;
}
- key_material->key_param_set.type =
- cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
- key_material->key_param_set.key_len =
- cpu_to_le16(WAPI_KEY_LEN);
- memcpy(&key_material->key_param_set.key[2],
- enc_key->key_material, enc_key->key_len);
- memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
- enc_key->pn, PN_LEN);
- key_material->key_param_set.length =
- cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
+ set->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
+ set->key_len = cpu_to_le16(WAPI_KEY_LEN);
+ memcpy(&set->key[2], enc_key->key_material, enc_key->key_len);
+ memcpy(&set->key[2 + enc_key->key_len], enc_key->pn, PN_LEN);
+ set->length = cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
key_param_len = (WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN) +
sizeof(struct mwifiex_ie_types_header);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index dbb241106d8a..eb67b66b846b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -286,7 +286,7 @@ static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
struct sk_buff *skb;
dma_addr_t paddr;
- skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE);
if (!skb) {
priv->rx_skb[index] = NULL;
return -ENOMEM;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index dbf3c5fd751f..d1b850aa4657 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -247,7 +247,7 @@ topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap)
struct sk_buff *skb;
dma_addr_t paddr;
- skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE);
if (!skb) {
ts->base.rx_skb[index] = NULL;
return -ENOMEM;
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index c1d542bfa530..bf3fbd14eda3 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2794,8 +2794,7 @@ static int __init init_ray_cs(void)
proc_create_data("driver/ray_cs/translate", 0200, NULL, &int_proc_ops,
&translate);
#endif
- if (translate != 0)
- translate = 1;
+ translate = !!translate;
return 0;
} /* init_ray_cs */
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index c75192c4447f..a4489b9302d4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -505,7 +505,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw)
rtlpriv->rfkill.rfkill_state = radio_state;
- blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+ blocked = rtlpriv->rfkill.rfkill_state != 1;
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index f73e690bbe8e..4dd82c6052f0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1722,7 +1722,7 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
"wireless radio switch turned %s\n",
radio_state ? "on" : "off");
- blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+ blocked = !rtlpriv->rfkill.rfkill_state;
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
index 0f401ad92c2e..c376817a1bf4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
@@ -51,7 +51,7 @@ void rtl88e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
if (rtlefuse->eeprom_regulatory != 0)
turbo_scanoff = true;
- if (mac->act_scanning == true) {
+ if (mac->act_scanning) {
tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
@@ -473,7 +473,7 @@ static bool _rtl88e_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
break;
}
- if (rtstatus != true) {
+ if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Radio[%d] Fail!!\n", rfpath);
return false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 4865639ac9ea..02b77521b5cd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -67,9 +67,9 @@ static int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
char *fw_name;
rtl8188ee_bt_reg_init(hw);
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(15);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index b337d599b6f4..7a16563b3a5d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -75,9 +75,9 @@ static int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpci->transmit_config = CFENDFORM | BIT(15);
/*just 2.4G band*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
index 680198280f8f..652d8ff9cccb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
@@ -131,7 +131,7 @@ static bool rtl8723e_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw)
(rtlpriv->btcoexist.previous_state_h ==
rtlpriv->btcoexist.cstate_h)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[DM][BT], Coexist state do not chang!!\n");
+ "[DM][BT], Coexist state do not change!!\n");
return true;
} else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 655460f61bbc..7a46c6a9deae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -614,22 +614,22 @@ static bool _rtl8723e_llt_table_init(struct ieee80211_hw *hw)
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
status = _rtl8723e_llt_write(hw, i, i + 1);
- if (true != status)
+ if (!status)
return status;
}
status = _rtl8723e_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
- if (true != status)
+ if (!status)
return status;
for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl8723e_llt_write(hw, i, (i + 1));
- if (true != status)
+ if (!status)
return status;
}
status = _rtl8723e_llt_write(hw, maxpage, txpktbuf_bndy);
- if (true != status)
+ if (!status)
return status;
rtl_write_byte(rtlpriv, REG_CR, 0xff);
@@ -934,7 +934,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl8712e_init_mac(hw);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("Init MAC failed\n");
err = 1;
goto exit;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index ea86d5bf33d2..7828acb1de3f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -78,9 +78,9 @@ static int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 36209ac5b208..d220e8955e37 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -74,9 +74,9 @@ static int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
rtl8723be_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index d8df816753cb..950542a24e31 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -76,9 +76,9 @@ static int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
rtl8821ae_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index b6d1d71f4d30..a5912da327e2 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -10,7 +10,6 @@
void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
struct rtw_bfee *bfee = &rtwvif->bfee;
struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
@@ -23,7 +22,7 @@ void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
else if (bfee->role == RTW_BFEE_SU)
bfinfo->bfer_su_cnt--;
- chip->ops->config_bfee(rtwdev, rtwvif, bfee, false);
+ rtw_chip_config_bfee(rtwdev, rtwvif, bfee, false);
bfee->role = RTW_BFEE_NONE;
}
@@ -71,7 +70,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
bfee->aid = bss_conf->aid;
bfinfo->bfer_mu_cnt++;
- chip->ops->config_bfee(rtwdev, rtwvif, bfee, true);
+ rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true);
} else if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) &&
(vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
if (bfinfo->bfer_su_cnt >= chip->bfer_su_max_num) {
@@ -97,7 +96,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
}
}
- chip->ops->config_bfee(rtwdev, rtwvif, bfee, true);
+ rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true);
}
out_unlock:
diff --git a/drivers/net/wireless/realtek/rtw88/bf.h b/drivers/net/wireless/realtek/rtw88/bf.h
index 96a8216dd11f..17855edb5006 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.h
+++ b/drivers/net/wireless/realtek/rtw88/bf.h
@@ -89,4 +89,26 @@ void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
void rtw_bf_phy_init(struct rtw_dev *rtwdev);
void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
u8 fixrate_en, u8 *new_rate);
+static inline void rtw_chip_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (rtwdev->chip->ops->config_bfee)
+ rtwdev->chip->ops->config_bfee(rtwdev, vif, bfee, enable);
+}
+
+static inline void rtw_chip_set_gid_table(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf)
+{
+ if (rtwdev->chip->ops->set_gid_table)
+ rtwdev->chip->ops->set_gid_table(rtwdev, vif, conf);
+}
+
+static inline void rtw_chip_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
+ u8 fixrate_en, u8 *new_rate)
+{
+ if (rtwdev->chip->ops->cfg_csi_rate)
+ rtwdev->chip->ops->cfg_csi_rate(rtwdev, rssi, cur_rate,
+ fixrate_en, new_rate);
+}
#endif
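
The bf.h hunk above routes beamformee configuration through inline wrappers that tolerate chips which don't implement the callback, instead of dereferencing chip->ops->config_bfee() directly at each call site. The underlying pattern is just a NULL check around an optional member of an ops table; a generic sketch with hypothetical names:

#include <linux/types.h>

/* hypothetical ops table in which the callback is optional */
struct ex_ops {
	void (*config_bfee)(void *dev, bool enable);
};

struct ex_dev {
	const struct ex_ops *ops;
};

/* callers use the wrapper and never need to know whether this chip
 * actually provides the callback
 */
static inline void ex_config_bfee(struct ex_dev *dev, bool enable)
{
	if (dev->ops->config_bfee)
		dev->ops->config_bfee(dev, enable);
}
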
diff --git a/drivers/net/wireless/realtek/rtw88/efuse.c b/drivers/net/wireless/realtek/rtw88/efuse.c
index 212c8376a8c9..13d1c58d6de5 100644
--- a/drivers/net/wireless/realtek/rtw88/efuse.c
+++ b/drivers/net/wireless/realtek/rtw88/efuse.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2018-2019 Realtek Corporation
*/
+#include <linux/iopoll.h>
+
#include "main.h"
#include "efuse.h"
#include "reg.h"
@@ -90,6 +92,8 @@ static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map)
u32 addr;
u32 cnt;
+ rtw_chip_efuse_grant_on(rtwdev);
+
switch_efuse_bank(rtwdev);
/* disable 2.5V LDO */
@@ -113,6 +117,28 @@ static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map)
*(map + addr) = (u8)(efuse_ctl & BIT_MASK_EF_DATA);
}
+ rtw_chip_efuse_grant_off(rtwdev);
+
+ return 0;
+}
+
+int rtw_read8_physical_efuse(struct rtw_dev *rtwdev, u16 addr, u8 *data)
+{
+ u32 efuse_ctl;
+ int ret;
+
+ rtw_write32_mask(rtwdev, REG_EFUSE_CTRL, 0x3ff00, addr);
+ rtw_write32_clr(rtwdev, REG_EFUSE_CTRL, BIT_EF_FLAG);
+
+ ret = read_poll_timeout(rtw_read32, efuse_ctl, efuse_ctl & BIT_EF_FLAG,
+ 1000, 100000, false, rtwdev, REG_EFUSE_CTRL);
+ if (ret) {
+ *data = EFUSE_READ_FAIL;
+ return ret;
+ }
+
+ *data = rtw_read8(rtwdev, REG_EFUSE_CTRL);
+
return 0;
}
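
The new rtw_read8_physical_efuse() above (and the H2C box wait in fw.c later in this diff) replaces hand-rolled polling loops with the linux/iopoll.h helpers, which bound the wait and return -ETIMEDOUT on failure. A small, self-contained sketch of read_poll_timeout() against a hypothetical MMIO status register:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EX_STATUS_READY BIT(0) /* hypothetical "operation done" bit */

/* accessor shaped to fit read_poll_timeout()'s op(args...) convention */
static u32 ex_read32(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static int ex_wait_ready(void __iomem *base, u32 offset)
{
	u32 val;

	/* re-read every 1000us, give up after 100000us, don't sleep before
	 * the first read; returns 0 on success or -ETIMEDOUT otherwise
	 */
	return read_poll_timeout(ex_read32, val, val & EX_STATUS_READY,
				 1000, 100000, false, base, offset);
}
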
diff --git a/drivers/net/wireless/realtek/rtw88/efuse.h b/drivers/net/wireless/realtek/rtw88/efuse.h
index 115bbe85946a..97a51f0b0e46 100644
--- a/drivers/net/wireless/realtek/rtw88/efuse.h
+++ b/drivers/net/wireless/realtek/rtw88/efuse.h
@@ -10,6 +10,8 @@
#define EFUSE_HW_CAP_SUPP_BW80 7
#define EFUSE_HW_CAP_SUPP_BW40 6
+#define EFUSE_READ_FAIL 0xff
+
#define GET_EFUSE_HW_CAP_HCI(hw_cap) \
le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(3, 0))
#define GET_EFUSE_HW_CAP_BW(hw_cap) \
@@ -22,5 +24,6 @@
le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(27, 26))
int rtw_parse_efuse_map(struct rtw_dev *rtwdev);
+int rtw_read8_physical_efuse(struct rtw_dev *rtwdev, u16 addr, u8 *data);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 05c430b3489c..2c28afe525c7 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2018-2019 Realtek Corporation
*/
+#include <linux/iopoll.h>
+
#include "main.h"
#include "coex.h"
#include "fw.h"
@@ -23,7 +25,7 @@ static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
switch (sub_cmd_id) {
case C2H_CCX_RPT:
- rtw_tx_report_handle(rtwdev, skb);
+ rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT);
break;
default:
break;
@@ -140,6 +142,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
goto unlock;
switch (c2h->id) {
+ case C2H_CCX_TX_RPT:
+ rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT);
+ break;
case C2H_BT_INFO:
rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
break;
@@ -153,6 +158,7 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
break;
default:
+ rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id);
break;
}
@@ -193,8 +199,8 @@ static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
u8 box;
u8 box_state;
u32 box_reg, box_ex_reg;
- u32 h2c_wait;
int idx;
+ int ret;
rtw_dbg(rtwdev, RTW_DBG_FW,
"send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
@@ -226,12 +232,11 @@ static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
goto out;
}
- h2c_wait = 20;
- do {
- box_state = rtw_read8(rtwdev, REG_HMETFR);
- } while ((box_state >> box) & 0x1 && --h2c_wait > 0);
+ ret = read_poll_timeout_atomic(rtw_read8, box_state,
+ !((box_state >> box) & 0x1), 100, 3000,
+ false, rtwdev, REG_HMETFR);
- if (!h2c_wait) {
+ if (ret) {
rtw_err(rtwdev, "failed to send h2c command\n");
goto out;
}
@@ -270,6 +275,9 @@ rtw_fw_send_general_info(struct rtw_dev *rtwdev)
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
u16 total_size = H2C_PKT_HDR_SIZE + 4;
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return;
+
rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);
SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
@@ -290,6 +298,9 @@ rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
u16 total_size = H2C_PKT_HDR_SIZE + 8;
u8 fw_rf_type = 0;
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return;
+
if (hal->rf_type == RF_1T1R)
fw_rf_type = FW_RF_1T1R;
else if (hal->rf_type == RF_2T2R)
@@ -630,8 +641,8 @@ void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
-u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
- struct cfg80211_ssid *ssid)
+static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
{
struct rtw_rsvd_page *rsvd_pkt;
u8 location = 0;
@@ -647,8 +658,8 @@ u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
return location;
}
-u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
- struct cfg80211_ssid *ssid)
+static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
{
struct rtw_rsvd_page *rsvd_pkt;
u16 size = 0;
@@ -1078,6 +1089,8 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 bckp[2];
u8 val;
u16 rsvd_pg_head;
+ u32 bcn_valid_addr;
+ u32 bcn_valid_mask;
int ret;
lockdep_assert_held(&rtwdev->mutex);
@@ -1085,8 +1098,13 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
if (!size)
return -EINVAL;
- pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
- rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr | BIT_BCN_VALID_V1);
+ if (rtw_chip_wcpu_11n(rtwdev)) {
+ rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
+ } else {
+ pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
+ pg_addr |= BIT_BCN_VALID_V1;
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr);
+ }
val = rtw_read8(rtwdev, REG_CR + 1);
bckp[0] = val;
@@ -1104,7 +1122,15 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
goto restore;
}
- if (!check_hw_ready(rtwdev, REG_FIFOPAGE_CTRL_2, BIT_BCN_VALID_V1, 1)) {
+ if (rtw_chip_wcpu_11n(rtwdev)) {
+ bcn_valid_addr = REG_DWBCN0_CTRL;
+ bcn_valid_mask = BIT_BCN_VALID;
+ } else {
+ bcn_valid_addr = REG_FIFOPAGE_CTRL_2;
+ bcn_valid_mask = BIT_BCN_VALID_V1;
+ }
+
+ if (!check_hw_ready(rtwdev, bcn_valid_addr, bcn_valid_mask, 1)) {
rtw_err(rtwdev, "error beacon valid\n");
ret = -EBUSY;
}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index cdd244857048..470e1809645a 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -19,7 +19,14 @@
#define RSVD_PAGE_START_ADDR 0x780
#define FIFO_DUMP_ADDR 0x8000
+#define DLFW_PAGE_SIZE_SHIFT_LEGACY 12
+#define DLFW_PAGE_SIZE_LEGACY 0x1000
+#define DLFW_BLK_SIZE_SHIFT_LEGACY 2
+#define DLFW_BLK_SIZE_LEGACY 4
+#define FW_START_ADDR_LEGACY 0x1000
+
enum rtw_c2h_cmd_id {
+ C2H_CCX_TX_RPT = 0x03,
C2H_BT_INFO = 0x09,
C2H_BT_MP_INFO = 0x0b,
C2H_RA_RPT = 0x0c,
@@ -192,9 +199,30 @@ struct rtw_fw_hdr {
__le32 imem_addr;
} __packed;
+struct rtw_fw_hdr_legacy {
+ __le16 signature;
+ u8 category;
+ u8 function;
+ __le16 version; /* 0x04 */
+ u8 subversion1;
+ u8 subversion2;
+ u8 month; /* 0x08 */
+ u8 day;
+ u8 hour;
+ u8 minute;
+ __le16 size;
+ __le16 rsvd2;
+ __le32 idx; /* 0x10 */
+ __le32 rsvd3;
+ __le32 rsvd4; /* 0x18 */
+ __le32 rsvd5;
+} __packed;
+
/* C2H */
-#define GET_CCX_REPORT_SEQNUM(c2h_payload) (c2h_payload[8] & 0xfc)
-#define GET_CCX_REPORT_STATUS(c2h_payload) (c2h_payload[9] & 0xc0)
+#define GET_CCX_REPORT_SEQNUM_V0(c2h_payload) (c2h_payload[6] & 0xfc)
+#define GET_CCX_REPORT_STATUS_V0(c2h_payload) (c2h_payload[0] & 0xc0)
+#define GET_CCX_REPORT_SEQNUM_V1(c2h_payload) (c2h_payload[8] & 0xfc)
+#define GET_CCX_REPORT_STATUS_V1(c2h_payload) (c2h_payload[9] & 0xc0)
#define GET_RA_REPORT_RATE(c2h_payload) (c2h_payload[0] & 0x7f)
#define GET_RA_REPORT_SGI(c2h_payload) ((c2h_payload[0] & 0x80) >> 7)
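
Note: struct rtw_fw_hdr_legacy added above mirrors the on-image header layout of the 11n-generation firmware, so its multi-byte fields are declared __le16/__le32 and must go through le16_to_cpu()/le32_to_cpu() before use. A minimal sketch of pulling the version field out of a firmware blob (helper name is illustrative, not from the patch):

	static u16 demo_legacy_fw_version(const u8 *fw_data, size_t fw_len)
	{
		const struct rtw_fw_hdr_legacy *hdr;

		if (fw_len < sizeof(*hdr))
			return 0;	/* caller treats 0 as "unknown" */

		hdr = (const struct rtw_fw_hdr_legacy *)fw_data;
		return le16_to_cpu(hdr->version);
	}
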
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 7b245779ff90..e8ffeb338584 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -40,6 +40,9 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
}
rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return;
+
value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
rtw_write32(rtwdev, REG_AFE_CTRL1, value32);
@@ -61,6 +64,14 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
rtw_write8(rtwdev, REG_RSV_CTRL, 0);
+ if (rtw_chip_wcpu_11n(rtwdev)) {
+ if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
+ rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
+ else
+ rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL);
+ return 0;
+ }
+
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
@@ -100,42 +111,55 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
return 0;
}
+static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
+{
+ u32 cnt;
+
+ target &= mask;
+
+ for (cnt = 0; cnt < RTW_PWR_POLLING_CNT; cnt++) {
+ if ((rtw_read8(rtwdev, addr) & mask) == target)
+ return true;
+
+ udelay(50);
+ }
+
+ return false;
+}
+
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
const struct rtw_pwr_seq_cmd *cmd)
{
u8 value;
- u8 flag = 0;
u32 offset;
- u32 cnt = RTW_PWR_POLLING_CNT;
if (cmd->base == RTW_PWR_ADDR_SDIO)
offset = cmd->offset | SDIO_LOCAL_OFFSET;
else
offset = cmd->offset;
- do {
- cnt--;
- value = rtw_read8(rtwdev, offset);
- value &= cmd->mask;
- if (value == (cmd->value & cmd->mask))
- return 0;
- if (cnt == 0) {
- if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
- flag == 0) {
- value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
- value |= BIT(3);
- rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
- value &= ~BIT(3);
- rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
- cnt = RTW_PWR_POLLING_CNT;
- flag = 1;
- } else {
- return -EBUSY;
- }
- } else {
- udelay(50);
- }
- } while (1);
+ if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
+ return 0;
+
+ if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE)
+ goto err;
+
+ /* if PCIE, toggle BIT_PFM_WOWL and try again */
+ value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
+
+ if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
+ return 0;
+
+err:
+ rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n",
+ offset, cmd->mask, cmd->value);
+ return -EBUSY;
}
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
@@ -228,12 +252,14 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
u8 rpwm;
bool cur_pwr;
- rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
+ if (rtw_chip_wcpu_11ac(rtwdev)) {
+ rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
- /* Check FW still exist or not */
- if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
- rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
- rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
+ /* Check FW still exist or not */
+ if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
+ rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
+ rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
+ }
}
if (rtw_read8(rtwdev, REG_CR) == 0xea)
@@ -244,7 +270,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
else
cur_pwr = true;
- if (pwr_on && cur_pwr)
+ if (pwr_on == cur_pwr)
return -EALREADY;
pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
@@ -254,7 +280,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
return 0;
}
-static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
+static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
u8 sys_func_en = rtwdev->chip->sys_func_en;
u8 value8;
@@ -279,6 +305,29 @@ static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
return 0;
}
+static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_CR, 0xff);
+ mdelay(2);
+ rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f);
+ mdelay(2);
+
+ rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN);
+ rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC);
+
+ rtw_write16(rtwdev, REG_CR, 0x2ff);
+
+ return 0;
+}
+
+static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
+{
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return __rtw_mac_init_system_cfg_legacy(rtwdev);
+
+ return __rtw_mac_init_system_cfg(rtwdev);
+}
+
int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
int ret = 0;
@@ -650,7 +699,8 @@ static void download_firmware_end_flow(struct rtw_dev *rtwdev)
rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}
-int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
+static int __rtw_download_firmware(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
{
struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
const u8 *data = fw->firmware->data;
@@ -704,6 +754,151 @@ dlfw_fail:
return ret;
}
+static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
+{
+ int try;
+
+ if (en) {
+ wlan_cpu_enable(rtwdev, false);
+ wlan_cpu_enable(rtwdev, true);
+
+ rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
+
+ for (try = 0; try < 10; try++) {
+ if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN)
+ goto fwdl_ready;
+ rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
+ msleep(20);
+ }
+ rtw_err(rtwdev, "failed to check fw download ready\n");
+fwdl_ready:
+ rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN);
+ } else {
+ rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
+ }
+}
+
+static void
+write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
+{
+ u32 val32;
+ u32 block_nr;
+ u32 remain_size;
+ u32 write_addr = FW_START_ADDR_LEGACY;
+ const __le32 *ptr = (const __le32 *)data;
+ u32 block;
+ __le32 remain_data = 0;
+
+ block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY;
+ remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1);
+
+ val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
+ val32 &= ~BIT_ROM_PGE;
+ val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE;
+ rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);
+
+ for (block = 0; block < block_nr; block++) {
+ rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr));
+
+ write_addr += DLFW_BLK_SIZE_LEGACY;
+ ptr++;
+ }
+
+ if (remain_size) {
+ memcpy(&remain_data, ptr, remain_size);
+ rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
+ }
+}
+
+static int
+download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
+{
+ u32 page;
+ u32 total_page;
+ u32 last_page_size;
+
+ data += sizeof(struct rtw_fw_hdr_legacy);
+ size -= sizeof(struct rtw_fw_hdr_legacy);
+
+ total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY;
+ last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1);
+
+ rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);
+
+ for (page = 0; page < total_page; page++) {
+ write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
+ data += DLFW_PAGE_SIZE_LEGACY;
+ }
+ if (last_page_size)
+ write_firmware_page(rtwdev, page, data, last_page_size);
+
+ if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
+ rtw_err(rtwdev, "failed to check download firmware report\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int download_firmware_validate_legacy(struct rtw_dev *rtwdev)
+{
+ u32 val32;
+ int try;
+
+ val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
+ val32 |= BIT_MCUFWDL_RDY;
+ val32 &= ~BIT_WINTINI_RDY;
+ rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);
+
+ wlan_cpu_enable(rtwdev, false);
+ wlan_cpu_enable(rtwdev, true);
+
+ for (try = 0; try < 10; try++) {
+ val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
+ if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY)
+ return 0;
+ msleep(20);
+ }
+
+ rtw_err(rtwdev, "failed to validate firmware\n");
+ return -EINVAL;
+}
+
+static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
+{
+ int ret = 0;
+
+ en_download_firmware_legacy(rtwdev, true);
+ ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
+ en_download_firmware_legacy(rtwdev, false);
+ if (ret)
+ goto out;
+
+ ret = download_firmware_validate_legacy(rtwdev);
+ if (ret)
+ goto out;
+
+ /* reset desc and index */
+ rtw_hci_setup(rtwdev);
+
+ rtwdev->h2c.last_box_num = 0;
+ rtwdev->h2c.seq = 0;
+
+ set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
+
+out:
+ return ret;
+}
+
+int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
+{
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return __rtw_download_firmware_legacy(rtwdev, fw);
+
+ return __rtw_download_firmware(rtwdev, fw);
+}
+
static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
@@ -826,7 +1021,8 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev)
rtw_write8(rtwdev, REG_CR, 0);
rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
- rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
return 0;
}
@@ -841,13 +1037,16 @@ static int set_trx_fifo_info(struct rtw_dev *rtwdev)
/* config rsvd page num */
fifo->rsvd_drv_pg_num = 8;
fifo->txff_pg_num = chip->txff_size >> 7;
- fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
- RSVD_PG_H2C_EXTRAINFO_NUM +
- RSVD_PG_H2C_STATICINFO_NUM +
- RSVD_PG_H2CQ_NUM +
- RSVD_PG_CPU_INSTRUCTION_NUM +
- RSVD_PG_FW_TXBUF_NUM +
- csi_buf_pg_num;
+ if (rtw_chip_wcpu_11n(rtwdev))
+ fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
+ else
+ fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
+ RSVD_PG_H2C_EXTRAINFO_NUM +
+ RSVD_PG_H2C_STATICINFO_NUM +
+ RSVD_PG_H2CQ_NUM +
+ RSVD_PG_CPU_INSTRUCTION_NUM +
+ RSVD_PG_FW_TXBUF_NUM +
+ csi_buf_pg_num;
if (fifo->rsvd_pg_num > fifo->txff_pg_num)
return -ENOMEM;
@@ -856,18 +1055,20 @@ static int set_trx_fifo_info(struct rtw_dev *rtwdev)
fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;
cur_pg_addr = fifo->txff_pg_num;
- cur_pg_addr -= csi_buf_pg_num;
- fifo->rsvd_csibuf_addr = cur_pg_addr;
- cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
- fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
- cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
- fifo->rsvd_cpu_instr_addr = cur_pg_addr;
- cur_pg_addr -= RSVD_PG_H2CQ_NUM;
- fifo->rsvd_h2cq_addr = cur_pg_addr;
- cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
- fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
- cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
- fifo->rsvd_h2c_info_addr = cur_pg_addr;
+ if (rtw_chip_wcpu_11ac(rtwdev)) {
+ cur_pg_addr -= csi_buf_pg_num;
+ fifo->rsvd_csibuf_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
+ fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
+ fifo->rsvd_cpu_instr_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2CQ_NUM;
+ fifo->rsvd_h2cq_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
+ fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
+ fifo->rsvd_h2c_info_addr = cur_pg_addr;
+ }
cur_pg_addr -= fifo->rsvd_drv_pg_num;
fifo->rsvd_drv_addr = cur_pg_addr;
@@ -879,6 +1080,65 @@ static int set_trx_fifo_info(struct rtw_dev *rtwdev)
return 0;
}
+static int __priority_queue_cfg(struct rtw_dev *rtwdev,
+ const struct rtw_page_table *pg_tbl,
+ u16 pubq_num)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
+ rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);
+
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
+ rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);
+
+ rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
+ rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
+ rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
+ rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);
+
+ if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
+ return -EBUSY;
+
+ rtw_write8(rtwdev, REG_CR + 3, 0);
+
+ return 0;
+}
+
+static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
+ const struct rtw_page_table *pg_tbl,
+ u16 pubq_num)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u32 val32;
+
+ val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
+ rtw_write32(rtwdev, REG_RQPN_NPQ, val32);
+ val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num);
+ rtw_write32(rtwdev, REG_RQPN, val32);
+
+ rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary);
+ rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1);
+ rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary);
+ rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary);
+ rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary);
+ rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary);
+
+ rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT);
+
+ if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0))
+ return -EBUSY;
+
+ return 0;
+}
+
static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
@@ -911,28 +1171,10 @@ static int priority_queue_cfg(struct rtw_dev *rtwdev)
pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
- rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
- rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
- rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
- rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
- rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
- rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);
-
- rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
- rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);
-
- rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
- rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
- rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
- rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
- rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);
-
- if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
- return -EBUSY;
-
- rtw_write8(rtwdev, REG_CR + 3, 0);
-
- return 0;
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
+ else
+ return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
}
static int init_h2c(struct rtw_dev *rtwdev)
@@ -945,6 +1187,9 @@ static int init_h2c(struct rtw_dev *rtwdev)
u32 h2cq_free;
u32 wp, rp;
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return 0;
+
h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;
@@ -1009,11 +1254,13 @@ static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
u8 value8;
rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
- value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
- value8 &= 0xF0;
- /* For rxdesc len = 0 issue */
- value8 |= 0xF;
- rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
+ if (rtw_chip_wcpu_11ac(rtwdev)) {
+ value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
+ value8 &= 0xF0;
+ /* For rxdesc len = 0 issue */
+ value8 |= 0xF;
+ rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
+ }
rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));
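
Note: download_firmware_legacy() above strips the legacy header, then splits the remaining image into 4 KiB pages (DLFW_PAGE_SIZE_LEGACY), which write_firmware_page() in turn writes out as 4-byte blocks. As a worked example of the same shift/mask arithmetic, using an illustrative 9000-byte payload:

	total_page     = 9000 >> 12   = 2      full 4 KiB pages
	last_page_size = 9000 & 0xfff = 808    bytes left for the final page
	block_nr       =  808 >> 2    = 202    full 4-byte blocks in that page
	remain_size    =  808 & 0x3   = 0      trailing bytes padded into one word
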
diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
index 592dc830160c..ce64cdf7a565 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.h
+++ b/drivers/net/wireless/realtek/rtw88/mac.h
@@ -10,6 +10,7 @@
#define SDIO_LOCAL_OFFSET 0x10250000
#define DDMA_POLLING_COUNT 1000
#define C2H_PKT_BUF 256
+#define REPORT_BUF 128
#define PHY_STATUS_SIZE 4
#define ILLEGAL_KEY_GROUP 0xFAAAAA00
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index d7d02e4c0184..98d2ac22f6f6 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -375,11 +375,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON)
rtw_fw_download_rsvd_page(rtwdev);
- if (changed & BSS_CHANGED_MU_GROUPS) {
- struct rtw_chip_info *chip = rtwdev->chip;
-
- chip->ops->set_gid_table(rtwdev, vif, conf);
- }
+ if (changed & BSS_CHANGED_MU_GROUPS)
+ rtw_chip_set_gid_table(rtwdev, vif, conf);
if (changed & BSS_CHANGED_ERP_SLOT)
rtw_conf_tx(rtwdev, rtwvif);
@@ -754,6 +751,37 @@ static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
return 0;
}
+static int rtw_ops_set_antenna(struct ieee80211_hw *hw,
+ u32 tx_antenna,
+ u32 rx_antenna)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ int ret;
+
+ if (!chip->ops->set_antenna)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = chip->ops->set_antenna(rtwdev, tx_antenna, rx_antenna);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static int rtw_ops_get_antenna(struct ieee80211_hw *hw,
+ u32 *tx_antenna,
+ u32 *rx_antenna)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ *tx_antenna = hal->antenna_tx;
+ *rx_antenna = hal->antenna_rx;
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int rtw_ops_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
@@ -815,6 +843,8 @@ const struct ieee80211_ops rtw_ops = {
.sta_statistics = rtw_ops_sta_statistics,
.flush = rtw_ops_flush,
.set_bitrate_mask = rtw_ops_set_bitrate_mask,
+ .set_antenna = rtw_ops_set_antenna,
+ .get_antenna = rtw_ops_get_antenna,
#ifdef CONFIG_PM
.suspend = rtw_ops_suspend,
.resume = rtw_ops_resume,
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 7640e97706f5..f88a7d2370aa 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -137,7 +137,6 @@ struct rtw_watch_dog_iter_data {
static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
{
struct rtw_bf_info *bf_info = &rtwdev->bf_info;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 fix_rate_enable = 0;
u8 new_csi_rate_idx;
@@ -145,9 +144,9 @@ static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
rtwvif->bfee.role != RTW_BFEE_MU)
return;
- chip->ops->cfg_csi_rate(rtwdev, rtwdev->dm_info.min_rssi,
- bf_info->cur_csi_rpt_rate,
- fix_rate_enable, &new_csi_rate_idx);
+ rtw_chip_cfg_csi_rate(rtwdev, rtwdev->dm_info.min_rssi,
+ bf_info->cur_csi_rpt_rate,
+ fix_rate_enable, &new_csi_rate_idx);
if (new_csi_rate_idx != bf_info->cur_csi_rpt_rate)
bf_info->cur_csi_rpt_rate = new_csi_rate_idx;
@@ -473,6 +472,7 @@ static u8 hw_bw_cap_to_bitamp(u8 bw_cap)
static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
{
struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
if (hw_ant_num == EFUSE_HW_CAP_IGNORE ||
hw_ant_num >= hal->rf_path_num)
@@ -482,6 +482,8 @@ static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
case 1:
hal->rf_type = RF_1T1R;
hal->rf_path_num = 1;
+ if (!chip->fix_rf_phy_num)
+ hal->rf_phy_num = hal->rf_path_num;
hal->antenna_tx = BB_PATH_A;
hal->antenna_rx = BB_PATH_A;
break;
@@ -931,8 +933,11 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
ht_cap->cap = 0;
ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_MAX_AMSDU |
- IEEE80211_HT_CAP_LDPC_CODING |
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ if (rtw_chip_has_rx_ldpc(rtwdev))
+ ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_40))
ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_DSSSCCK40 |
@@ -966,7 +971,6 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
vht_cap->vht_supported = true;
vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_TXSTBC |
IEEE80211_VHT_CAP_RXSTBC_1 |
@@ -979,6 +983,9 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
vht_cap->cap |= (rtwdev->hal.bfee_sts_cap <<
IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+ if (rtw_chip_has_rx_ldpc(rtwdev))
+ vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
+
mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
@@ -1040,11 +1047,43 @@ static void rtw_unset_supported_band(struct ieee80211_hw *hw,
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
}
+static void __update_firmware_info(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
+{
+ const struct rtw_fw_hdr *fw_hdr =
+ (const struct rtw_fw_hdr *)fw->firmware->data;
+
+ fw->h2c_version = le16_to_cpu(fw_hdr->h2c_fmt_ver);
+ fw->version = le16_to_cpu(fw_hdr->version);
+ fw->sub_version = fw_hdr->subversion;
+ fw->sub_index = fw_hdr->subindex;
+}
+
+static void __update_firmware_info_legacy(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
+{
+ struct rtw_fw_hdr_legacy *legacy =
+ (struct rtw_fw_hdr_legacy *)fw->firmware->data;
+
+ fw->h2c_version = 0;
+ fw->version = le16_to_cpu(legacy->version);
+ fw->sub_version = legacy->subversion1;
+ fw->sub_index = legacy->subversion2;
+}
+
+static void update_firmware_info(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
+{
+ if (rtw_chip_wcpu_11n(rtwdev))
+ __update_firmware_info_legacy(rtwdev, fw);
+ else
+ __update_firmware_info(rtwdev, fw);
+}
+
static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
{
struct rtw_fw_state *fw = context;
struct rtw_dev *rtwdev = fw->rtwdev;
- const struct rtw_fw_hdr *fw_hdr;
if (!firmware || !firmware->data) {
rtw_err(rtwdev, "failed to request firmware\n");
@@ -1052,13 +1091,8 @@ static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
return;
}
- fw_hdr = (const struct rtw_fw_hdr *)firmware->data;
- fw->h2c_version = le16_to_cpu(fw_hdr->h2c_fmt_ver);
- fw->version = le16_to_cpu(fw_hdr->version);
- fw->sub_version = fw_hdr->subversion;
- fw->sub_index = fw_hdr->subindex;
-
fw->firmware = firmware;
+ update_firmware_info(rtwdev, fw);
complete_all(&fw->completion);
rtw_info(rtwdev, "Firmware version %u.%u.%u, H2C version %u\n",
@@ -1131,6 +1165,8 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
hal->antenna_tx = BB_PATH_A;
hal->antenna_rx = BB_PATH_A;
}
+ hal->rf_phy_num = chip->fix_rf_phy_num ? chip->fix_rf_phy_num :
+ hal->rf_path_num;
efuse->physical_size = chip->phy_efuse_size;
efuse->logical_size = chip->log_efuse_size;
@@ -1450,6 +1486,7 @@ EXPORT_SYMBOL(rtw_core_deinit);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
+ struct rtw_hal *hal = &rtwdev->hal;
int max_tx_headroom = 0;
int ret;
@@ -1478,6 +1515,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
+ hw->wiphy->available_antennas_tx = hal->antenna_tx;
+ hw->wiphy->available_antennas_rx = hal->antenna_rx;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index c6b590fdb573..e0365a70c6f7 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -41,6 +41,7 @@ extern unsigned int rtw_debug_mask;
extern const struct ieee80211_ops rtw_ops;
extern struct rtw_chip_info rtw8822b_hw_spec;
extern struct rtw_chip_info rtw8822c_hw_spec;
+extern struct rtw_chip_info rtw8723d_hw_spec;
#define RTW_MAX_CHANNEL_NUM_2G 14
#define RTW_MAX_CHANNEL_NUM_5G 49
@@ -183,6 +184,7 @@ enum rtw_wireless_set {
enum rtw_chip_type {
RTW_CHIP_TYPE_8822B,
RTW_CHIP_TYPE_8822C,
+ RTW_CHIP_TYPE_8723D,
};
enum rtw_tx_queue_type {
@@ -337,6 +339,7 @@ enum rtw_regulatory_domains {
RTW_REGD_CHILE = 6,
RTW_REGD_UKRAINE = 7,
RTW_REGD_MEXICO = 8,
+ RTW_REGD_CN = 9,
RTW_REGD_WW,
RTW_REGD_MAX
@@ -527,6 +530,13 @@ struct rtw_reg_domain {
u8 domain;
};
+struct rtw_rf_sipi_addr {
+ u32 hssi_1;
+ u32 hssi_2;
+ u32 lssi_read;
+ u32 lssi_read_pi;
+};
+
struct rtw_backup_info {
u8 len;
u32 reg;
@@ -798,9 +808,11 @@ struct rtw_chip_ops {
void (*set_tx_power_index)(struct rtw_dev *rtwdev);
int (*rsvd_page_dump)(struct rtw_dev *rtwdev, u8 *buf, u32 offset,
u32 size);
- void (*set_antenna)(struct rtw_dev *rtwdev, u8 antenna_tx,
- u8 antenna_rx);
+ int (*set_antenna)(struct rtw_dev *rtwdev,
+ u32 antenna_tx,
+ u32 antenna_rx);
void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
+ void (*efuse_grant)(struct rtw_dev *rtwdev, bool enable);
void (*false_alarm_statistics)(struct rtw_dev *rtwdev);
void (*phy_calibration)(struct rtw_dev *rtwdev);
void (*dpk_track)(struct rtw_dev *rtwdev);
@@ -844,6 +856,7 @@ struct rtw_chip_ops {
#define RTW_PWR_INTF_PCI_MSK BIT(2)
#define RTW_PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define RTW_PWR_CUT_TEST_MSK BIT(0)
#define RTW_PWR_CUT_A_MSK BIT(1)
#define RTW_PWR_CUT_B_MSK BIT(2)
#define RTW_PWR_CUT_C_MSK BIT(3)
@@ -1044,12 +1057,18 @@ struct rtw_pwr_track_tbl {
const u8 *pwrtrk_2g_ccka_p;
};
+enum rtw_wlan_cpu {
+ RTW_WCPU_11AC,
+ RTW_WCPU_11N,
+};
+
/* hardware configuration for each IC */
struct rtw_chip_info {
struct rtw_chip_ops *ops;
u8 id;
const char *fw_name;
+ enum rtw_wlan_cpu wlan_cpu;
u8 tx_pkt_desc_sz;
u8 tx_buf_desc_sz;
u8 rx_pkt_desc_sz;
@@ -1066,6 +1085,7 @@ struct rtw_chip_info {
u8 dig_min;
u8 txgi_factor;
bool is_pwr_by_rate_dec;
+ bool rx_ldpc;
u8 max_power_index;
bool ht_supported;
@@ -1081,8 +1101,11 @@ struct rtw_chip_info {
const struct rtw_intf_phy_para_table *intf_table;
const struct rtw_hw_reg *dig;
+ const struct rtw_hw_reg *dig_cck;
u32 rf_base_addr[2];
u32 rf_sipi_addr[2];
+ const struct rtw_rf_sipi_addr *rf_sipi_read_addr;
+ u8 fix_rf_phy_num;
const struct rtw_table *mac_tbl;
const struct rtw_table *agc_tbl;
@@ -1455,6 +1478,7 @@ struct rtw_efuse {
u8 ant_div_cfg;
u8 ant_div_type;
u8 regd;
+ u8 afe;
u8 lna_type_2g;
u8 lna_type_5g;
@@ -1567,8 +1591,9 @@ struct rtw_hal {
u8 sec_ch_offset;
u8 rf_type;
u8 rf_path_num;
- u8 antenna_tx;
- u8 antenna_rx;
+ u8 rf_phy_num;
+ u32 antenna_tx;
+ u32 antenna_rx;
u8 bfee_sts_cap;
/* protect tx power section */
@@ -1698,6 +1723,33 @@ static inline bool rtw_ssid_equal(struct cfg80211_ssid *a,
return true;
}
+static inline void rtw_chip_efuse_grant_on(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->chip->ops->efuse_grant)
+ rtwdev->chip->ops->efuse_grant(rtwdev, true);
+}
+
+static inline void rtw_chip_efuse_grant_off(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->chip->ops->efuse_grant)
+ rtwdev->chip->ops->efuse_grant(rtwdev, false);
+}
+
+static inline bool rtw_chip_wcpu_11n(struct rtw_dev *rtwdev)
+{
+ return rtwdev->chip->wlan_cpu == RTW_WCPU_11N;
+}
+
+static inline bool rtw_chip_wcpu_11ac(struct rtw_dev *rtwdev)
+{
+ return rtwdev->chip->wlan_cpu == RTW_WCPU_11AC;
+}
+
+static inline bool rtw_chip_has_rx_ldpc(struct rtw_dev *rtwdev)
+{
+ return rtwdev->chip->rx_ldpc;
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 1af87eb2e53a..a9752c34c9d8 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -411,12 +411,14 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
- len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
- dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
- rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
- rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
- rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
+ if (!rtw_chip_wcpu_11n(rtwdev)) {
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
+ }
len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
@@ -471,8 +473,9 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
/* reset H2C Queue index in a single write */
- rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
- BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
+ BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}
static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
@@ -489,7 +492,9 @@ static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
- rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
+
rtwpci->irq_enabled = true;
spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
@@ -507,7 +512,9 @@ static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
- rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
+
rtwpci->irq_enabled = false;
out:
@@ -1012,13 +1019,17 @@ static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
- irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
+ else
+ irq_status[3] = 0;
irq_status[0] &= rtwpci->irq_mask[0];
irq_status[1] &= rtwpci->irq_mask[1];
irq_status[3] &= rtwpci->irq_mask[3];
rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
- rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}
@@ -1091,6 +1102,7 @@ static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
len = pci_resource_len(pdev, bar_id);
rtwpci->mmap = pci_iomap(pdev, bar_id, len);
if (!rtwpci->mmap) {
+ pci_release_regions(pdev);
rtw_err(rtwdev, "failed to map pci memory\n");
return -ENOMEM;
}
@@ -1568,6 +1580,9 @@ static const struct pci_device_id rtw_pci_id_table[] = {
#ifdef CONFIG_RTW88_8822CE
{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
#endif
+#ifdef CONFIG_RTW88_8723DE
+ { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xD723, rtw8723d_hw_spec) },
+#endif
{},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 8793dd22188f..72a16eff9db3 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -140,9 +140,13 @@ void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
+ const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
u32 addr, mask;
u8 path;
+ if (dig_cck)
+ rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1);
+
for (path = 0; path < hal->rf_path_num; path++) {
addr = chip->dig[path].addr;
mask = chip->dig[path].mask;
@@ -679,7 +683,7 @@ u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
const u32 *base_addr = chip->rf_base_addr;
u32 val, direct_addr;
- if (rf_path >= hal->rf_path_num) {
+ if (rf_path >= hal->rf_phy_num) {
rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
return INV_RF_DATA;
}
@@ -693,6 +697,54 @@ u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
return val;
}
+u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_rf_sipi_addr *rf_sipi_addr;
+ const struct rtw_rf_sipi_addr *rf_sipi_addr_a;
+ u32 val32;
+ u32 en_pi;
+ u32 r_addr;
+ u32 shift;
+
+ if (rf_path >= hal->rf_phy_num) {
+ rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ if (!chip->rf_sipi_read_addr) {
+ rtw_err(rtwdev, "rf_sipi_read_addr isn't defined\n");
+ return INV_RF_DATA;
+ }
+
+ rf_sipi_addr = &chip->rf_sipi_read_addr[rf_path];
+ rf_sipi_addr_a = &chip->rf_sipi_read_addr[RF_PATH_A];
+
+ addr &= 0xff;
+
+ val32 = rtw_read32(rtwdev, rf_sipi_addr->hssi_2);
+ val32 = (val32 & ~LSSI_READ_ADDR_MASK) | (addr << 23);
+ rtw_write32(rtwdev, rf_sipi_addr->hssi_2, val32);
+
+ /* toggle read edge of path A */
+ val32 = rtw_read32(rtwdev, rf_sipi_addr_a->hssi_2);
+ rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 & ~LSSI_READ_EDGE_MASK);
+ rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 | LSSI_READ_EDGE_MASK);
+
+ udelay(120);
+
+ en_pi = rtw_read32_mask(rtwdev, rf_sipi_addr->hssi_1, BIT(8));
+ r_addr = en_pi ? rf_sipi_addr->lssi_read_pi : rf_sipi_addr->lssi_read;
+
+ val32 = rtw_read32_mask(rtwdev, r_addr, LSSI_READ_DATA_MASK);
+
+ shift = __ffs(mask);
+
+ return (val32 & mask) >> shift;
+}
+
bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
@@ -703,7 +755,7 @@ bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 old_data = 0;
u32 shift;
- if (rf_path >= hal->rf_path_num) {
+ if (rf_path >= hal->rf_phy_num) {
rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
return false;
}
@@ -712,7 +764,7 @@ bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
mask &= RFREG_MASK;
if (mask != RFREG_MASK) {
- old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);
+ old_data = chip->ops->read_rf(rtwdev, rf_path, addr, RFREG_MASK);
if (old_data == INV_RF_DATA) {
rtw_err(rtwdev, "Write fail, rf is disabled\n");
@@ -740,7 +792,7 @@ bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
const u32 *base_addr = chip->rf_base_addr;
u32 direct_addr;
- if (rf_path >= hal->rf_path_num) {
+ if (rf_path >= hal->rf_phy_num) {
rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
return false;
}
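
Note: rtw_phy_read_rf_sipi() above extracts the requested bit-field with shift = __ffs(mask), i.e. the bit index of the mask's lowest set bit. Worked example with illustrative values: for a 20-bit LSSI readback val32 = 0x45678 and mask = 0x0ff00, __ffs(0x0ff00) = 8, so (0x45678 & 0x0ff00) >> 8 = 0x56.
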
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index af916d8784cd..413bf7165cc0 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -21,6 +21,8 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev);
u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num);
u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask);
+u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask);
bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data);
bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
@@ -178,4 +180,8 @@ enum rtw_phy_cck_pd_lv {
#define CCK_FA_AVG_RESET 0xffffffff
+#define LSSI_READ_ADDR_MASK 0x7f800000
+#define LSSI_READ_EDGE_MASK 0x80000000
+#define LSSI_READ_DATA_MASK 0xfffff
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 9d94534c9674..9fdfcdc5c5cf 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -6,15 +6,23 @@
#define __RTW_REG_DEF_H__
#define REG_SYS_FUNC_EN 0x0002
+#define BIT_FEN_EN_25_1 BIT(13)
+#define BIT_FEN_ELDR BIT(12)
#define BIT_FEN_CPUEN BIT(2)
#define BIT_FEN_BB_GLB_RST BIT(1)
#define BIT_FEN_BB_RSTB BIT(0)
#define BIT_R_DIS_PRST BIT(6)
#define BIT_WLOCK_1C_B6 BIT(5)
#define REG_SYS_PW_CTRL 0x0004
+#define BIT_PFM_WOWL BIT(3)
#define REG_SYS_CLK_CTRL 0x0008
#define BIT_CPU_CLK_EN BIT(14)
+#define REG_SYS_CLKR 0x0008
+#define BIT_ANA8M BIT(1)
+#define BIT_WAKEPAD_EN BIT(3)
+#define BIT_LOADER_CLK_EN BIT(5)
+
#define REG_RSV_CTRL 0x001C
#define DISABLE_PI 0x3
#define ENABLE_PI 0x2
@@ -33,12 +41,23 @@
#define BIT_MASK_EF_ADDR 0x3ff
#define BIT_MASK_EF_DATA 0xff
#define BITS_EF_ADDR (BIT_MASK_EF_ADDR << BIT_SHIFT_EF_ADDR)
+#define BITS_PLL 0xf0
+
+#define REG_AFE_CTRL3 0x2c
+#define BIT_MASK_XTAL 0x00FFF000
+#define BIT_XTAL_GMP_BIT4 BIT(28)
#define REG_LDO_EFUSE_CTRL 0x0034
#define BIT_MASK_EFUSE_BANK_SEL (BIT(8) | BIT(9))
+#define BIT_LDO25_VOLTAGE_V25 0x03
+#define BIT_MASK_LDO25_VOLTAGE GENMASK(6, 4)
+#define BIT_SHIFT_LDO25_VOLTAGE 4
+#define BIT_LDO25_EN BIT(7)
+
#define REG_GPIO_MUXCFG 0x0040
#define BIT_FSPI_EN BIT(19)
+#define BIT_EN_SIC BIT(12)
#define BIT_BT_AOD_GPIO3 BIT(9)
#define BIT_BT_PTA_EN BIT(5)
#define BIT_WLRFE_4_5_EN BIT(2)
@@ -48,6 +67,7 @@
#define BIT_PAPE_SEL_EN BIT(25)
#define BIT_DPDT_WL_SEL BIT(24)
#define BIT_DPDT_SEL_EN BIT(23)
+#define REG_LEDCFG2 0x004E
#define REG_PAD_CTRL1 0x0064
#define BIT_PAPE_WLBT_SEL BIT(29)
#define BIT_LNAON_WLBT_SEL BIT(28)
@@ -63,30 +83,54 @@
#define BIT_LTE_MUX_CTRL_PATH BIT(26)
#define REG_HCI_OPT_CTRL 0x0074
+#define REG_AFE_CTRL_4 0x0078
+#define BIT_CK320M_AFE_EN BIT(4)
+#define BIT_EN_SYN BIT(15)
+
+#define REG_LDO_SWR_CTRL 0x007C
+#define LDO_SEL 0xC3
+#define SPS_SEL 0x83
+#define BIT_XTA1 BIT(29)
+#define BIT_XTA0 BIT(28)
+
#define REG_MCUFW_CTRL 0x0080
#define BIT_ANA_PORT_EN BIT(22)
#define BIT_MAC_PORT_EN BIT(21)
#define BIT_BOOT_FSPI_EN BIT(20)
+#define BIT_ROM_DLEN BIT(19)
+#define BIT_ROM_PGE GENMASK(18, 16) /* legacy only */
+#define BIT_SHIFT_ROM_PGE 16
#define BIT_FW_INIT_RDY BIT(15)
#define BIT_FW_DW_RDY BIT(14)
#define BIT_RPWM_TOGGLE BIT(7)
+#define BIT_RAM_DL_SEL BIT(7) /* legacy only */
#define BIT_DMEM_CHKSUM_OK BIT(6)
+#define BIT_WINTINI_RDY BIT(6) /* legacy only */
#define BIT_DMEM_DW_OK BIT(5)
#define BIT_IMEM_CHKSUM_OK BIT(4)
#define BIT_IMEM_DW_OK BIT(3)
#define BIT_IMEM_BOOT_LOAD_CHECKSUM_OK BIT(2)
+#define BIT_FWDL_CHK_RPT BIT(2) /* legacy only */
+#define BIT_MCUFWDL_RDY BIT(1) /* legacy only */
#define BIT_MCUFWDL_EN BIT(0)
#define BIT_CHECK_SUM_OK (BIT(4) | BIT(6))
#define FW_READY (BIT_FW_INIT_RDY | BIT_FW_DW_RDY | \
BIT_IMEM_DW_OK | BIT_DMEM_DW_OK | \
BIT_CHECK_SUM_OK)
+#define FW_READY_LEGACY (BIT_MCUFWDL_RDY | BIT_FWDL_CHK_RPT | \
+ BIT_WINTINI_RDY | BIT_RAM_DL_SEL)
#define FW_READY_MASK 0xffff
+#define REG_EFUSE_ACCESS 0x00CF
+#define EFUSE_ACCESS_ON 0x69
+#define EFUSE_ACCESS_OFF 0x00
+
#define REG_WLRF1 0x00EC
#define REG_WIFI_BT_INFO 0x00AA
#define BIT_BT_INT_EN BIT(15)
#define REG_SYS_CFG1 0x00F0
#define BIT_RTL_ID BIT(23)
+#define BIT_LDO BIT(24)
#define BIT_RF_TYPE_ID BIT(27)
#define BIT_SHIFT_VENDOR_ID 16
#define BIT_MASK_VENDOR_ID 0xf
@@ -166,6 +210,7 @@
#define BIT_FS_RXDONE BIT(16)
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_C2HEVT 0x01A0
+#define REG_MCUTST_1 0x01C0
#define REG_MCUTST_II 0x01C4
#define REG_WOWLAN_WAKE_REASON 0x01C7
#define REG_HMETFR 0x01CC
@@ -178,14 +223,42 @@
#define REG_HMEBOX2_EX 0x01F8
#define REG_HMEBOX3_EX 0x01FC
+#define REG_RQPN 0x0200
+#define BIT_MASK_HPQ 0xff
+#define BIT_SHIFT_HPQ 0
+#define BIT_RQPN_HPQ(x) (((x) & BIT_MASK_HPQ) << BIT_SHIFT_HPQ)
+#define BIT_MASK_LPQ 0xff
+#define BIT_SHIFT_LPQ 8
+#define BIT_RQPN_LPQ(x) (((x) & BIT_MASK_LPQ) << BIT_SHIFT_LPQ)
+#define BIT_MASK_PUBQ 0xff
+#define BIT_SHIFT_PUBQ 16
+#define BIT_RQPN_PUBQ(x) (((x) & BIT_MASK_PUBQ) << BIT_SHIFT_PUBQ)
+#define BIT_RQPN_HLP(h, l, p) (BIT_LD_RQPN | BIT_RQPN_HPQ(h) | \
+ BIT_RQPN_LPQ(l) | BIT_RQPN_PUBQ(p))
+
#define REG_FIFOPAGE_CTRL_2 0x0204
#define BIT_BCN_VALID_V1 BIT(15)
#define BIT_MASK_BCN_HEAD_1_V1 0xfff
#define REG_AUTO_LLT_V1 0x0208
#define BIT_AUTO_INIT_LLT_V1 BIT(0)
+#define REG_DWBCN0_CTRL 0x0208
+#define BIT_BCN_VALID BIT(16)
#define REG_TXDMA_OFFSET_CHK 0x020C
+#define BIT_DROP_DATA_EN BIT(9)
#define REG_TXDMA_STATUS 0x0210
#define BTI_PAGE_OVF BIT(2)
+
+#define REG_RQPN_NPQ 0x0214
+#define BIT_MASK_NPQ 0xff
+#define BIT_SHIFT_NPQ 0
+#define BIT_MASK_EPQ 0xff
+#define BIT_SHIFT_EPQ 16
+#define BIT_RQPN_NPQ(x) (((x) & BIT_MASK_NPQ) << BIT_SHIFT_NPQ)
+#define BIT_RQPN_EPQ(x) (((x) & BIT_MASK_EPQ) << BIT_SHIFT_EPQ)
+#define BIT_RQPN_NE(n, e) (BIT_RQPN_NPQ(n) | BIT_RQPN_EPQ(e))
+
+#define REG_AUTO_LLT 0x0224
+#define BIT_AUTO_INIT_LLT BIT(16)
#define REG_RQPN_CTRL_1 0x0228
#define REG_RQPN_CTRL_2 0x022C
#define BIT_LD_RQPN BIT(31)
@@ -213,7 +286,11 @@
#define REG_FWHW_TXQ_CTRL 0x0420
#define BIT_EN_BCNQ_DL BIT(22)
#define BIT_EN_WR_FREE_TAIL BIT(20)
+#define REG_HWSEQ_CTRL 0x0423
+
#define REG_BCNQ_BDNY_V1 0x0424
+#define REG_BCNQ_BDNY 0x0424
+#define REG_MGQ_BDNY 0x0425
#define REG_LIFETIME_EN 0x0426
#define BIT_BA_PARSER_EN BIT(5)
#define REG_SPEC_SIFS 0x0428
@@ -229,6 +306,8 @@
#define BIT_CHECK_CCK_EN BIT(7)
#define REG_AMPDU_MAX_TIME_V1 0x0455
#define REG_BCNQ1_BDNY_V1 0x0456
+#define REG_AMPDU_MAX_TIME 0x0456
+#define REG_WMAC_LBK_BF_HD 0x045D
#define REG_TX_HANG_CTRL 0x045E
#define BIT_EN_GNT_BT_AWAKE BIT(3)
#define BIT_EN_EOF_V1 BIT(2)
@@ -243,7 +322,10 @@
#define REG_QUEUE_CTRL 0x04C6
#define BIT_PTA_WL_TX_EN BIT(4)
#define BIT_PTA_EDCCA_EN BIT(5)
+#define REG_SINGLE_AMPDU_CTRL 0x04C7
+#define BIT_EN_SINGLE_APMDU BIT(7)
#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_MAX_AGGR_NUM 0x04CA
#define REG_BAR_MODE_CTRL 0x04CC
#define REG_PRECNT_CTRL 0x04E5
#define BIT_BTCCA_CTRL (BIT(0) | BIT(1))
@@ -263,6 +345,7 @@
#define BIT_SHIFT_SIFS_OFDM_CTX 8
#define BIT_SHIFT_SIFS_CCK_TRX 16
#define BIT_SHIFT_SIFS_OFDM_TRX 24
+#define REG_AGGR_BREAK_TIME 0x051A
#define REG_SLOT 0x051B
#define REG_TX_PTCL_CTRL 0x0520
#define BIT_SIFS_BK_EN BIT(12)
@@ -274,18 +357,23 @@
#define REG_TBTT_PROHIBIT 0x0540
#define BIT_SHIFT_TBTT_HOLD_TIME_AP 8
#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
#define REG_BCN_CTRL 0x0550
#define BIT_DIS_TSF_UDT BIT(4)
#define BIT_EN_BCN_FUNCTION BIT(3)
+#define BIT_EN_TXBCN_RPT BIT(2)
#define REG_BCN_CTRL_CLINT0 0x0551
#define REG_DRVERLYINT 0x0558
#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
#define REG_USTIME_TSF 0x055C
#define REG_BCN_MAX_ERR 0x055D
#define REG_RXTSF_OFFSET_CCK 0x055E
#define REG_MISC_CTRL 0x0577
#define BIT_EN_FREE_CNT BIT(3)
#define BIT_DIS_SECOND_CCA (BIT(0) | BIT(1))
+#define REG_HIQ_NO_LMT_EN 0x5A7
+#define BIT_HIQ_NO_LMT_EN_ROOT BIT(0)
#define REG_TIMER0_SRC_SEL 0x05B4
#define BIT_TSFT_SEL_TIMER0 (BIT(4) | BIT(5) | BIT(6))
@@ -311,6 +399,7 @@
#define BIT_HTC_LOC_CTRL BIT(14)
#define BIT_RPFM_CAM_ENABLE BIT(12)
#define BIT_TA_BCN BIT(11)
+#define BIT_RCR_ADF BIT(11)
#define BIT_DISDECMYPKT BIT(10)
#define BIT_AICV BIT(9)
#define BIT_ACRC32 BIT(8)
@@ -328,6 +417,7 @@
#define REG_MAR 0x0620
#define REG_USTIME_EDCA 0x0638
#define REG_ACKTO_CCK 0x0639
+#define REG_MAC_SPEC_SIFS 0x063A
#define REG_RESP_SIFS_CCK 0x063C
#define REG_RESP_SIFS_OFDM 0x063E
#define REG_ACKTO 0x0640
@@ -370,12 +460,19 @@
#define BIT_LTE_COEX_EN BIT(7)
#define REG_BT_STAT_CTRL 0x0778
#define REG_BT_TDMA_TIME 0x0790
+#define REG_LTR_IDLE_LATENCY 0x0798
+#define REG_LTR_ACTIVE_LATENCY 0x079C
+#define REG_LTR_CTRL_BASIC 0x07A4
#define REG_WMAC_OPTION_FUNCTION 0x07D0
#define REG_WMAC_OPTION_FUNCTION_1 0x07D4
+#define REG_FPGA0_RFMOD 0x0800
+#define BIT_CCKEN BIT(24)
+#define BIT_OFDMEN BIT(25)
#define REG_RX_GAIN_EN 0x081c
#define REG_RFE_CTRL_E 0x0974
+#define REG_2ND_CCA_CTRL 0x0976
#define REG_DIS_DPD 0x0a70
#define DIS_DPD_MASK GENMASK(9, 0)
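
Note: the new REG_RQPN/REG_RQPN_NPQ helpers pack the per-queue reserved page counts into single 32-bit writes, with BIT_LD_RQPN (bit 31) latching the values. Worked example with illustrative counts hq = 0x10, lq = 0x20, pubq = 0x30:

	BIT_RQPN_HLP(0x10, 0x20, 0x30)
	  = BIT(31) | 0x10 | (0x20 << 8) | (0x30 << 16)
	  = 0x80302010
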
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
new file mode 100644
index 000000000000..92c742d1ce6d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "coex.h"
+#include "fw.h"
+#include "tx.h"
+#include "rx.h"
+#include "phy.h"
+#include "rtw8723d.h"
+#include "rtw8723d_table.h"
+#include "mac.h"
+#include "reg.h"
+#include "debug.h"
+
+static const struct rtw_hw_reg rtw8723d_txagc[] = {
+ [DESC_RATE1M] = { .addr = 0xe08, .mask = 0x0000ff00 },
+ [DESC_RATE2M] = { .addr = 0x86c, .mask = 0x0000ff00 },
+ [DESC_RATE5_5M] = { .addr = 0x86c, .mask = 0x00ff0000 },
+ [DESC_RATE11M] = { .addr = 0x86c, .mask = 0xff000000 },
+ [DESC_RATE6M] = { .addr = 0xe00, .mask = 0x000000ff },
+ [DESC_RATE9M] = { .addr = 0xe00, .mask = 0x0000ff00 },
+ [DESC_RATE12M] = { .addr = 0xe00, .mask = 0x00ff0000 },
+ [DESC_RATE18M] = { .addr = 0xe00, .mask = 0xff000000 },
+ [DESC_RATE24M] = { .addr = 0xe04, .mask = 0x000000ff },
+ [DESC_RATE36M] = { .addr = 0xe04, .mask = 0x0000ff00 },
+ [DESC_RATE48M] = { .addr = 0xe04, .mask = 0x00ff0000 },
+ [DESC_RATE54M] = { .addr = 0xe04, .mask = 0xff000000 },
+ [DESC_RATEMCS0] = { .addr = 0xe10, .mask = 0x000000ff },
+ [DESC_RATEMCS1] = { .addr = 0xe10, .mask = 0x0000ff00 },
+ [DESC_RATEMCS2] = { .addr = 0xe10, .mask = 0x00ff0000 },
+ [DESC_RATEMCS3] = { .addr = 0xe10, .mask = 0xff000000 },
+ [DESC_RATEMCS4] = { .addr = 0xe14, .mask = 0x000000ff },
+ [DESC_RATEMCS5] = { .addr = 0xe14, .mask = 0x0000ff00 },
+ [DESC_RATEMCS6] = { .addr = 0xe14, .mask = 0x00ff0000 },
+ [DESC_RATEMCS7] = { .addr = 0xe14, .mask = 0xff000000 },
+};
+
+#define WLAN_TXQ_RPT_EN 0x1F
+#define WLAN_SLOT_TIME 0x09
+#define WLAN_RL_VAL 0x3030
+#define WLAN_BAR_VAL 0x0201ffff
+#define BIT_MASK_TBTT_HOLD 0x00000fff
+#define BIT_SHIFT_TBTT_HOLD 8
+#define BIT_MASK_TBTT_SETUP 0x000000ff
+#define BIT_SHIFT_TBTT_SETUP 0
+#define BIT_MASK_TBTT_MASK ((BIT_MASK_TBTT_HOLD << BIT_SHIFT_TBTT_HOLD) | \
+ (BIT_MASK_TBTT_SETUP << BIT_SHIFT_TBTT_SETUP))
+#define TBTT_TIME(s, h)((((s) & BIT_MASK_TBTT_SETUP) << BIT_SHIFT_TBTT_SETUP) |\
+ (((h) & BIT_MASK_TBTT_HOLD) << BIT_SHIFT_TBTT_HOLD))
+#define WLAN_TBTT_TIME_NORMAL TBTT_TIME(0x04, 0x80)
+#define WLAN_TBTT_TIME_STOP_BCN TBTT_TIME(0x04, 0x64)
+#define WLAN_PIFS_VAL 0
+#define WLAN_AGG_BRK_TIME 0x16
+#define WLAN_NAV_PROT_LEN 0x0040
+#define WLAN_SPEC_SIFS 0x100a
+#define WLAN_RX_PKT_LIMIT 0x17
+#define WLAN_MAX_AGG_NR 0x0A
+#define WLAN_AMPDU_MAX_TIME 0x1C
+#define WLAN_ANT_SEL 0x82
+#define WLAN_LTR_IDLE_LAT 0x883C883C
+#define WLAN_LTR_ACT_LAT 0x880B880B
+#define WLAN_LTR_CTRL1 0xCB004010
+#define WLAN_LTR_CTRL2 0x01233425
+
+static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev)
+{
+ u8 xtal_cap;
+ u32 val32;
+
+ /* power on BB/RF domain */
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN,
+ BIT_FEN_EN_25_1 | BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB);
+ rtw_write8_set(rtwdev, REG_RF_CTRL,
+ BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
+ rtw_write8(rtwdev, REG_AFE_CTRL1 + 1, 0x80);
+
+ rtw_phy_load_tables(rtwdev);
+
+ /* post init after header files config */
+ rtw_write32_clr(rtwdev, REG_RCR, BIT_RCR_ADF);
+ rtw_write8_set(rtwdev, REG_HIQ_NO_LMT_EN, BIT_HIQ_NO_LMT_EN_ROOT);
+ rtw_write16_set(rtwdev, REG_AFE_CTRL_4, BIT_CK320M_AFE_EN | BIT_EN_SYN);
+
+ xtal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+ rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL,
+ xtal_cap | (xtal_cap << 6));
+ rtw_write32_set(rtwdev, REG_FPGA0_RFMOD, BIT_CCKEN | BIT_OFDMEN);
+ if ((rtwdev->efuse.afe >> 4) == 14) {
+ rtw_write32_set(rtwdev, REG_AFE_CTRL3, BIT_XTAL_GMP_BIT4);
+ rtw_write32_clr(rtwdev, REG_AFE_CTRL1, BITS_PLL);
+ rtw_write32_set(rtwdev, REG_LDO_SWR_CTRL, BIT_XTA1);
+ rtw_write32_clr(rtwdev, REG_LDO_SWR_CTRL, BIT_XTA0);
+ }
+
+ rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
+ rtw_write16(rtwdev, REG_RETRY_LIMIT, WLAN_RL_VAL);
+ rtw_write32(rtwdev, REG_BAR_MODE_CTRL, WLAN_BAR_VAL);
+ rtw_write8(rtwdev, REG_ATIMWND, 0x2);
+ rtw_write8(rtwdev, REG_BCN_CTRL,
+ BIT_DIS_TSF_UDT | BIT_EN_BCN_FUNCTION | BIT_EN_TXBCN_RPT);
+ val32 = rtw_read32(rtwdev, REG_TBTT_PROHIBIT);
+ val32 &= ~BIT_MASK_TBTT_MASK;
+ val32 |= WLAN_TBTT_TIME_STOP_BCN;
+ rtw_write8(rtwdev, REG_TBTT_PROHIBIT, val32);
+ rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_VAL);
+ rtw_write8(rtwdev, REG_AGGR_BREAK_TIME, WLAN_AGG_BRK_TIME);
+ rtw_write16(rtwdev, REG_NAV_PROT_LEN, WLAN_NAV_PROT_LEN);
+ rtw_write16(rtwdev, REG_MAC_SPEC_SIFS, WLAN_SPEC_SIFS);
+ rtw_write16(rtwdev, REG_SIFS, WLAN_SPEC_SIFS);
+ rtw_write16(rtwdev, REG_SIFS + 2, WLAN_SPEC_SIFS);
+ rtw_write8(rtwdev, REG_SINGLE_AMPDU_CTRL, BIT_EN_SINGLE_APMDU);
+ rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RX_PKT_LIMIT);
+ rtw_write8(rtwdev, REG_MAX_AGGR_NUM, WLAN_MAX_AGG_NR);
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME, WLAN_AMPDU_MAX_TIME);
+ rtw_write8(rtwdev, REG_LEDCFG2, WLAN_ANT_SEL);
+
+ rtw_write32(rtwdev, REG_LTR_IDLE_LATENCY, WLAN_LTR_IDLE_LAT);
+ rtw_write32(rtwdev, REG_LTR_ACTIVE_LATENCY, WLAN_LTR_ACT_LAT);
+ rtw_write32(rtwdev, REG_LTR_CTRL_BASIC, WLAN_LTR_CTRL1);
+ rtw_write32(rtwdev, REG_LTR_CTRL_BASIC + 4, WLAN_LTR_CTRL2);
+
+ rtw_phy_init(rtwdev);
+
+ rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN);
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50);
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x20);
+}
+
+static void rtw8723de_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8723d_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+}
+
+static int rtw8723d_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw8723d_efuse *map;
+ int i;
+
+ map = (struct rtw8723d_efuse *)log_map;
+
+ efuse->rfe_option = 0;
+ efuse->rf_board_option = map->rf_board_option;
+ efuse->crystal_cap = map->xtal_k;
+ efuse->pa_type_2g = map->pa_type;
+ efuse->lna_type_2g = map->lna_type_2g[0];
+ efuse->channel_plan = map->channel_plan;
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ efuse->bt_setting = map->rf_bt_setting;
+ efuse->regd = map->rf_board_option & 0x7;
+ efuse->thermal_meter[0] = map->thermal_meter;
+ efuse->thermal_meter_k = map->thermal_meter;
+ efuse->afe = map->afe;
+
+ for (i = 0; i < 4; i++)
+ efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw8723de_efuse_parsing(efuse, map);
+ break;
+ default:
+ /* unsupported now */
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 min_rx_power = -120;
+ u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status);
+
+ pkt_stat->rx_power[RF_PATH_A] = pwdb - 97;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
+ pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
+ min_rx_power);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
+}
+
+static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 rxsc, bw;
+ s8 min_rx_power = -120;
+ s8 rx_evm;
+
+ if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
+ rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
+ else
+ rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
+
+ if (GET_PHY_STAT_P1_RF_MODE(phy_status) == 0)
+ bw = RTW_CHANNEL_WIDTH_20;
+ else if ((rxsc == 1) || (rxsc == 2))
+ bw = RTW_CHANNEL_WIDTH_20;
+ else
+ bw = RTW_CHANNEL_WIDTH_40;
+
+ pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ pkt_stat->bw = bw;
+ pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
+ min_rx_power);
+ pkt_stat->rx_evm[RF_PATH_A] = GET_PHY_STAT_P1_RXEVM_A(phy_status);
+ pkt_stat->rx_snr[RF_PATH_A] = GET_PHY_STAT_P1_RXSNR_A(phy_status);
+ pkt_stat->cfo_tail[RF_PATH_A] = GET_PHY_STAT_P1_CFO_TAIL_A(phy_status);
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
+ dm_info->rx_snr[RF_PATH_A] = pkt_stat->rx_snr[RF_PATH_A] >> 1;
+ dm_info->cfo_tail[RF_PATH_A] = (pkt_stat->cfo_tail[RF_PATH_A] * 5) >> 1;
+
+ rx_evm = clamp_t(s8, -pkt_stat->rx_evm[RF_PATH_A] >> 1, 0, 64);
+ rx_evm &= 0x3F; /* 64->0: second path of 1SS rate is 64 */
+ dm_info->rx_evm_dbm[RF_PATH_A] = rx_evm;
+}
+
+static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ u8 page;
+
+ page = *phy_status & 0xf;
+
+ switch (page) {
+ case 0:
+ query_phy_status_page0(rtwdev, phy_status, pkt_stat);
+ break;
+ case 1:
+ query_phy_status_page1(rtwdev, phy_status, pkt_stat);
+ break;
+ default:
+ rtw_warn(rtwdev, "unused phy status page (%d)\n", page);
+ return;
+ }
+}
+
+static void rtw8723d_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz;
+ u8 *phy_status = NULL;
+
+ memset(pkt_stat, 0, sizeof(*pkt_stat));
+
+ pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
+ pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
+ pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) &&
+ GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE;
+ pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
+ pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
+ pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
+ pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc);
+ pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc);
+ pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc);
+ pkt_stat->ppdu_cnt = 0;
+ pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc);
+
+ /* drv_info_sz is in units of 8 bytes */
+ pkt_stat->drv_info_sz *= 8;
+
+ /* the rx/phy status of a C2H command packet is of no interest */
+ if (pkt_stat->is_c2h)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
+ pkt_stat->drv_info_sz);
+ if (pkt_stat->phy_status) {
+ phy_status = rx_desc + desc_sz + pkt_stat->shift;
+ query_phy_status(rtwdev, phy_status, pkt_stat);
+ }
+
+ rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status);
+}
+
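+/* Detect a spur on channel 13/14: with the RF 3-wire interface disabled, run
+ * a PSD (power spectral density) measurement at the spur frequency and
+ * compare the report against the given threshold.
+ */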
+static bool rtw8723d_check_spur_ov_thres(struct rtw_dev *rtwdev,
+ u8 channel, u32 thres)
+{
+ u32 freq;
+ bool ret = false;
+
+ if (channel == 13)
+ freq = FREQ_CH13;
+ else if (channel == 14)
+ freq = FREQ_CH14;
+ else
+ return false;
+
+ rtw_write32(rtwdev, REG_ANALOG_P4, DIS_3WIRE);
+ rtw_write32(rtwdev, REG_PSDFN, freq);
+ rtw_write32(rtwdev, REG_PSDFN, START_PSD | freq);
+
+ msleep(30);
+ if (rtw_read32(rtwdev, REG_PSDRPT) >= thres)
+ ret = true;
+
+ rtw_write32(rtwdev, REG_PSDFN, freq);
+ rtw_write32(rtwdev, REG_ANALOG_P4, EN_3WIRE);
+
+ return ret;
+}
+
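+/* Enable or disable the RX DSP notch filter and the CSI mask used to cancel
+ * the spur; the settings differ between channel 13 and channel 14.
+ */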
+static void rtw8723d_cfg_notch(struct rtw_dev *rtwdev, u8 channel, bool notch)
+{
+ if (!notch) {
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x1f);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x0);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x0);
+ return;
+ }
+
+ switch (channel) {
+ case 13:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0xb);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x04000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ case 14:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x5);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00080000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ default:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x0);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x0);
+ break;
+ }
+}
+
+static void rtw8723d_spur_cal(struct rtw_dev *rtwdev, u8 channel)
+{
+ bool notch;
+
+ if (channel < 13) {
+ rtw8723d_cfg_notch(rtwdev, channel, false);
+ return;
+ }
+
+ notch = rtw8723d_check_spur_ov_thres(rtwdev, channel, SPUR_THRES);
+ rtw8723d_cfg_notch(rtwdev, channel, notch);
+}
+
+static void rtw8723d_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+ u32 rf_cfgch_a, rf_cfgch_b;
+
+ rf_cfgch_a = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK);
+ rf_cfgch_b = rtw_read_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK);
+
+ rf_cfgch_a &= ~RFCFGCH_CHANNEL_MASK;
+ rf_cfgch_b &= ~RFCFGCH_CHANNEL_MASK;
+ rf_cfgch_a |= (channel & RFCFGCH_CHANNEL_MASK);
+ rf_cfgch_b |= (channel & RFCFGCH_CHANNEL_MASK);
+
+ rf_cfgch_a &= ~RFCFGCH_BW_MASK;
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ rf_cfgch_a |= RFCFGCH_BW_20M;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rf_cfgch_a |= RFCFGCH_BW_40M;
+ break;
+ default:
+ break;
+ }
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_cfgch_a);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_cfgch_b);
+
+ rtw8723d_spur_cal(rtwdev, channel);
+}
+
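+/* CCK DFIR coefficients: set [0] is used on channels 1-13, set [1] on
+ * channel 14 (see rtw8723d_set_channel_bb()).
+ */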
+static const struct rtw_backup_info cck_dfir_cfg[][CCK_DFIR_NR] = {
+ [0] = {
+ { .len = 4, .reg = 0xA24, .val = 0x64B80C1C },
+ { .len = 4, .reg = 0xA28, .val = 0x00008810 },
+ { .len = 4, .reg = 0xAAC, .val = 0x01235667 },
+ },
+ [1] = {
+ { .len = 4, .reg = 0xA24, .val = 0x0000B81C },
+ { .len = 4, .reg = 0xA28, .val = 0x00000000 },
+ { .len = 4, .reg = 0xAAC, .val = 0x00003667 },
+ },
+};
+
+static void rtw8723d_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx)
+{
+ const struct rtw_backup_info *cck_dfir;
+ int i;
+
+ cck_dfir = channel <= 13 ? cck_dfir_cfg[0] : cck_dfir_cfg[1];
+
+ for (i = 0; i < CCK_DFIR_NR; i++, cck_dfir++)
+ rtw_write32(rtwdev, cck_dfir->reg, cck_dfir->val);
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x0);
+ rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x0);
+ rtw_write32_mask(rtwdev, REG_BBRX_DFIR, BIT_RXBB_DFIR_EN, 1);
+ rtw_write32_mask(rtwdev, REG_BBRX_DFIR, BIT_MASK_RXBB_DFIR, 0xa);
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x1);
+ rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x1);
+ rtw_write32_mask(rtwdev, REG_BBRX_DFIR, BIT_RXBB_DFIR_EN, 0);
+ rtw_write32_mask(rtwdev, REG_CCK0_SYS, BIT_CCK_SIDE_BAND,
+ (primary_ch_idx == RTW_SC_20_UPPER ? 1 : 0));
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw8723d_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_chan_idx)
+{
+ rtw8723d_set_channel_rf(rtwdev, channel, bw);
+ rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx);
+ rtw8723d_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
+}
+
+#define BIT_CFENDFORM BIT(9)
+#define BIT_WMAC_TCR_ERR0 BIT(12)
+#define BIT_WMAC_TCR_ERR1 BIT(13)
+#define BIT_TCR_CFG (BIT_CFENDFORM | BIT_WMAC_TCR_ERR0 | \
+ BIT_WMAC_TCR_ERR1)
+#define WLAN_RX_FILTER0 0xFFFF
+#define WLAN_RX_FILTER1 0x400
+#define WLAN_RX_FILTER2 0xFFFF
+#define WLAN_RCR_CFG 0x700060CE
+
+static int rtw8723d_mac_init(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
+ rtw_write32(rtwdev, REG_TCR, BIT_TCR_CFG);
+
+ rtw_write16(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
+ rtw_write16(rtwdev, REG_RXFLTMAP1, WLAN_RX_FILTER1);
+ rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
+ rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
+
+ rtw_write32(rtwdev, REG_INT_MIG, 0);
+ rtw_write32(rtwdev, REG_MCUTST_1, 0x0);
+
+ rtw_write8(rtwdev, REG_MISC_CTRL, BIT_DIS_SECOND_CCA);
+ rtw_write8(rtwdev, REG_2ND_CCA_CTRL, 0);
+
+ return 0;
+}
+
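+/* Switch the LDO25 regulator: when enabling, select the 2.5V output and set
+ * the enable bit; otherwise only clear the enable bit.
+ */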
+static void rtw8723d_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 ldo_pwr;
+
+ ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
+ if (enable) {
+ ldo_pwr &= ~BIT_MASK_LDO25_VOLTAGE;
+ ldo_pwr = (BIT_LDO25_VOLTAGE_V25 << 4) | BIT_LDO25_EN;
+ } else {
+ ldo_pwr &= ~BIT_LDO25_EN;
+ }
+ rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
+}
+
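+/* Program the TX AGC power index for every rate in a rate section using the
+ * per-rate register/mask pairs from the rtw8723d_txagc table.
+ */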
+static void
+rtw8723d_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ const struct rtw_hw_reg *txagc;
+ u8 rate, pwr_index;
+ int j;
+
+ for (j = 0; j < rtw_rate_size[rs]; j++) {
+ rate = rtw_rate_section[rs][j];
+ pwr_index = hal->tx_pwr_tbl[path][rate];
+
+ if (rate >= ARRAY_SIZE(rtw8723d_txagc)) {
+ rtw_warn(rtwdev, "rate 0x%x isn't supported\n", rate);
+ continue;
+ }
+ txagc = &rtw8723d_txagc[rate];
+ if (!txagc->addr) {
+ rtw_warn(rtwdev, "rate 0x%x isn't defined\n", rate);
+ continue;
+ }
+
+ rtw_write32_mask(rtwdev, txagc->addr, txagc->mask, pwr_index);
+ }
+}
+
+static void rtw8723d_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ int rs, path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ for (rs = 0; rs <= RTW_RATE_SECTION_HT_1S; rs++)
+ rtw8723d_set_tx_power_index_by_rate(rtwdev, path, rs);
+ }
+}
+
+static void rtw8723d_efuse_grant(struct rtw_dev *rtwdev, bool on)
+{
+ if (on) {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
+
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR);
+ rtw_write16_set(rtwdev, REG_SYS_CLKR, BIT_LOADER_CLK_EN | BIT_ANA8M);
+ } else {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
+ }
+}
+
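+/* Collect CCK/OFDM false-alarm, CCA and CRC counters for the dynamic
+ * mechanisms; the counters are held while being read and reset afterwards.
+ */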
+static void rtw8723d_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cck_fa_cnt;
+ u32 ofdm_fa_cnt;
+ u32 crc32_cnt;
+ u32 val32;
+
+ /* hold counter */
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 1);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KEEP, 1);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KEEP, 1);
+
+ cck_fa_cnt = rtw_read32_mask(rtwdev, REG_CCK_FA_LSB_11N, MASKBYTE0);
+ cck_fa_cnt += rtw_read32_mask(rtwdev, REG_CCK_FA_MSB_11N, MASKBYTE3) << 8;
+
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE1_11N);
+ ofdm_fa_cnt = u32_get_bits(val32, BIT_MASK_OFDM_FF_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_SF_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE2_11N);
+ dm_info->ofdm_cca_cnt = u32_get_bits(val32, BIT_MASK_OFDM_CCA_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_PF_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE3_11N);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_RI_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_CRC_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE4_11N);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_MNS_CNT);
+
+ dm_info->cck_fa_cnt = cck_fa_cnt;
+ dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = cck_fa_cnt + ofdm_fa_cnt;
+
+ dm_info->cck_err_cnt = rtw_read32(rtwdev, REG_IGI_C_11N);
+ dm_info->cck_ok_cnt = rtw_read32(rtwdev, REG_IGI_D_11N);
+ crc32_cnt = rtw_read32(rtwdev, REG_OFDM_CRC32_CNT_11N);
+ dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_ERR);
+ dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_OK);
+ crc32_cnt = rtw_read32(rtwdev, REG_HT_CRC32_CNT_11N);
+ dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_ERR);
+ dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_OK);
+ dm_info->vht_err_cnt = 0;
+ dm_info->vht_ok_cnt = 0;
+
+ val32 = rtw_read32(rtwdev, REG_CCK_CCA_CNT_11N);
+ dm_info->cck_cca_cnt = (u32_get_bits(val32, BIT_MASK_CCK_FA_MSB) << 8) |
+ u32_get_bits(val32, BIT_MASK_CCK_FA_LSB);
+ dm_info->total_cca_cnt = dm_info->cck_cca_cnt + dm_info->ofdm_cca_cnt;
+
+ /* reset counter */
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 2);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 2);
+ rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 1);
+ rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 0);
+}
+
+static struct rtw_chip_ops rtw8723d_ops = {
+ .phy_set_param = rtw8723d_phy_set_param,
+ .read_efuse = rtw8723d_read_efuse,
+ .query_rx_desc = rtw8723d_query_rx_desc,
+ .set_channel = rtw8723d_set_channel,
+ .mac_init = rtw8723d_mac_init,
+ .read_rf = rtw_phy_read_rf_sipi,
+ .write_rf = rtw_phy_write_rf_reg_sipi,
+ .set_tx_power_index = rtw8723d_set_tx_power_index,
+ .set_antenna = NULL,
+ .cfg_ldo25 = rtw8723d_cfg_ldo25,
+ .efuse_grant = rtw8723d_efuse_grant,
+ .false_alarm_statistics = rtw8723d_false_alarm_statistics,
+ .config_bfee = NULL,
+ .set_gid_table = NULL,
+ .cfg_csi_rate = NULL,
+};
+
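+/* Power sequences: each entry gives a register offset, the chip cuts and host
+ * interfaces it applies to, the address space, and a write/poll/delay command
+ * with mask and value. An offset of 0xFFFF with RTW_PWR_CMD_END terminates
+ * the sequence.
+ */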
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8723d[] = {
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(7), 0},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), 0},
+ {0x0023,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x0301,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8723d[] = {
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0001,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_MS},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, (BIT(1) | BIT(0)), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(0), 0},
+ {0x0010,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0063,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0062,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0058,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x005A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0068,
+ RTW_PWR_CUT_TEST_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0x0069,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x001f,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0077,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x001f,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x07},
+ {0x0077,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x07},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd *card_enable_flow_8723d[] = {
+ trans_carddis_to_cardemu_8723d,
+ trans_cardemu_to_act_8723d,
+ NULL
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_lps_8723d[] = {
+ {0x0301,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0xFF},
+ {0x0522,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0xFF},
+ {0x05F8,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xFF, 0},
+ {0x05F9,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xFF, 0},
+ {0x05FA,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xFF, 0},
+ {0x05FB,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xFF, 0},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 0, RTW_PWR_DELAY_US},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0100,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x03},
+ {0x0101,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0093,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0553,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_pre_carddis_8723d[] = {
+ {0x0003,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0080,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8723d[] = {
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0x0010,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), 0},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8723d[] = {
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x20},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) | BIT(4)},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 1},
+ {0x0023,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_post_carddis_8723d[] = {
+ {0x001D,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x001D,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x001C,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x0E},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd *card_disable_flow_8723d[] = {
+ trans_act_to_lps_8723d,
+ trans_act_to_pre_carddis_8723d,
+ trans_act_to_cardemu_8723d,
+ trans_cardemu_to_carddis_8723d,
+ trans_act_to_post_carddis_8723d,
+ NULL
+};
+
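+/* Reserved-page and RQPN (request queue page number) tables; the core picks a
+ * row according to the host interface configuration in use.
+ */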
+static const struct rtw_page_table page_table_8723d[] = {
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+};
+
+static const struct rtw_rqpn rqpn_table_8723d[] = {
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+};
+
+static const struct rtw_intf_phy_para pcie_gen1_param_8723d[] = {
+ {0x0008, 0x4a22,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0009, 0x1000,
+ RTW_IP_SEL_PHY,
+ ~(RTW_INTF_PHY_CUT_A | RTW_INTF_PHY_CUT_B),
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static const struct rtw_intf_phy_para_table phy_para_table_8723d = {
+ .gen1_para = pcie_gen1_param_8723d,
+ .n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8723d),
+};
+
+static const struct rtw_hw_reg rtw8723d_dig[] = {
+ [0] = { .addr = 0xc50, .mask = 0x7f },
+ [1] = { .addr = 0xc50, .mask = 0x7f },
+};
+
+static const struct rtw_hw_reg rtw8723d_dig_cck[] = {
+ [0] = { .addr = 0xa0c, .mask = 0x3f00 },
+};
+
+static const struct rtw_rf_sipi_addr rtw8723d_rf_sipi_addr[] = {
+ [RF_PATH_A] = { .hssi_1 = 0x820, .lssi_read = 0x8a0,
+ .hssi_2 = 0x824, .lssi_read_pi = 0x8b8},
+ [RF_PATH_B] = { .hssi_1 = 0x828, .lssi_read = 0x8a4,
+ .hssi_2 = 0x82c, .lssi_read_pi = 0x8bc},
+};
+
+static const struct rtw_rfe_def rtw8723d_rfe_defs[] = {
+ [0] = { .phy_pg_tbl = &rtw8723d_bb_pg_tbl,
+ .txpwr_lmt_tbl = &rtw8723d_txpwr_lmt_tbl,},
+};
+
+struct rtw_chip_info rtw8723d_hw_spec = {
+ .ops = &rtw8723d_ops,
+ .id = RTW_CHIP_TYPE_8723D,
+ .fw_name = "rtw88/rtw8723d_fw.bin",
+ .wlan_cpu = RTW_WCPU_11N,
+ .tx_pkt_desc_sz = 40,
+ .tx_buf_desc_sz = 16,
+ .rx_pkt_desc_sz = 24,
+ .rx_buf_desc_sz = 8,
+ .phy_efuse_size = 512,
+ .log_efuse_size = 512,
+ .ptct_efuse_size = 96 + 1,
+ .txff_size = 32768,
+ .rxff_size = 16384,
+ .txgi_factor = 1,
+ .is_pwr_by_rate_dec = true,
+ .max_power_index = 0x3f,
+ .csi_buf_pg_num = 0,
+ .band = RTW_BAND_2G,
+ .page_size = 128,
+ .dig_min = 0x20,
+ .ht_supported = true,
+ .vht_supported = false,
+ .lps_deep_mode_supported = 0,
+ .sys_func_en = 0xFD,
+ .pwr_on_seq = card_enable_flow_8723d,
+ .pwr_off_seq = card_disable_flow_8723d,
+ .page_table = page_table_8723d,
+ .rqpn_table = rqpn_table_8723d,
+ .intf_table = &phy_para_table_8723d,
+ .dig = rtw8723d_dig,
+ .dig_cck = rtw8723d_dig_cck,
+ .rf_sipi_addr = {0x840, 0x844},
+ .rf_sipi_read_addr = rtw8723d_rf_sipi_addr,
+ .fix_rf_phy_num = 2,
+ .mac_tbl = &rtw8723d_mac_tbl,
+ .agc_tbl = &rtw8723d_agc_tbl,
+ .bb_tbl = &rtw8723d_bb_tbl,
+ .rf_tbl = {&rtw8723d_rf_a_tbl},
+ .rfe_defs = rtw8723d_rfe_defs,
+ .rfe_defs_size = ARRAY_SIZE(rtw8723d_rfe_defs),
+ .rx_ldpc = false,
+};
+EXPORT_SYMBOL(rtw8723d_hw_spec);
+
+MODULE_FIRMWARE("rtw88/rtw8723d_fw.bin");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
new file mode 100644
index 000000000000..ac66f672bec8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8723D_H__
+#define __RTW8723D_H__
+
+struct rtw8723de_efuse {
+ u8 mac_addr[ETH_ALEN]; /* 0xd0 */
+ u8 vender_id[2];
+ u8 device_id[2];
+ u8 sub_vender_id[2];
+ u8 sub_device_id[2];
+};
+
+struct rtw8723d_efuse {
+ __le16 rtl_id;
+ u8 rsvd[2];
+ u8 afe;
+ u8 rsvd1[11];
+
+ /* power index for four RF paths */
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 thermal_meter;
+ u8 iqk_lck;
+ u8 pa_type; /* 0xbc */
+ u8 lna_type_2g[2]; /* 0xbd */
+ u8 lna_type_5g[2];
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 tx_bb_swing_setting_2g;
+ u8 res_c7;
+ u8 tx_pwr_calibrate_rate;
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 rfe_option;
+ u8 country_code[2];
+ u8 res[3];
+ struct rtw8723de_efuse e;
+};
+
+/* phy status page0 */
+#define GET_PHY_STAT_P0_PWDB(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+
+/* phy status page1 */
+#define GET_PHY_STAT_P1_PWDB_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_PWDB_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(23, 16))
+#define GET_PHY_STAT_P1_RF_MODE(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x03), GENMASK(29, 28))
+#define GET_PHY_STAT_P1_L_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
+#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+#define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_CFO_TAIL_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0))
+
+#define SPUR_THRES 0x16
+#define CCK_DFIR_NR 3
+#define DIS_3WIRE 0xccf000c0
+#define EN_3WIRE 0xccc000c0
+#define START_PSD 0x400000
+#define FREQ_CH13 0xfccd
+#define FREQ_CH14 0xff9a
+#define RFCFGCH_CHANNEL_MASK GENMASK(7, 0)
+#define RFCFGCH_BW_MASK (BIT(11) | BIT(10))
+#define RFCFGCH_BW_20M (BIT(11) | BIT(10))
+#define RFCFGCH_BW_40M BIT(10)
+#define BIT_MASK_RFMOD BIT(0)
+
+#define REG_PSDFN 0x0808
+#define REG_ANALOG_P4 0x088c
+#define REG_PSDRPT 0x08b4
+#define REG_FPGA1_RFMOD 0x0900
+#define REG_BBRX_DFIR 0x0954
+#define BIT_MASK_RXBB_DFIR GENMASK(27, 24)
+#define BIT_RXBB_DFIR_EN BIT(19)
+#define REG_CCK0_SYS 0x0a00
+#define BIT_CCK_SIDE_BAND BIT(4)
+#define REG_CCK_FA_RST_11N 0x0a2c
+#define BIT_MASK_CCK_CNT_KEEP BIT(12)
+#define BIT_MASK_CCK_CNT_EN BIT(13)
+#define BIT_MASK_CCK_CNT_KPEN (BIT_MASK_CCK_CNT_KEEP | BIT_MASK_CCK_CNT_EN)
+#define BIT_MASK_CCK_FA_KEEP BIT(14)
+#define BIT_MASK_CCK_FA_EN BIT(15)
+#define BIT_MASK_CCK_FA_KPEN (BIT_MASK_CCK_FA_KEEP | BIT_MASK_CCK_FA_EN)
+#define REG_CCK_FA_LSB_11N 0x0a5c
+#define REG_CCK_FA_MSB_11N 0x0a58
+#define REG_CCK_CCA_CNT_11N 0x0a60
+#define BIT_MASK_CCK_FA_MSB GENMASK(7, 0)
+#define BIT_MASK_CCK_FA_LSB GENMASK(15, 8)
+#define REG_OFDM_FA_HOLDC_11N 0x0c00
+#define BIT_MASK_OFDM_FA_KEEP BIT(31)
+#define REG_OFDM_FA_RSTC_11N 0x0c0c
+#define BIT_MASK_OFDM_FA_RST BIT(31)
+#define REG_OFDM0_RXDSP 0x0c40
+#define BIT_MASK_RXDSP GENMASK(28, 24)
+#define BIT_EN_RXDSP BIT(9)
+#define REG_OFDM0_XAAGC1 0x0c50
+#define REG_OFDM0_XBAGC1 0x0c58
+#define REG_OFDM_FA_TYPE1_11N 0x0cf0
+#define BIT_MASK_OFDM_FF_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_SF_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_RSTD_11N 0x0d00
+#define BIT_MASK_OFDM_FA_RST1 BIT(27)
+#define BIT_MASK_OFDM_FA_KEEP1 BIT(31)
+#define REG_OFDM1_CFOTRK 0x0d2c
+#define BIT_EN_CFOTRK BIT(28)
+#define REG_OFDM1_CSI1 0x0d40
+#define REG_OFDM1_CSI2 0x0d44
+#define REG_OFDM1_CSI3 0x0d48
+#define REG_OFDM1_CSI4 0x0d4c
+#define REG_OFDM_FA_TYPE2_11N 0x0da0
+#define BIT_MASK_OFDM_CCA_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_PF_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_TYPE3_11N 0x0da4
+#define BIT_MASK_OFDM_RI_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_CRC_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_TYPE4_11N 0x0da8
+#define BIT_MASK_OFDM_MNS_CNT GENMASK(15, 0)
+#define REG_PAGE_F_RST_11N 0x0f14
+#define BIT_MASK_F_RST_ALL BIT(16)
+#define REG_IGI_C_11N 0x0f84
+#define REG_IGI_D_11N 0x0f88
+#define REG_HT_CRC32_CNT_11N 0x0f90
+#define BIT_MASK_HT_CRC_OK GENMASK(15, 0)
+#define BIT_MASK_HT_CRC_ERR GENMASK(31, 16)
+#define REG_OFDM_CRC32_CNT_11N 0x0f94
+#define BIT_MASK_OFDM_LCRC_OK GENMASK(15, 0)
+#define BIT_MASK_OFDM_LCRC_ERR GENMASK(31, 16)
+#define REG_HT_CRC32_CNT_11N_AGG 0x0fb8
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d_table.c b/drivers/net/wireless/realtek/rtw88/rtw8723d_table.c
new file mode 100644
index 000000000000..27a22b392df0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d_table.c
@@ -0,0 +1,1196 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "phy.h"
+#include "rtw8723d_table.h"
+
+static const u32 rtw8723d_mac[] = {
+ 0x020, 0x00000013,
+ 0x02F, 0x00000010,
+ 0x077, 0x00000007,
+ 0x421, 0x0000000F,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000002,
+ 0x435, 0x00000003,
+ 0x436, 0x00000005,
+ 0x437, 0x00000007,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43A, 0x00000000,
+ 0x43B, 0x00000001,
+ 0x43C, 0x00000002,
+ 0x43D, 0x00000003,
+ 0x43E, 0x00000005,
+ 0x43F, 0x00000007,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x00000000,
+ 0x446, 0x00000000,
+ 0x447, 0x00000000,
+ 0x448, 0x00000000,
+ 0x449, 0x000000F0,
+ 0x44A, 0x0000000F,
+ 0x44B, 0x0000003E,
+ 0x44C, 0x00000010,
+ 0x44D, 0x00000000,
+ 0x44E, 0x00000000,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x000000F0,
+ 0x452, 0x0000000F,
+ 0x453, 0x00000000,
+ 0x456, 0x0000005E,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000028,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000028,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000C,
+ 0x63F, 0x0000000C,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66A, 0x000000B0,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+ 0x765, 0x00000018,
+ 0x76E, 0x00000004,
+ 0x7C0, 0x00000038,
+ 0x7C2, 0x0000000F,
+ 0x7C3, 0x000000C0,
+ 0x073, 0x00000004,
+ 0x7C4, 0x00000077,
+ 0x07C, 0x00000003,
+ 0x016, 0x000000B3,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8723d_mac, rtw_phy_cfg_mac);
+
+static const u32 rtw8723d_agc[] = {
+ 0xC78, 0xFE000101,
+ 0xC78, 0xFD010101,
+ 0xC78, 0xFC020101,
+ 0xC78, 0xFB030101,
+ 0xC78, 0xFA040101,
+ 0xC78, 0xF9050101,
+ 0xC78, 0xF8060101,
+ 0xC78, 0xF7070101,
+ 0xC78, 0xF6080101,
+ 0xC78, 0xF5090101,
+ 0xC78, 0xF40A0101,
+ 0xC78, 0xF30B0101,
+ 0xC78, 0xF20C0101,
+ 0xC78, 0xF10D0101,
+ 0xC78, 0xF00E0101,
+ 0xC78, 0xEF0F0101,
+ 0xC78, 0xEE100101,
+ 0xC78, 0xED110101,
+ 0xC78, 0xEC120101,
+ 0xC78, 0xEB130101,
+ 0xC78, 0xEA140101,
+ 0xC78, 0xE9150101,
+ 0xC78, 0xE8160101,
+ 0xC78, 0xE7170101,
+ 0xC78, 0xE6180101,
+ 0xC78, 0xE5190101,
+ 0xC78, 0xE41A0101,
+ 0xC78, 0xE31B0101,
+ 0xC78, 0xE21C0101,
+ 0xC78, 0xE11D0101,
+ 0xC78, 0xE01E0101,
+ 0xC78, 0x861F0101,
+ 0xC78, 0x85200101,
+ 0xC78, 0x84210101,
+ 0xC78, 0x83220101,
+ 0xC78, 0x82230101,
+ 0xC78, 0x81240101,
+ 0xC78, 0x80250101,
+ 0xC78, 0x44260101,
+ 0xC78, 0x43270101,
+ 0xC78, 0x42280101,
+ 0xC78, 0x41290101,
+ 0xC78, 0x402A0101,
+ 0xC78, 0x022B0101,
+ 0xC78, 0x012C0101,
+ 0xC78, 0x002D0101,
+ 0xC78, 0xC52E0001,
+ 0xC78, 0xC42F0001,
+ 0xC78, 0xC3300001,
+ 0xC78, 0xC2310001,
+ 0xC78, 0xC1320001,
+ 0xC78, 0xC0330001,
+ 0xC78, 0x04340001,
+ 0xC78, 0x03350001,
+ 0xC78, 0x02360001,
+ 0xC78, 0x01370001,
+ 0xC78, 0x00380001,
+ 0xC78, 0x00390001,
+ 0xC78, 0x003A0001,
+ 0xC78, 0x003B0001,
+ 0xC78, 0x003C0001,
+ 0xC78, 0x003D0001,
+ 0xC78, 0x003E0001,
+ 0xC78, 0x003F0001,
+ 0xC78, 0x6F002001,
+ 0xC78, 0x6F012001,
+ 0xC78, 0x6F022001,
+ 0xC78, 0x6F032001,
+ 0xC78, 0x6F042001,
+ 0xC78, 0x6F052001,
+ 0xC78, 0x6F062001,
+ 0xC78, 0x6F072001,
+ 0xC78, 0x6F082001,
+ 0xC78, 0x6F092001,
+ 0xC78, 0x6F0A2001,
+ 0xC78, 0x6F0B2001,
+ 0xC78, 0x6F0C2001,
+ 0xC78, 0x6F0D2001,
+ 0xC78, 0x6F0E2001,
+ 0xC78, 0x6F0F2001,
+ 0xC78, 0x6F102001,
+ 0xC78, 0x6F112001,
+ 0xC78, 0x6F122001,
+ 0xC78, 0x6F132001,
+ 0xC78, 0x6F142001,
+ 0xC78, 0x6F152001,
+ 0xC78, 0x6F162001,
+ 0xC78, 0x6F172001,
+ 0xC78, 0x6F182001,
+ 0xC78, 0x6F192001,
+ 0xC78, 0x6F1A2001,
+ 0xC78, 0x6F1B2001,
+ 0xC78, 0x6F1C2001,
+ 0xC78, 0x6F1D2001,
+ 0xC78, 0x6F1E2001,
+ 0xC78, 0x6F1F2001,
+ 0xC78, 0x6F202001,
+ 0xC78, 0x6F212001,
+ 0xC78, 0x6F222001,
+ 0xC78, 0x6F232001,
+ 0xC78, 0x6E242001,
+ 0xC78, 0x6D252001,
+ 0xC78, 0x6C262001,
+ 0xC78, 0x6B272001,
+ 0xC78, 0x6A282001,
+ 0xC78, 0x69292001,
+ 0xC78, 0x4B2A2001,
+ 0xC78, 0x4A2B2001,
+ 0xC78, 0x492C2001,
+ 0xC78, 0x482D2001,
+ 0xC78, 0x472E2001,
+ 0xC78, 0x462F2001,
+ 0xC78, 0x45302001,
+ 0xC78, 0x44312001,
+ 0xC78, 0x43322001,
+ 0xC78, 0x42332001,
+ 0xC78, 0x41342001,
+ 0xC78, 0x40352001,
+ 0xC78, 0x02362001,
+ 0xC78, 0x01372001,
+ 0xC78, 0x00382001,
+ 0xC78, 0x00392001,
+ 0xC78, 0x003A2001,
+ 0xC78, 0x003B2001,
+ 0xC78, 0x003C2001,
+ 0xC78, 0x003D2001,
+ 0xC78, 0x003E2001,
+ 0xC78, 0x003F2001,
+ 0xC78, 0x7F003101,
+ 0xC78, 0x7F013101,
+ 0xC78, 0x7F023101,
+ 0xC78, 0x7F033101,
+ 0xC78, 0x7F043101,
+ 0xC78, 0x7F053101,
+ 0xC78, 0x7F063101,
+ 0xC78, 0x7F073101,
+ 0xC78, 0x7E083101,
+ 0xC78, 0x7D093101,
+ 0xC78, 0x7C0A3101,
+ 0xC78, 0x7B0B3101,
+ 0xC78, 0x7A0C3101,
+ 0xC78, 0x790D3101,
+ 0xC78, 0x780E3101,
+ 0xC78, 0x770F3101,
+ 0xC78, 0x76103101,
+ 0xC78, 0x75113101,
+ 0xC78, 0x74123101,
+ 0xC78, 0x73133101,
+ 0xC78, 0x72143101,
+ 0xC78, 0x71153101,
+ 0xC78, 0x70163101,
+ 0xC78, 0x6F173101,
+ 0xC78, 0x6E183101,
+ 0xC78, 0x6D193101,
+ 0xC78, 0x6C1A3101,
+ 0xC78, 0x6B1B3101,
+ 0xC78, 0x6A1C3101,
+ 0xC78, 0x691D3101,
+ 0xC78, 0x681E3101,
+ 0xC78, 0x4B1F3101,
+ 0xC78, 0x4A203101,
+ 0xC78, 0x49213101,
+ 0xC78, 0x48223101,
+ 0xC78, 0x47233101,
+ 0xC78, 0x46243101,
+ 0xC78, 0x45253101,
+ 0xC78, 0x44263101,
+ 0xC78, 0x43273101,
+ 0xC78, 0x42283101,
+ 0xC78, 0x41293101,
+ 0xC78, 0x402A3101,
+ 0xC78, 0x022B3101,
+ 0xC78, 0x012C3101,
+ 0xC78, 0x002D3101,
+ 0xC78, 0x002E3101,
+ 0xC78, 0x002F3101,
+ 0xC78, 0x00303101,
+ 0xC78, 0x00313101,
+ 0xC78, 0x00323101,
+ 0xC78, 0x00333101,
+ 0xC78, 0x00343101,
+ 0xC78, 0x00353101,
+ 0xC78, 0x00363101,
+ 0xC78, 0x00373101,
+ 0xC78, 0x00383101,
+ 0xC78, 0x00393101,
+ 0xC78, 0x003A3101,
+ 0xC78, 0x003B3101,
+ 0xC78, 0x003C3101,
+ 0xC78, 0x003D3101,
+ 0xC78, 0x003E3101,
+ 0xC78, 0x003F3101,
+ 0xC78, 0xFE403101,
+ 0xC78, 0xFD413101,
+ 0xC78, 0xFC423101,
+ 0xC78, 0xFB433101,
+ 0xC78, 0xFA443101,
+ 0xC78, 0xF9453101,
+ 0xC78, 0xF8463101,
+ 0xC78, 0xF7473101,
+ 0xC78, 0xF6483101,
+ 0xC78, 0xF5493101,
+ 0xC78, 0xF44A3101,
+ 0xC78, 0xF34B3101,
+ 0xC78, 0xF24C3101,
+ 0xC78, 0xF14D3101,
+ 0xC78, 0xF04E3101,
+ 0xC78, 0xEF4F3101,
+ 0xC78, 0xEE503101,
+ 0xC78, 0xED513101,
+ 0xC78, 0xEC523101,
+ 0xC78, 0xEB533101,
+ 0xC78, 0xEA543101,
+ 0xC78, 0xE9553101,
+ 0xC78, 0xE8563101,
+ 0xC78, 0xE7573101,
+ 0xC78, 0xE6583101,
+ 0xC78, 0xE5593101,
+ 0xC78, 0xE45A3101,
+ 0xC78, 0xE35B3101,
+ 0xC78, 0xE25C3101,
+ 0xC78, 0xE15D3101,
+ 0xC78, 0xE05E3101,
+ 0xC78, 0x865F3101,
+ 0xC78, 0x85603101,
+ 0xC78, 0x84613101,
+ 0xC78, 0x83623101,
+ 0xC78, 0x82633101,
+ 0xC78, 0x81643101,
+ 0xC78, 0x80653101,
+ 0xC78, 0x80663101,
+ 0xC78, 0x80673101,
+ 0xC78, 0x80683101,
+ 0xC78, 0x80693101,
+ 0xC78, 0x806A3101,
+ 0xC78, 0x806B3101,
+ 0xC78, 0x806C3101,
+ 0xC78, 0x806D3101,
+ 0xC78, 0x806E3101,
+ 0xC78, 0x806F3101,
+ 0xC78, 0x80703101,
+ 0xC78, 0x80713101,
+ 0xC78, 0x80723101,
+ 0xC78, 0x80733101,
+ 0xC78, 0x80743101,
+ 0xC78, 0x80753101,
+ 0xC78, 0x80763101,
+ 0xC78, 0x80773101,
+ 0xC78, 0x80783101,
+ 0xC78, 0x80793101,
+ 0xC78, 0x807A3101,
+ 0xC78, 0x807B3101,
+ 0xC78, 0x807C3101,
+ 0xC78, 0x807D3101,
+ 0xC78, 0x807E3101,
+ 0xC78, 0x807F3101,
+ 0xC78, 0xEF402001,
+ 0xC78, 0xEF412001,
+ 0xC78, 0xEF422001,
+ 0xC78, 0xEF432001,
+ 0xC78, 0xEF442001,
+ 0xC78, 0xEF452001,
+ 0xC78, 0xEF462001,
+ 0xC78, 0xEF472001,
+ 0xC78, 0xEF482001,
+ 0xC78, 0xEF492001,
+ 0xC78, 0xEF4A2001,
+ 0xC78, 0xEF4B2001,
+ 0xC78, 0xEF4C2001,
+ 0xC78, 0xEF4D2001,
+ 0xC78, 0xEF4E2001,
+ 0xC78, 0xEF4F2001,
+ 0xC78, 0xEF502001,
+ 0xC78, 0xEF512001,
+ 0xC78, 0xEF522001,
+ 0xC78, 0xEF532001,
+ 0xC78, 0xEF542001,
+ 0xC78, 0xEF552001,
+ 0xC78, 0xEF562001,
+ 0xC78, 0xEF572001,
+ 0xC78, 0xEF582001,
+ 0xC78, 0xEF592001,
+ 0xC78, 0xEF5A2001,
+ 0xC78, 0xEF5B2001,
+ 0xC78, 0xEF5C2001,
+ 0xC78, 0xEF5D2001,
+ 0xC78, 0xEF5E2001,
+ 0xC78, 0xEF5F2001,
+ 0xC78, 0xEF602001,
+ 0xC78, 0xEE612001,
+ 0xC78, 0xED622001,
+ 0xC78, 0xEC632001,
+ 0xC78, 0xEB642001,
+ 0xC78, 0xEA652001,
+ 0xC78, 0xE9662001,
+ 0xC78, 0xE8672001,
+ 0xC78, 0xCB682001,
+ 0xC78, 0xCA692001,
+ 0xC78, 0xC96A2001,
+ 0xC78, 0xC86B2001,
+ 0xC78, 0xC76C2001,
+ 0xC78, 0xC66D2001,
+ 0xC78, 0xC56E2001,
+ 0xC78, 0xC46F2001,
+ 0xC78, 0xC3702001,
+ 0xC78, 0xC2712001,
+ 0xC78, 0xC1722001,
+ 0xC78, 0xC0732001,
+ 0xC78, 0x82742001,
+ 0xC78, 0x81752001,
+ 0xC78, 0x80762001,
+ 0xC78, 0x80772001,
+ 0xC78, 0x80782001,
+ 0xC78, 0x80792001,
+ 0xC78, 0x807A2001,
+ 0xC78, 0x807B2001,
+ 0xC78, 0x807C2001,
+ 0xC78, 0x807D2001,
+ 0xC78, 0x807E2001,
+ 0xC78, 0x807F2001,
+ 0xC78, 0xFA001101,
+ 0xC78, 0xF9011101,
+ 0xC78, 0xF8021101,
+ 0xC78, 0xF7031101,
+ 0xC78, 0xF6041101,
+ 0xC78, 0xF5051101,
+ 0xC78, 0xF4061101,
+ 0xC78, 0xD7071101,
+ 0xC78, 0xD6081101,
+ 0xC78, 0xD5091101,
+ 0xC78, 0xD40A1101,
+ 0xC78, 0x970B1101,
+ 0xC78, 0x960C1101,
+ 0xC78, 0x950D1101,
+ 0xC78, 0x940E1101,
+ 0xC78, 0x930F1101,
+ 0xC78, 0x92101101,
+ 0xC78, 0x91111101,
+ 0xC78, 0x90121101,
+ 0xC78, 0x8F131101,
+ 0xC78, 0x8E141101,
+ 0xC78, 0x8D151101,
+ 0xC78, 0x8C161101,
+ 0xC78, 0x8B171101,
+ 0xC78, 0x8A181101,
+ 0xC78, 0x89191101,
+ 0xC78, 0x881A1101,
+ 0xC78, 0x871B1101,
+ 0xC78, 0x861C1101,
+ 0xC78, 0x851D1101,
+ 0xC78, 0x841E1101,
+ 0xC78, 0x831F1101,
+ 0xC78, 0x82201101,
+ 0xC78, 0x81211101,
+ 0xC78, 0x80221101,
+ 0xC78, 0x43231101,
+ 0xC78, 0x42241101,
+ 0xC78, 0x41251101,
+ 0xC78, 0x04261101,
+ 0xC78, 0x03271101,
+ 0xC78, 0x02281101,
+ 0xC78, 0x01291101,
+ 0xC78, 0x002A1101,
+ 0xC78, 0xC42B1001,
+ 0xC78, 0xC32C1001,
+ 0xC78, 0xC22D1001,
+ 0xC78, 0xC12E1001,
+ 0xC78, 0xC02F1001,
+ 0xC78, 0x85301001,
+ 0xC78, 0x84311001,
+ 0xC78, 0x83321001,
+ 0xC78, 0x82331001,
+ 0xC78, 0x81341001,
+ 0xC78, 0x80351001,
+ 0xC78, 0x05361001,
+ 0xC78, 0x04371001,
+ 0xC78, 0x03381001,
+ 0xC78, 0x02391001,
+ 0xC78, 0x013A1001,
+ 0xC78, 0x003B1001,
+ 0xC78, 0x003C1001,
+ 0xC78, 0x003D1001,
+ 0xC78, 0x003E1001,
+ 0xC78, 0x003F1001,
+ 0xC50, 0x69553422,
+ 0xC50, 0x69553420,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8723d_agc, rtw_phy_cfg_agc);
+
+static const u32 rtw8723d_bb[] = {
+ 0x800, 0x80046C00,
+ 0x804, 0x00000003,
+ 0x808, 0x0000FC00,
+ 0x80C, 0x0000000A,
+ 0x810, 0x10001331,
+ 0x814, 0x020C3D10,
+ 0x818, 0x00200385,
+ 0x81C, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390204,
+ 0x828, 0x00000000,
+ 0x82C, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83C, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84C, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569A11A9,
+ 0x85C, 0x01000014,
+ 0x860, 0x66F60110,
+ 0x864, 0x461F0641,
+ 0x868, 0x00000000,
+ 0x86C, 0x27272700,
+ 0x870, 0x07000460,
+ 0x874, 0x25004000,
+ 0x878, 0x00000808,
+ 0x87C, 0x004F0201,
+ 0x880, 0xB2002E12,
+ 0x884, 0x00000007,
+ 0x888, 0x00000000,
+ 0x88C, 0xCCC000C0,
+ 0x890, 0x00000800,
+ 0x894, 0xFFFFFFFE,
+ 0x898, 0x40302010,
+ 0x89C, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90C, 0x81121111,
+ 0x910, 0x00000402,
+ 0x914, 0x00000300,
+ 0x920, 0x18C6318C,
+ 0x924, 0x0000018C,
+ 0x948, 0x99000000,
+ 0x94C, 0x00000010,
+ 0x950, 0x00003800,
+ 0x954, 0x5A380000,
+ 0x958, 0x4BC6D87A,
+ 0x95C, 0x04EB9B79,
+ 0x96C, 0x00000003,
+ 0x970, 0x00000000,
+ 0x974, 0x00000000,
+ 0x978, 0x00000000,
+ 0x97C, 0x13000000,
+ 0x980, 0x00000000,
+ 0xA00, 0x00D046C8,
+ 0xA04, 0x80FF800C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E20100F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x1114D028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0xE82C0001,
+ 0xA24, 0x64B80C1C,
+ 0xA28, 0x00008810,
+ 0xA2C, 0x00D30000,
+ 0xA70, 0x101FBF00,
+ 0xA74, 0x00000007,
+ 0xA78, 0x00008900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x2180FA74,
+ 0xA84, 0x00200000,
+ 0xA88, 0x040C0000,
+ 0xA8C, 0x12345678,
+ 0xA90, 0xABCDEF00,
+ 0xA94, 0x001B1B89,
+ 0xA98, 0x00000000,
+ 0xA9C, 0x00020000,
+ 0xAA0, 0x00000000,
+ 0xAA4, 0x0000000C,
+ 0xAA8, 0xCA100008,
+ 0xAAC, 0x01235667,
+ 0xAB0, 0x00000000,
+ 0xAB4, 0x20201402,
+ 0xB2C, 0x00000000,
+ 0xC00, 0x48071D40,
+ 0xC04, 0x03A05611,
+ 0xC08, 0x000000E4,
+ 0xC0C, 0x6C6C6C6C,
+ 0xC10, 0x28800000,
+ 0xC14, 0x40000100,
+ 0xC18, 0x08800000,
+ 0xC1C, 0x40000100,
+ 0xC20, 0x00000000,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x69E9AC48,
+ 0xC34, 0x31000040,
+ 0xC38, 0x21688080,
+ 0xC3C, 0x000016D4,
+ 0xC40, 0x1F78403F,
+ 0xC44, 0x00010036,
+ 0xC48, 0xEC020107,
+ 0xC4C, 0x007F037F,
+ 0xC50, 0x69553420,
+ 0xC54, 0x43BC0094,
+ 0xC58, 0x00015969,
+ 0xC5C, 0x00310492,
+ 0xC60, 0x00280A00,
+ 0xC64, 0x7112848B,
+ 0xC68, 0x47C074FF,
+ 0xC6C, 0x00000036,
+ 0xC70, 0x2C7F000D,
+ 0xC74, 0x020600DB,
+ 0xC78, 0x0000001F,
+ 0xC7C, 0x00B91612,
+ 0xC80, 0x390000E4,
+ 0xC84, 0x21F60000,
+ 0xC88, 0x40000100,
+ 0xC8C, 0x20200000,
+ 0xC90, 0x00091521,
+ 0xC94, 0x00000000,
+ 0xC98, 0x00121820,
+ 0xC9C, 0x00007F7F,
+ 0xCA0, 0x00012000,
+ 0xCA4, 0x800000A0,
+ 0xCA8, 0x84E6C606,
+ 0xCAC, 0x00000060,
+ 0xCB0, 0x00000000,
+ 0xCB4, 0x00000000,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x28000000,
+ 0xCC0, 0x0010A3D0,
+ 0xCC4, 0x00000F7D,
+ 0xCC8, 0x000442D6,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x000001C8,
+ 0xCD4, 0x001C8000,
+ 0xCD8, 0x00000100,
+ 0xCDC, 0x40100000,
+ 0xCE0, 0x00222220,
+ 0xCE4, 0x20000000,
+ 0xCE8, 0x37644302,
+ 0xCEC, 0x2F97D40C,
+ 0xD00, 0x00030740,
+ 0xD04, 0x40020401,
+ 0xD08, 0x0000907F,
+ 0xD0C, 0x20010201,
+ 0xD10, 0xA0633333,
+ 0xD14, 0x3333BC53,
+ 0xD18, 0x7A8F5B6F,
+ 0xD2C, 0xCC979975,
+ 0xD30, 0x00000000,
+ 0xD34, 0x40608000,
+ 0xD38, 0x88000000,
+ 0xD3C, 0xC0127343,
+ 0xD40, 0x00000000,
+ 0xD44, 0x00000000,
+ 0xD48, 0x00000000,
+ 0xD4C, 0x00000000,
+ 0xD50, 0x00000038,
+ 0xD54, 0x00000000,
+ 0xD58, 0x00000282,
+ 0xD5C, 0x30032064,
+ 0xD60, 0x4653DE68,
+ 0xD64, 0x04518A3C,
+ 0xD68, 0x00002101,
+ 0xE00, 0x2D2D2D2D,
+ 0xE04, 0x2D2D2D2D,
+ 0xE08, 0x0390272D,
+ 0xE10, 0x2D2D2D2D,
+ 0xE14, 0x2D2D2D2D,
+ 0xE18, 0x2D2D2D2D,
+ 0xE1C, 0x2D2D2D2D,
+ 0xE28, 0x00000000,
+ 0xE30, 0x1000DC1F,
+ 0xE34, 0x10008C1F,
+ 0xE38, 0x02140102,
+ 0xE3C, 0x681604C2,
+ 0xE40, 0x01007C00,
+ 0xE44, 0x01004800,
+ 0xE48, 0xFB000000,
+ 0xE4C, 0x000028D1,
+ 0xE50, 0x1000DC1F,
+ 0xE54, 0x10008C1F,
+ 0xE58, 0x02140102,
+ 0xE5C, 0x28160D05,
+ 0xE60, 0x00000008,
+ 0xE68, 0x001B25A4,
+ 0xE6C, 0x01C00014,
+ 0xE70, 0x01C00016,
+ 0xE74, 0x02000014,
+ 0xE78, 0x02000014,
+ 0xE7C, 0x02000014,
+ 0xE80, 0x02000014,
+ 0xE84, 0x01C00014,
+ 0xE88, 0x02000014,
+ 0xE8C, 0x01C00014,
+ 0xED0, 0x01C00014,
+ 0xED4, 0x01C00014,
+ 0xED8, 0x01C00014,
+ 0xEDC, 0x00000014,
+ 0xEE0, 0x00000014,
+ 0xEE8, 0x21555448,
+ 0xEEC, 0x03C00014,
+ 0xF14, 0x00000003,
+ 0xF00, 0x00100300,
+ 0xF08, 0x0000800B,
+ 0xF0C, 0x0000F007,
+ 0xF10, 0x0000A487,
+ 0xF1C, 0x80000064,
+ 0xF38, 0x00030155,
+ 0xF3C, 0x0000003A,
+ 0xF4C, 0x13000000,
+ 0xF50, 0x00000000,
+ 0xF18, 0x00000000,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8723d_bb, rtw_phy_cfg_bb);
+
+static const struct rtw_phy_pg_cfg_pair rtw8723d_bb_pg[] = {
+ { 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003200, },
+ { 0, 0, 0, 0x0000086c, 0xffffff00, 0x32323200, },
+ { 0, 0, 0, 0x00000e00, 0xffffffff, 0x32343434, },
+ { 0, 0, 0, 0x00000e04, 0xffffffff, 0x28303032, },
+ { 0, 0, 0, 0x00000e10, 0xffffffff, 0x30323234, },
+ { 0, 0, 0, 0x00000e14, 0xffffffff, 0x26282830, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8723d_bb_pg);
+
+static const u32 rtw8723d_rf_a[] = {
+ 0x050, 0x0001C000,
+ 0x049, 0x0004AA00,
+ 0x000, 0x00010000,
+ 0x0B1, 0x00054573,
+ 0x0B4, 0x000508AB,
+ 0x0B7, 0x00014787,
+ 0x0B8, 0x000064CB,
+ 0x01B, 0x00073A40,
+ 0x051, 0x00038CAF,
+ 0x052, 0x000FCCA3,
+ 0x053, 0x00090F38,
+ 0x054, 0x00011083,
+ 0x057, 0x000D0000,
+ 0x08D, 0x00000A1A,
+ 0x082, 0x00082AAC,
+ 0x08E, 0x00076940,
+ 0x08F, 0x00088400,
+ 0x061, 0x00038CAF,
+ 0x062, 0x000FCCA3,
+ 0x063, 0x00090F38,
+ 0x064, 0x00011083,
+ 0x067, 0x000D0000,
+ 0x092, 0x00082AAC,
+ 0x0EF, 0x00000400,
+ 0x030, 0x000008CA,
+ 0x030, 0x000018CA,
+ 0x030, 0x000028CA,
+ 0x030, 0x000038CA,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00000400,
+ 0x030, 0x000008CA,
+ 0x030, 0x000018CA,
+ 0x030, 0x000028CA,
+ 0x030, 0x000038CA,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000007,
+ 0x03F, 0x0000CCA3,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00000100,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000007,
+ 0x03F, 0x0000CCA3,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x030, 0x0000002D,
+ 0x030, 0x0000122C,
+ 0x030, 0x0000222F,
+ 0x030, 0x0000326C,
+ 0x030, 0x0000466B,
+ 0x030, 0x0000566E,
+ 0x030, 0x000066EB,
+ 0x030, 0x000077EC,
+ 0x030, 0x000087EF,
+ 0x030, 0x000097F2,
+ 0x030, 0x0000A7F5,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00000800,
+ 0x030, 0x00000001,
+ 0x030, 0x00001011,
+ 0x030, 0x00002011,
+ 0x030, 0x00003013,
+ 0x030, 0x00004033,
+ 0x030, 0x00005033,
+ 0x030, 0x00006037,
+ 0x030, 0x0000703F,
+ 0x030, 0x0000803F,
+ 0x030, 0x0000903F,
+ 0x030, 0x0000A03F,
+ 0x0EE, 0x00000000,
+ 0x082, 0x00083B8C,
+ 0x0ED, 0x00000008,
+ 0x030, 0x000030F6,
+ 0x030, 0x00002004,
+ 0x030, 0x000010F6,
+ 0x030, 0x000000F6,
+ 0x0ED, 0x00000000,
+ 0x092, 0x00083B8C,
+ 0x0EC, 0x00000008,
+ 0x030, 0x000030F6,
+ 0x030, 0x00002004,
+ 0x030, 0x000010F6,
+ 0x030, 0x000000F6,
+ 0x0EC, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x030, 0x0001C11C,
+ 0x030, 0x000181F4,
+ 0x030, 0x00014108,
+ 0x030, 0x000101E4,
+ 0x030, 0x0000C11C,
+ 0x030, 0x000081F4,
+ 0x030, 0x00004108,
+ 0x030, 0x000001E4,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00010000,
+ 0x030, 0x0001C11C,
+ 0x030, 0x000181F4,
+ 0x030, 0x00014108,
+ 0x030, 0x000101E4,
+ 0x030, 0x0000C11C,
+ 0x030, 0x000081F4,
+ 0x030, 0x00004108,
+ 0x030, 0x000001E4,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000007,
+ 0x03E, 0x0000005F,
+ 0x03F, 0x000B3FDB,
+ 0x033, 0x00000004,
+ 0x03E, 0x0000005D,
+ 0x03F, 0x000BFFE0,
+ 0x033, 0x00000005,
+ 0x03E, 0x0000005D,
+ 0x03F, 0x000FBFCE,
+ 0x033, 0x00000006,
+ 0x03E, 0x0000005F,
+ 0x03F, 0x000A7FFB,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00000002,
+ 0x030, 0x00000001,
+ 0x030, 0x00002001,
+ 0x030, 0x00004001,
+ 0x030, 0x00007001,
+ 0x030, 0x00006001,
+ 0x030, 0x00020001,
+ 0x030, 0x00022001,
+ 0x030, 0x00024001,
+ 0x030, 0x00027001,
+ 0x030, 0x00026001,
+ 0x030, 0x00034001,
+ 0x030, 0x00037001,
+ 0x030, 0x00036001,
+ 0x030, 0x00008000,
+ 0x030, 0x0000A000,
+ 0x030, 0x0000C000,
+ 0x83000100, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x0000E024,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x0000E000,
+ 0xB0000000, 0x00000000,
+ 0x030, 0x0001C000,
+ 0x030, 0x0001E000,
+ 0x0EE, 0x00000000,
+ 0x0EE, 0x00020000,
+ 0x0EF, 0x00020000,
+ 0x030, 0x00000F75,
+ 0x030, 0x00002F55,
+ 0x030, 0x00003F75,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x018, 0x00008401,
+ 0xFFE, 0x00000000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8723d_rf_a, A);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8723d_txpwr_lmt[] = {
+ {0, 0, 0, 0, 1, 30, },
+ {2, 0, 0, 0, 1, 30, },
+ {1, 0, 0, 0, 1, 30, },
+ {0, 0, 0, 0, 2, 30, },
+ {2, 0, 0, 0, 2, 30, },
+ {1, 0, 0, 0, 2, 30, },
+ {0, 0, 0, 0, 3, 30, },
+ {2, 0, 0, 0, 3, 30, },
+ {1, 0, 0, 0, 3, 30, },
+ {0, 0, 0, 0, 4, 30, },
+ {2, 0, 0, 0, 4, 30, },
+ {1, 0, 0, 0, 4, 30, },
+ {0, 0, 0, 0, 5, 30, },
+ {2, 0, 0, 0, 5, 30, },
+ {1, 0, 0, 0, 5, 30, },
+ {0, 0, 0, 0, 6, 30, },
+ {2, 0, 0, 0, 6, 30, },
+ {1, 0, 0, 0, 6, 30, },
+ {0, 0, 0, 0, 7, 30, },
+ {2, 0, 0, 0, 7, 30, },
+ {1, 0, 0, 0, 7, 30, },
+ {0, 0, 0, 0, 8, 30, },
+ {2, 0, 0, 0, 8, 30, },
+ {1, 0, 0, 0, 8, 30, },
+ {0, 0, 0, 0, 9, 30, },
+ {2, 0, 0, 0, 9, 30, },
+ {1, 0, 0, 0, 9, 30, },
+ {0, 0, 0, 0, 10, 30, },
+ {2, 0, 0, 0, 10, 30, },
+ {1, 0, 0, 0, 10, 30, },
+ {0, 0, 0, 0, 11, 30, },
+ {2, 0, 0, 0, 11, 30, },
+ {1, 0, 0, 0, 11, 30, },
+ {0, 0, 0, 0, 12, 30, },
+ {2, 0, 0, 0, 12, 30, },
+ {1, 0, 0, 0, 12, 30, },
+ {0, 0, 0, 0, 13, 17, },
+ {2, 0, 0, 0, 13, 30, },
+ {1, 0, 0, 0, 13, 30, },
+ {0, 0, 0, 0, 14, 63, },
+ {2, 0, 0, 0, 14, 63, },
+ {1, 0, 0, 0, 14, 30, },
+ {0, 0, 0, 1, 1, 26, },
+ {2, 0, 0, 1, 1, 31, },
+ {1, 0, 0, 1, 1, 31, },
+ {0, 0, 0, 1, 2, 28, },
+ {2, 0, 0, 1, 2, 31, },
+ {1, 0, 0, 1, 2, 31, },
+ {0, 0, 0, 1, 3, 30, },
+ {2, 0, 0, 1, 3, 31, },
+ {1, 0, 0, 1, 3, 31, },
+ {0, 0, 0, 1, 4, 30, },
+ {2, 0, 0, 1, 4, 31, },
+ {1, 0, 0, 1, 4, 31, },
+ {0, 0, 0, 1, 5, 30, },
+ {2, 0, 0, 1, 5, 31, },
+ {1, 0, 0, 1, 5, 31, },
+ {0, 0, 0, 1, 6, 30, },
+ {2, 0, 0, 1, 6, 31, },
+ {1, 0, 0, 1, 6, 31, },
+ {0, 0, 0, 1, 7, 30, },
+ {2, 0, 0, 1, 7, 31, },
+ {1, 0, 0, 1, 7, 31, },
+ {0, 0, 0, 1, 8, 30, },
+ {2, 0, 0, 1, 8, 31, },
+ {1, 0, 0, 1, 8, 31, },
+ {0, 0, 0, 1, 9, 30, },
+ {2, 0, 0, 1, 9, 31, },
+ {1, 0, 0, 1, 9, 31, },
+ {0, 0, 0, 1, 10, 28, },
+ {2, 0, 0, 1, 10, 31, },
+ {1, 0, 0, 1, 10, 31, },
+ {0, 0, 0, 1, 11, 26, },
+ {2, 0, 0, 1, 11, 31, },
+ {1, 0, 0, 1, 11, 31, },
+ {0, 0, 0, 1, 12, 24, },
+ {2, 0, 0, 1, 12, 31, },
+ {1, 0, 0, 1, 12, 31, },
+ {0, 0, 0, 1, 13, 14, },
+ {2, 0, 0, 1, 13, 31, },
+ {1, 0, 0, 1, 13, 31, },
+ {0, 0, 0, 1, 14, 63, },
+ {2, 0, 0, 1, 14, 63, },
+ {1, 0, 0, 1, 14, 63, },
+ {0, 0, 0, 2, 1, 24, },
+ {2, 0, 0, 2, 1, 31, },
+ {1, 0, 0, 2, 1, 31, },
+ {0, 0, 0, 2, 2, 26, },
+ {2, 0, 0, 2, 2, 31, },
+ {1, 0, 0, 2, 2, 31, },
+ {0, 0, 0, 2, 3, 30, },
+ {2, 0, 0, 2, 3, 31, },
+ {1, 0, 0, 2, 3, 31, },
+ {0, 0, 0, 2, 4, 30, },
+ {2, 0, 0, 2, 4, 31, },
+ {1, 0, 0, 2, 4, 31, },
+ {0, 0, 0, 2, 5, 30, },
+ {2, 0, 0, 2, 5, 31, },
+ {1, 0, 0, 2, 5, 31, },
+ {0, 0, 0, 2, 6, 30, },
+ {2, 0, 0, 2, 6, 31, },
+ {1, 0, 0, 2, 6, 31, },
+ {0, 0, 0, 2, 7, 30, },
+ {2, 0, 0, 2, 7, 31, },
+ {1, 0, 0, 2, 7, 31, },
+ {0, 0, 0, 2, 8, 30, },
+ {2, 0, 0, 2, 8, 31, },
+ {1, 0, 0, 2, 8, 31, },
+ {0, 0, 0, 2, 9, 30, },
+ {2, 0, 0, 2, 9, 31, },
+ {1, 0, 0, 2, 9, 31, },
+ {0, 0, 0, 2, 10, 26, },
+ {2, 0, 0, 2, 10, 31, },
+ {1, 0, 0, 2, 10, 31, },
+ {0, 0, 0, 2, 11, 24, },
+ {2, 0, 0, 2, 11, 31, },
+ {1, 0, 0, 2, 11, 31, },
+ {0, 0, 0, 2, 12, 23, },
+ {2, 0, 0, 2, 12, 31, },
+ {1, 0, 0, 2, 12, 31, },
+ {0, 0, 0, 2, 13, 13, },
+ {2, 0, 0, 2, 13, 31, },
+ {1, 0, 0, 2, 13, 31, },
+ {0, 0, 0, 2, 14, 63, },
+ {2, 0, 0, 2, 14, 63, },
+ {1, 0, 0, 2, 14, 63, },
+ {0, 0, 0, 3, 1, 28, },
+ {2, 0, 0, 3, 1, 30, },
+ {1, 0, 0, 3, 1, 30, },
+ {0, 0, 0, 3, 2, 28, },
+ {2, 0, 0, 3, 2, 30, },
+ {1, 0, 0, 3, 2, 30, },
+ {0, 0, 0, 3, 3, 30, },
+ {2, 0, 0, 3, 3, 30, },
+ {1, 0, 0, 3, 3, 30, },
+ {0, 0, 0, 3, 4, 30, },
+ {2, 0, 0, 3, 4, 30, },
+ {1, 0, 0, 3, 4, 30, },
+ {0, 0, 0, 3, 5, 30, },
+ {2, 0, 0, 3, 5, 30, },
+ {1, 0, 0, 3, 5, 30, },
+ {0, 0, 0, 3, 6, 30, },
+ {2, 0, 0, 3, 6, 30, },
+ {1, 0, 0, 3, 6, 30, },
+ {0, 0, 0, 3, 7, 30, },
+ {2, 0, 0, 3, 7, 30, },
+ {1, 0, 0, 3, 7, 30, },
+ {0, 0, 0, 3, 8, 30, },
+ {2, 0, 0, 3, 8, 30, },
+ {1, 0, 0, 3, 8, 30, },
+ {0, 0, 0, 3, 9, 28, },
+ {2, 0, 0, 3, 9, 30, },
+ {1, 0, 0, 3, 9, 30, },
+ {0, 0, 0, 3, 10, 28, },
+ {2, 0, 0, 3, 10, 30, },
+ {1, 0, 0, 3, 10, 30, },
+ {0, 0, 0, 3, 11, 28, },
+ {2, 0, 0, 3, 11, 30, },
+ {1, 0, 0, 3, 11, 30, },
+ {0, 0, 0, 3, 12, 63, },
+ {2, 0, 0, 3, 12, 30, },
+ {1, 0, 0, 3, 12, 30, },
+ {0, 0, 0, 3, 13, 63, },
+ {2, 0, 0, 3, 13, 30, },
+ {1, 0, 0, 3, 13, 30, },
+ {0, 0, 0, 3, 14, 63, },
+ {2, 0, 0, 3, 14, 63, },
+ {1, 0, 0, 3, 14, 63, },
+ {0, 0, 1, 2, 1, 63, },
+ {2, 0, 1, 2, 1, 63, },
+ {1, 0, 1, 2, 1, 63, },
+ {0, 0, 1, 2, 2, 63, },
+ {2, 0, 1, 2, 2, 63, },
+ {1, 0, 1, 2, 2, 63, },
+ {0, 0, 1, 2, 3, 24, },
+ {2, 0, 1, 2, 3, 30, },
+ {1, 0, 1, 2, 3, 30, },
+ {0, 0, 1, 2, 4, 24, },
+ {2, 0, 1, 2, 4, 30, },
+ {1, 0, 1, 2, 4, 30, },
+ {0, 0, 1, 2, 5, 24, },
+ {2, 0, 1, 2, 5, 30, },
+ {1, 0, 1, 2, 5, 30, },
+ {0, 0, 1, 2, 6, 24, },
+ {2, 0, 1, 2, 6, 30, },
+ {1, 0, 1, 2, 6, 30, },
+ {0, 0, 1, 2, 7, 24, },
+ {2, 0, 1, 2, 7, 30, },
+ {1, 0, 1, 2, 7, 30, },
+ {0, 0, 1, 2, 8, 24, },
+ {2, 0, 1, 2, 8, 30, },
+ {1, 0, 1, 2, 8, 30, },
+ {0, 0, 1, 2, 9, 24, },
+ {2, 0, 1, 2, 9, 30, },
+ {1, 0, 1, 2, 9, 30, },
+ {0, 0, 1, 2, 10, 22, },
+ {2, 0, 1, 2, 10, 30, },
+ {1, 0, 1, 2, 10, 30, },
+ {0, 0, 1, 2, 11, 20, },
+ {2, 0, 1, 2, 11, 30, },
+ {1, 0, 1, 2, 11, 30, },
+ {0, 0, 1, 2, 12, 63, },
+ {2, 0, 1, 2, 12, 30, },
+ {1, 0, 1, 2, 12, 30, },
+ {0, 0, 1, 2, 13, 63, },
+ {2, 0, 1, 2, 13, 30, },
+ {1, 0, 1, 2, 13, 30, },
+ {0, 0, 1, 2, 14, 63, },
+ {2, 0, 1, 2, 14, 63, },
+ {1, 0, 1, 2, 14, 63, },
+ {0, 0, 1, 3, 1, 63, },
+ {2, 0, 1, 3, 1, 63, },
+ {1, 0, 1, 3, 1, 63, },
+ {0, 0, 1, 3, 2, 63, },
+ {2, 0, 1, 3, 2, 63, },
+ {1, 0, 1, 3, 2, 63, },
+ {0, 0, 1, 3, 3, 26, },
+ {2, 0, 1, 3, 3, 26, },
+ {1, 0, 1, 3, 3, 26, },
+ {0, 0, 1, 3, 4, 26, },
+ {2, 0, 1, 3, 4, 26, },
+ {1, 0, 1, 3, 4, 26, },
+ {0, 0, 1, 3, 5, 26, },
+ {2, 0, 1, 3, 5, 26, },
+ {1, 0, 1, 3, 5, 26, },
+ {0, 0, 1, 3, 6, 26, },
+ {2, 0, 1, 3, 6, 26, },
+ {1, 0, 1, 3, 6, 26, },
+ {0, 0, 1, 3, 7, 26, },
+ {2, 0, 1, 3, 7, 26, },
+ {1, 0, 1, 3, 7, 26, },
+ {0, 0, 1, 3, 8, 26, },
+ {2, 0, 1, 3, 8, 26, },
+ {1, 0, 1, 3, 8, 26, },
+ {0, 0, 1, 3, 9, 26, },
+ {2, 0, 1, 3, 9, 26, },
+ {1, 0, 1, 3, 9, 26, },
+ {0, 0, 1, 3, 10, 26, },
+ {2, 0, 1, 3, 10, 26, },
+ {1, 0, 1, 3, 10, 26, },
+ {0, 0, 1, 3, 11, 26, },
+ {2, 0, 1, 3, 11, 26, },
+ {1, 0, 1, 3, 11, 26, },
+ {0, 0, 1, 3, 12, 63, },
+ {2, 0, 1, 3, 12, 26, },
+ {1, 0, 1, 3, 12, 26, },
+ {0, 0, 1, 3, 13, 63, },
+ {2, 0, 1, 3, 13, 26, },
+ {1, 0, 1, 3, 13, 26, },
+ {0, 0, 1, 3, 14, 63, },
+ {2, 0, 1, 3, 14, 63, },
+ {1, 0, 1, 3, 14, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8723d_txpwr_lmt);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d_table.h b/drivers/net/wireless/realtek/rtw88/rtw8723d_table.h
new file mode 100644
index 000000000000..4db996a1d982
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d_table.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8723D_TABLE_H__
+#define __RTW8723D_TABLE_H__
+
+extern const struct rtw_table rtw8723d_mac_tbl;
+extern const struct rtw_table rtw8723d_agc_tbl;
+extern const struct rtw_table rtw8723d_bb_tbl;
+extern const struct rtw_table rtw8723d_bb_pg_tbl;
+extern const struct rtw_table rtw8723d_rf_a_tbl;
+extern const struct rtw_table rtw8723d_txpwr_lmt_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 4dd7d4143b04..45636382dafd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -998,8 +998,9 @@ static bool rtw8822b_check_rf_path(u8 antenna)
}
}
-static void rtw8822b_set_antenna(struct rtw_dev *rtwdev, u8 antenna_tx,
- u8 antenna_rx)
+static int rtw8822b_set_antenna(struct rtw_dev *rtwdev,
+ u32 antenna_tx,
+ u32 antenna_rx)
{
struct rtw_hal *hal = &rtwdev->hal;
@@ -1007,16 +1008,21 @@ static void rtw8822b_set_antenna(struct rtw_dev *rtwdev, u8 antenna_tx,
antenna_tx, antenna_rx);
if (!rtw8822b_check_rf_path(antenna_tx)) {
- rtw_info(rtwdev, "unsupport tx path, set to default path ab\n");
- antenna_tx = BB_PATH_AB;
+ rtw_info(rtwdev, "unsupport tx path 0x%x\n", antenna_tx);
+ return -EINVAL;
}
+
if (!rtw8822b_check_rf_path(antenna_rx)) {
- rtw_info(rtwdev, "unsupport rx path, set to default path ab\n");
- antenna_rx = BB_PATH_AB;
+ rtw_info(rtwdev, "unsupport rx path 0x%x\n", antenna_rx);
+ return -EINVAL;
}
+
hal->antenna_tx = antenna_tx;
hal->antenna_rx = antenna_rx;
+
rtw8822b_config_trx_mode(rtwdev, antenna_tx, antenna_rx, false);
+
+ return 0;
}
static void rtw8822b_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
@@ -1024,7 +1030,7 @@ static void rtw8822b_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
u8 ldo_pwr;
ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
- ldo_pwr = enable ? ldo_pwr | BIT(7) : ldo_pwr & ~BIT(7);
+ ldo_pwr = enable ? ldo_pwr | BIT_LDO25_EN : ldo_pwr & ~BIT_LDO25_EN;
rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
}
@@ -2402,6 +2408,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
.fw_name = "rtw88/rtw8822b_fw.bin",
+ .wlan_cpu = RTW_WCPU_11AC,
.tx_pkt_desc_sz = 48,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
@@ -2428,6 +2435,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.rqpn_table = rqpn_table_8822b,
.intf_table = &phy_para_table_8822b,
.dig = rtw8822b_dig,
+ .dig_cck = NULL,
.rf_base_addr = {0x2800, 0x2c00},
.rf_sipi_addr = {0xc90, 0xe90},
.mac_tbl = &rtw8822b_mac_tbl,
@@ -2440,6 +2448,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.iqk_threshold = 8,
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
+ .rx_ldpc = true,
.coex_para_ver = 0x19062706,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index dc07e6be38e8..64b77a7cbffd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -15,6 +15,7 @@
#include "debug.h"
#include "util.h"
#include "bf.h"
+#include "efuse.h"
static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
@@ -1000,10 +1001,122 @@ static void rtw8822c_rf_x2_check(struct rtw_dev *rtwdev)
}
}
+static void rtw8822c_set_power_trim(struct rtw_dev *rtwdev, s8 bb_gain[2][8])
+{
+#define RF_SET_POWER_TRIM(_path, _seq, _idx) \
+ do { \
+ rtw_write_rf(rtwdev, _path, 0x33, RFREG_MASK, _seq); \
+ rtw_write_rf(rtwdev, _path, 0x3f, RFREG_MASK, \
+ bb_gain[_path][_idx]); \
+ } while (0)
+ u8 path;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rtw_write_rf(rtwdev, path, 0xee, BIT(19), 1);
+ RF_SET_POWER_TRIM(path, 0x0, 0);
+ RF_SET_POWER_TRIM(path, 0x1, 1);
+ RF_SET_POWER_TRIM(path, 0x2, 2);
+ RF_SET_POWER_TRIM(path, 0x3, 2);
+ RF_SET_POWER_TRIM(path, 0x4, 3);
+ RF_SET_POWER_TRIM(path, 0x5, 4);
+ RF_SET_POWER_TRIM(path, 0x6, 5);
+ RF_SET_POWER_TRIM(path, 0x7, 6);
+ RF_SET_POWER_TRIM(path, 0x8, 7);
+ RF_SET_POWER_TRIM(path, 0x9, 3);
+ RF_SET_POWER_TRIM(path, 0xa, 4);
+ RF_SET_POWER_TRIM(path, 0xb, 5);
+ RF_SET_POWER_TRIM(path, 0xc, 6);
+ RF_SET_POWER_TRIM(path, 0xd, 7);
+ RF_SET_POWER_TRIM(path, 0xe, 7);
+ rtw_write_rf(rtwdev, path, 0xee, BIT(19), 0);
+ }
+#undef RF_SET_POWER_TRIM
+}
+
+static void rtw8822c_power_trim(struct rtw_dev *rtwdev)
+{
+ u8 pg_pwr = 0xff, i, path, idx;
+ s8 bb_gain[2][8] = {0};
+ u16 rf_efuse_2g[3] = {PPG_2GL_TXAB, PPG_2GM_TXAB, PPG_2GH_TXAB};
+ u16 rf_efuse_5g[2][5] = {{PPG_5GL1_TXA, PPG_5GL2_TXA, PPG_5GM1_TXA,
+ PPG_5GM2_TXA, PPG_5GH1_TXA},
+ {PPG_5GL1_TXB, PPG_5GL2_TXB, PPG_5GM1_TXB,
+ PPG_5GM2_TXB, PPG_5GH1_TXB} };
+ bool set = false;
+
+ for (i = 0; i < ARRAY_SIZE(rf_efuse_2g); i++) {
+ rtw_read8_physical_efuse(rtwdev, rf_efuse_2g[i], &pg_pwr);
+ if (pg_pwr == EFUSE_READ_FAIL)
+ continue;
+ set = true;
+ bb_gain[RF_PATH_A][i] = FIELD_GET(PPG_2G_A_MASK, pg_pwr);
+ bb_gain[RF_PATH_B][i] = FIELD_GET(PPG_2G_B_MASK, pg_pwr);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rf_efuse_5g[0]); i++) {
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rtw_read8_physical_efuse(rtwdev, rf_efuse_5g[path][i],
+ &pg_pwr);
+ if (pg_pwr == EFUSE_READ_FAIL)
+ continue;
+ set = true;
+ idx = i + ARRAY_SIZE(rf_efuse_2g);
+ bb_gain[path][idx] = FIELD_GET(PPG_5G_MASK, pg_pwr);
+ }
+ }
+ if (set)
+ rtw8822c_set_power_trim(rtwdev, bb_gain);
+
+ rtw_write32_mask(rtwdev, REG_DIS_DPD, DIS_DPD_MASK, DIS_DPD_RATEALL);
+}
+
+static void rtw8822c_thermal_trim(struct rtw_dev *rtwdev)
+{
+ u16 rf_efuse[2] = {PPG_THERMAL_A, PPG_THERMAL_B};
+ u8 pg_therm = 0xff, thermal[2] = {0}, path;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rtw_read8_physical_efuse(rtwdev, rf_efuse[path], &pg_therm);
+ if (pg_therm == EFUSE_READ_FAIL)
+ return;
+ /* The efuse value of BIT(0) shall be moved to BIT(3), and the value
+ * of BIT(1) to BIT(3) should be right-shifted by 1 bit.
+ */
+ thermal[path] = FIELD_GET(GENMASK(3, 1), pg_therm);
+ thermal[path] |= FIELD_PREP(BIT(3), pg_therm & BIT(0));
+ rtw_write_rf(rtwdev, path, 0x43, RF_THEMAL_MASK, thermal[path]);
+ }
+}
+
+static void rtw8822c_pa_bias(struct rtw_dev *rtwdev)
+{
+ u16 rf_efuse_2g[2] = {PPG_PABIAS_2GA, PPG_PABIAS_2GB};
+ u16 rf_efuse_5g[2] = {PPG_PABIAS_5GA, PPG_PABIAS_5GB};
+ u8 pg_pa_bias = 0xff, path;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rtw_read8_physical_efuse(rtwdev, rf_efuse_2g[path],
+ &pg_pa_bias);
+ if (pg_pa_bias == EFUSE_READ_FAIL)
+ return;
+ pg_pa_bias = FIELD_GET(PPG_PABIAS_MASK, pg_pa_bias);
+ rtw_write_rf(rtwdev, path, 0x60, RF_PABIAS_2G_MASK, pg_pa_bias);
+ }
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rtw_read8_physical_efuse(rtwdev, rf_efuse_5g[path],
+ &pg_pa_bias);
+ pg_pa_bias = FIELD_GET(PPG_PABIAS_MASK, pg_pa_bias);
+ rtw_write_rf(rtwdev, path, 0x60, RF_PABIAS_5G_MASK, pg_pa_bias);
+ }
+}
+
static void rtw8822c_rf_init(struct rtw_dev *rtwdev)
{
rtw8822c_rf_dac_cal(rtwdev);
rtw8822c_rf_x2_check(rtwdev);
+ rtw8822c_thermal_trim(rtwdev);
+ rtw8822c_power_trim(rtwdev);
+ rtw8822c_pa_bias(rtwdev);
}
static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
@@ -1890,6 +2003,40 @@ static void rtw8822c_set_tx_power_index(struct rtw_dev *rtwdev)
}
}
+static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
+ u32 antenna_tx,
+ u32 antenna_rx)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ switch (antenna_tx) {
+ case BB_PATH_A:
+ case BB_PATH_B:
+ case BB_PATH_AB:
+ break;
+ default:
+ rtw_info(rtwdev, "unsupport tx path 0x%x\n", antenna_tx);
+ return -EINVAL;
+ }
+
+ /* path B alone is not available for RX */
+ switch (antenna_rx) {
+ case BB_PATH_A:
+ case BB_PATH_AB:
+ break;
+ default:
+ rtw_info(rtwdev, "unsupport rx path 0x%x\n", antenna_rx);
+ return -EINVAL;
+ }
+
+ hal->antenna_tx = antenna_tx;
+ hal->antenna_rx = antenna_rx;
+
+ rtw8822c_config_trx_mode(rtwdev, antenna_tx, antenna_rx, false);
+
+ return 0;
+}
+
static void rtw8822c_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
{
u8 ldo_pwr;
@@ -3752,6 +3899,7 @@ static const struct rtw_rfe_def rtw8822c_rfe_defs[] = {
[0] = RTW_DEF_RFE(8822c, 0, 0),
[1] = RTW_DEF_RFE(8822c, 0, 0),
[2] = RTW_DEF_RFE(8822c, 0, 0),
+ [5] = RTW_DEF_RFE(8822c, 0, 5),
};
static const struct rtw_hw_reg rtw8822c_dig[] = {
@@ -3794,6 +3942,7 @@ static struct rtw_chip_ops rtw8822c_ops = {
.read_rf = rtw_phy_read_rf,
.write_rf = rtw_phy_write_rf_reg_mix,
.set_tx_power_index = rtw8822c_set_tx_power_index,
+ .set_antenna = rtw8822c_set_antenna,
.cfg_ldo25 = rtw8822c_cfg_ldo25,
.false_alarm_statistics = rtw8822c_false_alarm_statistics,
.dpk_track = rtw8822c_dpk_track,
@@ -4121,6 +4270,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
.fw_name = "rtw88/rtw8822c_fw.bin",
+ .wlan_cpu = RTW_WCPU_11AC,
.tx_pkt_desc_sz = 48,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
@@ -4147,6 +4297,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.rqpn_table = rqpn_table_8822c,
.intf_table = &phy_para_table_8822c,
.dig = rtw8822c_dig,
+ .dig_cck = NULL,
.rf_base_addr = {0x3c00, 0x4c00},
.rf_sipi_addr = {0x1808, 0x4108},
.mac_tbl = &rtw8822c_mac_tbl,
@@ -4162,6 +4313,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.iqk_threshold = 8,
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
+ .rx_ldpc = true,
#ifdef CONFIG_PM
.wow_fw_name = "rtw88/rtw8822c_wow_fw.bin",
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index dfd8662a0c0e..32b4771e04d0 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -309,4 +309,32 @@ const struct rtw_table name ## _tbl = { \
#define BIT_GS_PWSF GENMASK(27, 0)
#define BIT_RPT_DGAIN GENMASK(27, 16)
#define BIT_TX_CFIR GENMASK(31, 30)
+
+#define PPG_THERMAL_A 0x1ef
+#define PPG_THERMAL_B 0x1b0
+#define RF_THEMAL_MASK GENMASK(19, 16)
+#define PPG_2GL_TXAB 0x1d4
+#define PPG_2GM_TXAB 0x1ee
+#define PPG_2GH_TXAB 0x1d2
+#define PPG_2G_A_MASK GENMASK(3, 0)
+#define PPG_2G_B_MASK GENMASK(7, 4)
+#define PPG_5GL1_TXA 0x1ec
+#define PPG_5GL2_TXA 0x1e8
+#define PPG_5GM1_TXA 0x1e4
+#define PPG_5GM2_TXA 0x1e0
+#define PPG_5GH1_TXA 0x1dc
+#define PPG_5GL1_TXB 0x1eb
+#define PPG_5GL2_TXB 0x1e7
+#define PPG_5GM1_TXB 0x1e3
+#define PPG_5GM2_TXB 0x1df
+#define PPG_5GH1_TXB 0x1db
+#define PPG_5G_MASK GENMASK(4, 0)
+#define PPG_PABIAS_2GA 0x1d6
+#define PPG_PABIAS_2GB 0x1d5
+#define PPG_PABIAS_5GA 0x1d8
+#define PPG_PABIAS_5GB 0x1d7
+#define PPG_PABIAS_MASK GENMASK(3, 0)
+#define RF_PABIAS_2G_MASK GENMASK(15, 12)
+#define RF_PABIAS_5G_MASK GENMASK(19, 16)
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index d102a2c27757..08d01a7bb1bf 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -13,6 +13,7 @@ static const u32 rtw8822c_mac[] = {
RTW_DECL_TABLE_PHY_COND(rtw8822c_mac, rtw_phy_cfg_mac);
static const u32 rtw8822c_agc[] = {
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x300001FF,
0x1D90, 0x300101FE,
0x1D90, 0x300201FD,
@@ -77,51 +78,313 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x303D0003,
0x1D90, 0x303E0002,
0x1D90, 0x303F0001,
- 0x1D90, 0x304000FF,
- 0x1D90, 0x304100FF,
- 0x1D90, 0x304200FF,
- 0x1D90, 0x304300FF,
- 0x1D90, 0x304400FE,
- 0x1D90, 0x304500FD,
- 0x1D90, 0x304600FC,
- 0x1D90, 0x304700FB,
- 0x1D90, 0x304800FA,
- 0x1D90, 0x304900F9,
- 0x1D90, 0x304A00F8,
- 0x1D90, 0x304B00F7,
- 0x1D90, 0x304C00F6,
- 0x1D90, 0x304D00F5,
- 0x1D90, 0x304E00F4,
- 0x1D90, 0x304F00F3,
- 0x1D90, 0x305000F2,
- 0x1D90, 0x305100F1,
- 0x1D90, 0x305200F0,
- 0x1D90, 0x305300EF,
- 0x1D90, 0x305400EE,
- 0x1D90, 0x305500ED,
- 0x1D90, 0x305600EC,
- 0x1D90, 0x305700EB,
- 0x1D90, 0x305800EA,
- 0x1D90, 0x305900E9,
- 0x1D90, 0x305A00E8,
- 0x1D90, 0x305B00E7,
- 0x1D90, 0x305C00E6,
- 0x1D90, 0x305D00C7,
- 0x1D90, 0x305E00C6,
- 0x1D90, 0x305F00C5,
- 0x1D90, 0x306000C4,
- 0x1D90, 0x306100C3,
- 0x1D90, 0x306200C2,
- 0x1D90, 0x306300A4,
- 0x1D90, 0x306400A3,
- 0x1D90, 0x306500A2,
- 0x1D90, 0x30660086,
- 0x1D90, 0x30670085,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x300001FF,
+ 0x1D90, 0x300101FE,
+ 0x1D90, 0x300201FD,
+ 0x1D90, 0x300301FC,
+ 0x1D90, 0x300401FB,
+ 0x1D90, 0x300501FA,
+ 0x1D90, 0x300601F9,
+ 0x1D90, 0x300701F8,
+ 0x1D90, 0x300801F7,
+ 0x1D90, 0x300901F6,
+ 0x1D90, 0x300A01F5,
+ 0x1D90, 0x300B01F4,
+ 0x1D90, 0x300C01F3,
+ 0x1D90, 0x300D01F2,
+ 0x1D90, 0x300E01F1,
+ 0x1D90, 0x300F01F0,
+ 0x1D90, 0x301001EF,
+ 0x1D90, 0x301101EE,
+ 0x1D90, 0x301201ED,
+ 0x1D90, 0x301301EC,
+ 0x1D90, 0x301401EB,
+ 0x1D90, 0x301501EA,
+ 0x1D90, 0x301601E9,
+ 0x1D90, 0x301701E8,
+ 0x1D90, 0x301801E7,
+ 0x1D90, 0x301901E5,
+ 0x1D90, 0x301A01E4,
+ 0x1D90, 0x301B01C5,
+ 0x1D90, 0x301C01C4,
+ 0x1D90, 0x301D01C3,
+ 0x1D90, 0x301E01C2,
+ 0x1D90, 0x301F0188,
+ 0x1D90, 0x30200187,
+ 0x1D90, 0x30210186,
+ 0x1D90, 0x30220184,
+ 0x1D90, 0x30230183,
+ 0x1D90, 0x30240182,
+ 0x1D90, 0x30250181,
+ 0x1D90, 0x30260148,
+ 0x1D90, 0x30270147,
+ 0x1D90, 0x30280146,
+ 0x1D90, 0x30290144,
+ 0x1D90, 0x302A0143,
+ 0x1D90, 0x302B0142,
+ 0x1D90, 0x302C0141,
+ 0x1D90, 0x302D00C8,
+ 0x1D90, 0x302E00C7,
+ 0x1D90, 0x302F00C6,
+ 0x1D90, 0x303000C5,
+ 0x1D90, 0x303100C4,
+ 0x1D90, 0x303200C3,
+ 0x1D90, 0x30330048,
+ 0x1D90, 0x30340047,
+ 0x1D90, 0x30350046,
+ 0x1D90, 0x30360045,
+ 0x1D90, 0x30370025,
+ 0x1D90, 0x30380024,
+ 0x1D90, 0x30390023,
+ 0x1D90, 0x303A0022,
+ 0x1D90, 0x303B0021,
+ 0x1D90, 0x303C0020,
+ 0x1D90, 0x303D0003,
+ 0x1D90, 0x303E0002,
+ 0x1D90, 0x303F0001,
+ 0xA0000000, 0x00000000,
+ 0x1D90, 0x300001FF,
+ 0x1D90, 0x300101FE,
+ 0x1D90, 0x300201FD,
+ 0x1D90, 0x300301FC,
+ 0x1D90, 0x300401FB,
+ 0x1D90, 0x300501FA,
+ 0x1D90, 0x300601F9,
+ 0x1D90, 0x300701F8,
+ 0x1D90, 0x300801F7,
+ 0x1D90, 0x300901F6,
+ 0x1D90, 0x300A01F5,
+ 0x1D90, 0x300B01F4,
+ 0x1D90, 0x300C01F3,
+ 0x1D90, 0x300D01F2,
+ 0x1D90, 0x300E01F1,
+ 0x1D90, 0x300F01F0,
+ 0x1D90, 0x301001EF,
+ 0x1D90, 0x301101EE,
+ 0x1D90, 0x301201ED,
+ 0x1D90, 0x301301EC,
+ 0x1D90, 0x301401EB,
+ 0x1D90, 0x301501EA,
+ 0x1D90, 0x301601E9,
+ 0x1D90, 0x301701E8,
+ 0x1D90, 0x301801E7,
+ 0x1D90, 0x301901E5,
+ 0x1D90, 0x301A01E4,
+ 0x1D90, 0x301B01C5,
+ 0x1D90, 0x301C01C4,
+ 0x1D90, 0x301D01C3,
+ 0x1D90, 0x301E01C2,
+ 0x1D90, 0x301F0188,
+ 0x1D90, 0x30200187,
+ 0x1D90, 0x30210186,
+ 0x1D90, 0x30220184,
+ 0x1D90, 0x30230183,
+ 0x1D90, 0x30240182,
+ 0x1D90, 0x30250181,
+ 0x1D90, 0x30260148,
+ 0x1D90, 0x30270147,
+ 0x1D90, 0x30280146,
+ 0x1D90, 0x30290144,
+ 0x1D90, 0x302A0143,
+ 0x1D90, 0x302B0142,
+ 0x1D90, 0x302C0141,
+ 0x1D90, 0x302D00C8,
+ 0x1D90, 0x302E00C7,
+ 0x1D90, 0x302F00C6,
+ 0x1D90, 0x303000C5,
+ 0x1D90, 0x303100C4,
+ 0x1D90, 0x303200C3,
+ 0x1D90, 0x30330048,
+ 0x1D90, 0x30340047,
+ 0x1D90, 0x30350046,
+ 0x1D90, 0x30360045,
+ 0x1D90, 0x30370025,
+ 0x1D90, 0x30380024,
+ 0x1D90, 0x30390023,
+ 0x1D90, 0x303A0022,
+ 0x1D90, 0x303B0021,
+ 0x1D90, 0x303C0020,
+ 0x1D90, 0x303D0003,
+ 0x1D90, 0x303E0002,
+ 0x1D90, 0x303F0001,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x304001FD,
+ 0x1D90, 0x304101FC,
+ 0x1D90, 0x304201FB,
+ 0x1D90, 0x304301FA,
+ 0x1D90, 0x304401F9,
+ 0x1D90, 0x304501F8,
+ 0x1D90, 0x304601F7,
+ 0x1D90, 0x304701F6,
+ 0x1D90, 0x304801F5,
+ 0x1D90, 0x304901F4,
+ 0x1D90, 0x304A01F3,
+ 0x1D90, 0x304B01F2,
+ 0x1D90, 0x304C01F1,
+ 0x1D90, 0x304D01F0,
+ 0x1D90, 0x304E01EF,
+ 0x1D90, 0x304F00EE,
+ 0x1D90, 0x305000ED,
+ 0x1D90, 0x305100EC,
+ 0x1D90, 0x305200EB,
+ 0x1D90, 0x305300EA,
+ 0x1D90, 0x305400E9,
+ 0x1D90, 0x305500E8,
+ 0x1D90, 0x305600E7,
+ 0x1D90, 0x305700E6,
+ 0x1D90, 0x305800E5,
+ 0x1D90, 0x305900E4,
+ 0x1D90, 0x305A00E3,
+ 0x1D90, 0x305B00C3,
+ 0x1D90, 0x305C00C2,
+ 0x1D90, 0x305D00A4,
+ 0x1D90, 0x305E00A3,
+ 0x1D90, 0x305F00A2,
+ 0x1D90, 0x306000A1,
+ 0x1D90, 0x30610085,
+ 0x1D90, 0x30620084,
+ 0x1D90, 0x30630083,
+ 0x1D90, 0x30640082,
+ 0x1D90, 0x30650069,
+ 0x1D90, 0x30660068,
+ 0x1D90, 0x30670067,
+ 0x1D90, 0x30680066,
+ 0x1D90, 0x30690065,
+ 0x1D90, 0x306A0064,
+ 0x1D90, 0x306B0063,
+ 0x1D90, 0x306C0043,
+ 0x1D90, 0x306D0042,
+ 0x1D90, 0x306E0041,
+ 0x1D90, 0x306F0025,
+ 0x1D90, 0x30700024,
+ 0x1D90, 0x30710023,
+ 0x1D90, 0x30720022,
+ 0x1D90, 0x30730021,
+ 0x1D90, 0x30740020,
+ 0x1D90, 0x30750004,
+ 0x1D90, 0x30760003,
+ 0x1D90, 0x30770002,
+ 0x1D90, 0x30780001,
+ 0x1D90, 0x30790000,
+ 0x1D90, 0x307A0000,
+ 0x1D90, 0x307B0000,
+ 0x1D90, 0x307C0000,
+ 0x1D90, 0x307D0000,
+ 0x1D90, 0x307E0000,
+ 0x1D90, 0x307F0000,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x304001FD,
+ 0x1D90, 0x304101FC,
+ 0x1D90, 0x304201FB,
+ 0x1D90, 0x304301FA,
+ 0x1D90, 0x304401F9,
+ 0x1D90, 0x304501F8,
+ 0x1D90, 0x304601F7,
+ 0x1D90, 0x304701F6,
+ 0x1D90, 0x304801F5,
+ 0x1D90, 0x304901F4,
+ 0x1D90, 0x304A01F3,
+ 0x1D90, 0x304B01F2,
+ 0x1D90, 0x304C01F1,
+ 0x1D90, 0x304D01F0,
+ 0x1D90, 0x304E01EF,
+ 0x1D90, 0x304F00EE,
+ 0x1D90, 0x305000ED,
+ 0x1D90, 0x305100EC,
+ 0x1D90, 0x305200EB,
+ 0x1D90, 0x305300EA,
+ 0x1D90, 0x305400E9,
+ 0x1D90, 0x305500E8,
+ 0x1D90, 0x305600E7,
+ 0x1D90, 0x305700E6,
+ 0x1D90, 0x305800E5,
+ 0x1D90, 0x305900E4,
+ 0x1D90, 0x305A00E3,
+ 0x1D90, 0x305B00C3,
+ 0x1D90, 0x305C00C2,
+ 0x1D90, 0x305D00A4,
+ 0x1D90, 0x305E00A3,
+ 0x1D90, 0x305F00A2,
+ 0x1D90, 0x306000A1,
+ 0x1D90, 0x30610085,
+ 0x1D90, 0x30620084,
+ 0x1D90, 0x30630083,
+ 0x1D90, 0x30640082,
+ 0x1D90, 0x30650069,
+ 0x1D90, 0x30660068,
+ 0x1D90, 0x30670067,
+ 0x1D90, 0x30680066,
+ 0x1D90, 0x30690065,
+ 0x1D90, 0x306A0064,
+ 0x1D90, 0x306B0063,
+ 0x1D90, 0x306C0043,
+ 0x1D90, 0x306D0042,
+ 0x1D90, 0x306E0041,
+ 0x1D90, 0x306F0025,
+ 0x1D90, 0x30700024,
+ 0x1D90, 0x30710023,
+ 0x1D90, 0x30720022,
+ 0x1D90, 0x30730021,
+ 0x1D90, 0x30740020,
+ 0x1D90, 0x30750004,
+ 0x1D90, 0x30760003,
+ 0x1D90, 0x30770002,
+ 0x1D90, 0x30780001,
+ 0x1D90, 0x30790000,
+ 0x1D90, 0x307A0000,
+ 0x1D90, 0x307B0000,
+ 0x1D90, 0x307C0000,
+ 0x1D90, 0x307D0000,
+ 0x1D90, 0x307E0000,
+ 0x1D90, 0x307F0000,
+ 0xA0000000, 0x00000000,
+ 0x1D90, 0x3040011F,
+ 0x1D90, 0x3041011F,
+ 0x1D90, 0x3042011F,
+ 0x1D90, 0x3043011F,
+ 0x1D90, 0x3044011F,
+ 0x1D90, 0x3045011F,
+ 0x1D90, 0x3046011F,
+ 0x1D90, 0x3047011F,
+ 0x1D90, 0x3048011F,
+ 0x1D90, 0x3049011F,
+ 0x1D90, 0x304A011F,
+ 0x1D90, 0x304B011F,
+ 0x1D90, 0x304C011F,
+ 0x1D90, 0x304D011F,
+ 0x1D90, 0x304E011F,
+ 0x1D90, 0x304F00F4,
+ 0x1D90, 0x305000F3,
+ 0x1D90, 0x305100F2,
+ 0x1D90, 0x305200F1,
+ 0x1D90, 0x305300F0,
+ 0x1D90, 0x305400EF,
+ 0x1D90, 0x305500EE,
+ 0x1D90, 0x305600ED,
+ 0x1D90, 0x305700EC,
+ 0x1D90, 0x305800EB,
+ 0x1D90, 0x305900EA,
+ 0x1D90, 0x305A00E9,
+ 0x1D90, 0x305B00E8,
+ 0x1D90, 0x305C00E7,
+ 0x1D90, 0x305D00E6,
+ 0x1D90, 0x305E00E4,
+ 0x1D90, 0x305F00E3,
+ 0x1D90, 0x306000E2,
+ 0x1D90, 0x306100C4,
+ 0x1D90, 0x306200C3,
+ 0x1D90, 0x306300C2,
+ 0x1D90, 0x306400A4,
+ 0x1D90, 0x306500A3,
+ 0x1D90, 0x306600A2,
+ 0x1D90, 0x306700A1,
0x1D90, 0x30680084,
0x1D90, 0x30690083,
0x1D90, 0x306A0082,
- 0x1D90, 0x306B0069,
- 0x1D90, 0x306C0068,
+ 0x1D90, 0x306B0081,
+ 0x1D90, 0x306C0080,
0x1D90, 0x306D0067,
0x1D90, 0x306E0066,
0x1D90, 0x306F0065,
@@ -130,131 +393,395 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x30720044,
0x1D90, 0x30730043,
0x1D90, 0x30740042,
- 0x1D90, 0x30750025,
+ 0x1D90, 0x30750041,
0x1D90, 0x30760024,
0x1D90, 0x30770023,
0x1D90, 0x30780022,
0x1D90, 0x30790021,
0x1D90, 0x307A0020,
- 0x1D90, 0x307B0003,
- 0x1D90, 0x307C0002,
- 0x1D90, 0x307D0001,
- 0x1D90, 0x307E0000,
+ 0x1D90, 0x307B0004,
+ 0x1D90, 0x307C0003,
+ 0x1D90, 0x307D0002,
+ 0x1D90, 0x307E0001,
0x1D90, 0x307F0000,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x308000FA,
+ 0x1D90, 0x308100F9,
+ 0x1D90, 0x308200F8,
+ 0x1D90, 0x308300F7,
+ 0x1D90, 0x308400F6,
+ 0x1D90, 0x308500F5,
+ 0x1D90, 0x308600F4,
+ 0x1D90, 0x308700F3,
+ 0x1D90, 0x308800F2,
+ 0x1D90, 0x308900F1,
+ 0x1D90, 0x308A00F0,
+ 0x1D90, 0x308B00EF,
+ 0x1D90, 0x308C00EE,
+ 0x1D90, 0x308D00ED,
+ 0x1D90, 0x308E00EC,
+ 0x1D90, 0x308F00EB,
+ 0x1D90, 0x309000EA,
+ 0x1D90, 0x309100E8,
+ 0x1D90, 0x309200E7,
+ 0x1D90, 0x309300E6,
+ 0x1D90, 0x309400E5,
+ 0x1D90, 0x309500E4,
+ 0x1D90, 0x309600C4,
+ 0x1D90, 0x309700C3,
+ 0x1D90, 0x309800C2,
+ 0x1D90, 0x309900C1,
+ 0x1D90, 0x309A00A3,
+ 0x1D90, 0x309B00A2,
+ 0x1D90, 0x309C00A1,
+ 0x1D90, 0x309D0085,
+ 0x1D90, 0x309E0084,
+ 0x1D90, 0x309F0083,
+ 0x1D90, 0x30A00082,
+ 0x1D90, 0x30A10081,
+ 0x1D90, 0x30A20067,
+ 0x1D90, 0x30A30066,
+ 0x1D90, 0x30A40065,
+ 0x1D90, 0x30A50064,
+ 0x1D90, 0x30A60063,
+ 0x1D90, 0x30A70044,
+ 0x1D90, 0x30A80043,
+ 0x1D90, 0x30A90042,
+ 0x1D90, 0x30AA0026,
+ 0x1D90, 0x30AB0025,
+ 0x1D90, 0x30AC0024,
+ 0x1D90, 0x30AD0023,
+ 0x1D90, 0x30AE0022,
+ 0x1D90, 0x30AF0021,
+ 0x1D90, 0x30B00005,
+ 0x1D90, 0x30B10004,
+ 0x1D90, 0x30B20003,
+ 0x1D90, 0x30B30002,
+ 0x1D90, 0x30B40001,
+ 0x1D90, 0x30B50000,
+ 0x1D90, 0x30B60000,
+ 0x1D90, 0x30B70000,
+ 0x1D90, 0x30B80000,
+ 0x1D90, 0x30B90000,
+ 0x1D90, 0x30BA0000,
+ 0x1D90, 0x30BB0000,
+ 0x1D90, 0x30BC0000,
+ 0x1D90, 0x30BD0000,
+ 0x1D90, 0x30BE0000,
+ 0x1D90, 0x30BF0000,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x308000FA,
+ 0x1D90, 0x308100F9,
+ 0x1D90, 0x308200F8,
+ 0x1D90, 0x308300F7,
+ 0x1D90, 0x308400F6,
+ 0x1D90, 0x308500F5,
+ 0x1D90, 0x308600F4,
+ 0x1D90, 0x308700F3,
+ 0x1D90, 0x308800F2,
+ 0x1D90, 0x308900F1,
+ 0x1D90, 0x308A00F0,
+ 0x1D90, 0x308B00EF,
+ 0x1D90, 0x308C00EE,
+ 0x1D90, 0x308D00ED,
+ 0x1D90, 0x308E00EC,
+ 0x1D90, 0x308F00EB,
+ 0x1D90, 0x309000EA,
+ 0x1D90, 0x309100E8,
+ 0x1D90, 0x309200E7,
+ 0x1D90, 0x309300E6,
+ 0x1D90, 0x309400E5,
+ 0x1D90, 0x309500E4,
+ 0x1D90, 0x309600C4,
+ 0x1D90, 0x309700C3,
+ 0x1D90, 0x309800C2,
+ 0x1D90, 0x309900C1,
+ 0x1D90, 0x309A00A3,
+ 0x1D90, 0x309B00A2,
+ 0x1D90, 0x309C00A1,
+ 0x1D90, 0x309D0085,
+ 0x1D90, 0x309E0084,
+ 0x1D90, 0x309F0083,
+ 0x1D90, 0x30A00082,
+ 0x1D90, 0x30A10081,
+ 0x1D90, 0x30A20067,
+ 0x1D90, 0x30A30066,
+ 0x1D90, 0x30A40065,
+ 0x1D90, 0x30A50064,
+ 0x1D90, 0x30A60063,
+ 0x1D90, 0x30A70044,
+ 0x1D90, 0x30A80043,
+ 0x1D90, 0x30A90042,
+ 0x1D90, 0x30AA0026,
+ 0x1D90, 0x30AB0025,
+ 0x1D90, 0x30AC0024,
+ 0x1D90, 0x30AD0023,
+ 0x1D90, 0x30AE0022,
+ 0x1D90, 0x30AF0021,
+ 0x1D90, 0x30B00005,
+ 0x1D90, 0x30B10004,
+ 0x1D90, 0x30B20003,
+ 0x1D90, 0x30B30002,
+ 0x1D90, 0x30B40001,
+ 0x1D90, 0x30B50000,
+ 0x1D90, 0x30B60000,
+ 0x1D90, 0x30B70000,
+ 0x1D90, 0x30B80000,
+ 0x1D90, 0x30B90000,
+ 0x1D90, 0x30BA0000,
+ 0x1D90, 0x30BB0000,
+ 0x1D90, 0x30BC0000,
+ 0x1D90, 0x30BD0000,
+ 0x1D90, 0x30BE0000,
+ 0x1D90, 0x30BF0000,
+ 0xA0000000, 0x00000000,
0x1D90, 0x308000FF,
0x1D90, 0x308100FF,
0x1D90, 0x308200FF,
0x1D90, 0x308300FF,
- 0x1D90, 0x308400FE,
- 0x1D90, 0x308500FD,
- 0x1D90, 0x308600FC,
- 0x1D90, 0x308700FB,
- 0x1D90, 0x308800FA,
- 0x1D90, 0x308900F9,
- 0x1D90, 0x308A00F8,
- 0x1D90, 0x308B00F7,
- 0x1D90, 0x308C00F6,
- 0x1D90, 0x308D00F5,
- 0x1D90, 0x308E00F4,
- 0x1D90, 0x308F00F3,
- 0x1D90, 0x309000F2,
- 0x1D90, 0x309100F1,
- 0x1D90, 0x309200F0,
- 0x1D90, 0x309300EF,
- 0x1D90, 0x309400EE,
- 0x1D90, 0x309500ED,
- 0x1D90, 0x309600EC,
- 0x1D90, 0x309700EB,
- 0x1D90, 0x309800EA,
- 0x1D90, 0x309900E9,
- 0x1D90, 0x309A00E8,
- 0x1D90, 0x309B00E7,
- 0x1D90, 0x309C00E6,
- 0x1D90, 0x309D00C7,
- 0x1D90, 0x309E00C6,
- 0x1D90, 0x309F00C5,
+ 0x1D90, 0x308400FF,
+ 0x1D90, 0x308500FF,
+ 0x1D90, 0x308600FE,
+ 0x1D90, 0x308700FD,
+ 0x1D90, 0x308800FC,
+ 0x1D90, 0x308900FB,
+ 0x1D90, 0x308A00FA,
+ 0x1D90, 0x308B00F9,
+ 0x1D90, 0x308C00F8,
+ 0x1D90, 0x308D00F7,
+ 0x1D90, 0x308E00F6,
+ 0x1D90, 0x308F00F5,
+ 0x1D90, 0x309000F4,
+ 0x1D90, 0x309100F3,
+ 0x1D90, 0x309200F2,
+ 0x1D90, 0x309300F1,
+ 0x1D90, 0x309400F0,
+ 0x1D90, 0x309500EF,
+ 0x1D90, 0x309600EE,
+ 0x1D90, 0x309700ED,
+ 0x1D90, 0x309800EC,
+ 0x1D90, 0x309900EB,
+ 0x1D90, 0x309A00EA,
+ 0x1D90, 0x309B00E8,
+ 0x1D90, 0x309C00E7,
+ 0x1D90, 0x309D00E6,
+ 0x1D90, 0x309E00E5,
+ 0x1D90, 0x309F00E4,
0x1D90, 0x30A000C4,
0x1D90, 0x30A100C3,
0x1D90, 0x30A200C2,
- 0x1D90, 0x30A300A4,
+ 0x1D90, 0x30A300C1,
0x1D90, 0x30A400A3,
0x1D90, 0x30A500A2,
- 0x1D90, 0x30A60086,
+ 0x1D90, 0x30A600A1,
0x1D90, 0x30A70085,
0x1D90, 0x30A80084,
0x1D90, 0x30A90083,
0x1D90, 0x30AA0082,
- 0x1D90, 0x30AB0069,
- 0x1D90, 0x30AC0068,
- 0x1D90, 0x30AD0067,
- 0x1D90, 0x30AE0066,
- 0x1D90, 0x30AF0065,
- 0x1D90, 0x30B00064,
- 0x1D90, 0x30B10063,
- 0x1D90, 0x30B20044,
- 0x1D90, 0x30B30043,
- 0x1D90, 0x30B40042,
+ 0x1D90, 0x30AB0081,
+ 0x1D90, 0x30AC0067,
+ 0x1D90, 0x30AD0066,
+ 0x1D90, 0x30AE0065,
+ 0x1D90, 0x30AF0064,
+ 0x1D90, 0x30B00063,
+ 0x1D90, 0x30B10044,
+ 0x1D90, 0x30B20043,
+ 0x1D90, 0x30B30042,
+ 0x1D90, 0x30B40026,
0x1D90, 0x30B50025,
0x1D90, 0x30B60024,
0x1D90, 0x30B70023,
0x1D90, 0x30B80022,
0x1D90, 0x30B90021,
- 0x1D90, 0x30BA0020,
- 0x1D90, 0x30BB0003,
- 0x1D90, 0x30BC0002,
- 0x1D90, 0x30BD0001,
- 0x1D90, 0x30BE0000,
+ 0x1D90, 0x30BA0005,
+ 0x1D90, 0x30BB0004,
+ 0x1D90, 0x30BC0003,
+ 0x1D90, 0x30BD0002,
+ 0x1D90, 0x30BE0001,
0x1D90, 0x30BF0000,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x30C000F8,
+ 0x1D90, 0x30C100F7,
+ 0x1D90, 0x30C200F6,
+ 0x1D90, 0x30C300F5,
+ 0x1D90, 0x30C400F4,
+ 0x1D90, 0x30C500F3,
+ 0x1D90, 0x30C600F2,
+ 0x1D90, 0x30C700F1,
+ 0x1D90, 0x30C800F0,
+ 0x1D90, 0x30C900EF,
+ 0x1D90, 0x30CA00EE,
+ 0x1D90, 0x30CB00ED,
+ 0x1D90, 0x30CC00EC,
+ 0x1D90, 0x30CD00EB,
+ 0x1D90, 0x30CE00EA,
+ 0x1D90, 0x30CF00E8,
+ 0x1D90, 0x30D000E7,
+ 0x1D90, 0x30D100E6,
+ 0x1D90, 0x30D200E5,
+ 0x1D90, 0x30D300E4,
+ 0x1D90, 0x30D400E3,
+ 0x1D90, 0x30D500E2,
+ 0x1D90, 0x30D600A6,
+ 0x1D90, 0x30D700A5,
+ 0x1D90, 0x30D800A4,
+ 0x1D90, 0x30D900A3,
+ 0x1D90, 0x30DA00A2,
+ 0x1D90, 0x30DB0086,
+ 0x1D90, 0x30DC0085,
+ 0x1D90, 0x30DD0084,
+ 0x1D90, 0x30DE0083,
+ 0x1D90, 0x30DF0081,
+ 0x1D90, 0x30E00068,
+ 0x1D90, 0x30E10067,
+ 0x1D90, 0x30E20066,
+ 0x1D90, 0x30E30065,
+ 0x1D90, 0x30E40064,
+ 0x1D90, 0x30E50045,
+ 0x1D90, 0x30E60044,
+ 0x1D90, 0x30E70043,
+ 0x1D90, 0x30E80042,
+ 0x1D90, 0x30E90025,
+ 0x1D90, 0x30EA0024,
+ 0x1D90, 0x30EB0023,
+ 0x1D90, 0x30EC0022,
+ 0x1D90, 0x30ED0021,
+ 0x1D90, 0x30EE0005,
+ 0x1D90, 0x30EF0004,
+ 0x1D90, 0x30F00003,
+ 0x1D90, 0x30F10002,
+ 0x1D90, 0x30F20001,
+ 0x1D90, 0x30F30000,
+ 0x1D90, 0x30F40000,
+ 0x1D90, 0x30F50000,
+ 0x1D90, 0x30F60000,
+ 0x1D90, 0x30F70000,
+ 0x1D90, 0x30F80000,
+ 0x1D90, 0x30F90000,
+ 0x1D90, 0x30FA0000,
+ 0x1D90, 0x30FB0000,
+ 0x1D90, 0x30FC0000,
+ 0x1D90, 0x30FD0000,
+ 0x1D90, 0x30FE0000,
+ 0x1D90, 0x30FF0000,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x30C000F8,
+ 0x1D90, 0x30C100F7,
+ 0x1D90, 0x30C200F6,
+ 0x1D90, 0x30C300F5,
+ 0x1D90, 0x30C400F4,
+ 0x1D90, 0x30C500F3,
+ 0x1D90, 0x30C600F2,
+ 0x1D90, 0x30C700F1,
+ 0x1D90, 0x30C800F0,
+ 0x1D90, 0x30C900EF,
+ 0x1D90, 0x30CA00EE,
+ 0x1D90, 0x30CB00ED,
+ 0x1D90, 0x30CC00EC,
+ 0x1D90, 0x30CD00EB,
+ 0x1D90, 0x30CE00EA,
+ 0x1D90, 0x30CF00E8,
+ 0x1D90, 0x30D000E7,
+ 0x1D90, 0x30D100E6,
+ 0x1D90, 0x30D200E5,
+ 0x1D90, 0x30D300E4,
+ 0x1D90, 0x30D400E3,
+ 0x1D90, 0x30D500E2,
+ 0x1D90, 0x30D600A6,
+ 0x1D90, 0x30D700A5,
+ 0x1D90, 0x30D800A4,
+ 0x1D90, 0x30D900A3,
+ 0x1D90, 0x30DA00A2,
+ 0x1D90, 0x30DB0086,
+ 0x1D90, 0x30DC0085,
+ 0x1D90, 0x30DD0084,
+ 0x1D90, 0x30DE0083,
+ 0x1D90, 0x30DF0081,
+ 0x1D90, 0x30E00068,
+ 0x1D90, 0x30E10067,
+ 0x1D90, 0x30E20066,
+ 0x1D90, 0x30E30065,
+ 0x1D90, 0x30E40064,
+ 0x1D90, 0x30E50045,
+ 0x1D90, 0x30E60044,
+ 0x1D90, 0x30E70043,
+ 0x1D90, 0x30E80042,
+ 0x1D90, 0x30E90025,
+ 0x1D90, 0x30EA0024,
+ 0x1D90, 0x30EB0023,
+ 0x1D90, 0x30EC0022,
+ 0x1D90, 0x30ED0021,
+ 0x1D90, 0x30EE0005,
+ 0x1D90, 0x30EF0004,
+ 0x1D90, 0x30F00003,
+ 0x1D90, 0x30F10002,
+ 0x1D90, 0x30F20001,
+ 0x1D90, 0x30F30000,
+ 0x1D90, 0x30F40000,
+ 0x1D90, 0x30F50000,
+ 0x1D90, 0x30F60000,
+ 0x1D90, 0x30F70000,
+ 0x1D90, 0x30F80000,
+ 0x1D90, 0x30F90000,
+ 0x1D90, 0x30FA0000,
+ 0x1D90, 0x30FB0000,
+ 0x1D90, 0x30FC0000,
+ 0x1D90, 0x30FD0000,
+ 0x1D90, 0x30FE0000,
+ 0x1D90, 0x30FF0000,
+ 0xA0000000, 0x00000000,
0x1D90, 0x30C000FF,
0x1D90, 0x30C100FF,
0x1D90, 0x30C200FF,
0x1D90, 0x30C300FF,
- 0x1D90, 0x30C400FE,
- 0x1D90, 0x30C500FD,
- 0x1D90, 0x30C600FC,
- 0x1D90, 0x30C700FB,
- 0x1D90, 0x30C800FA,
- 0x1D90, 0x30C900F9,
- 0x1D90, 0x30CA00F8,
- 0x1D90, 0x30CB00F7,
- 0x1D90, 0x30CC00F6,
- 0x1D90, 0x30CD00F5,
- 0x1D90, 0x30CE00F4,
- 0x1D90, 0x30CF00F3,
- 0x1D90, 0x30D000F2,
- 0x1D90, 0x30D100F1,
- 0x1D90, 0x30D200F0,
- 0x1D90, 0x30D300EF,
- 0x1D90, 0x30D400EE,
- 0x1D90, 0x30D500ED,
- 0x1D90, 0x30D600EC,
- 0x1D90, 0x30D700EB,
- 0x1D90, 0x30D800EA,
- 0x1D90, 0x30D900E9,
- 0x1D90, 0x30DA00E8,
- 0x1D90, 0x30DB00E7,
- 0x1D90, 0x30DC00E6,
- 0x1D90, 0x30DD00C7,
- 0x1D90, 0x30DE00C6,
- 0x1D90, 0x30DF00C5,
- 0x1D90, 0x30E000C4,
- 0x1D90, 0x30E100C3,
- 0x1D90, 0x30E200C2,
- 0x1D90, 0x30E300A4,
- 0x1D90, 0x30E400A3,
- 0x1D90, 0x30E500A2,
- 0x1D90, 0x30E60086,
- 0x1D90, 0x30E70085,
- 0x1D90, 0x30E80084,
- 0x1D90, 0x30E90083,
- 0x1D90, 0x30EA0082,
- 0x1D90, 0x30EB0069,
- 0x1D90, 0x30EC0068,
- 0x1D90, 0x30ED0067,
- 0x1D90, 0x30EE0066,
- 0x1D90, 0x30EF0065,
- 0x1D90, 0x30F00064,
- 0x1D90, 0x30F10063,
+ 0x1D90, 0x30C400FF,
+ 0x1D90, 0x30C500FF,
+ 0x1D90, 0x30C600FE,
+ 0x1D90, 0x30C700FD,
+ 0x1D90, 0x30C800FC,
+ 0x1D90, 0x30C900FB,
+ 0x1D90, 0x30CA00FA,
+ 0x1D90, 0x30CB00F9,
+ 0x1D90, 0x30CC00F8,
+ 0x1D90, 0x30CD00F7,
+ 0x1D90, 0x30CE00F6,
+ 0x1D90, 0x30CF00F5,
+ 0x1D90, 0x30D000F4,
+ 0x1D90, 0x30D100F3,
+ 0x1D90, 0x30D200F2,
+ 0x1D90, 0x30D300F1,
+ 0x1D90, 0x30D400F0,
+ 0x1D90, 0x30D500EF,
+ 0x1D90, 0x30D600EE,
+ 0x1D90, 0x30D700ED,
+ 0x1D90, 0x30D800EC,
+ 0x1D90, 0x30D900EB,
+ 0x1D90, 0x30DA00EA,
+ 0x1D90, 0x30DB00E8,
+ 0x1D90, 0x30DC00E7,
+ 0x1D90, 0x30DD00E6,
+ 0x1D90, 0x30DE00E5,
+ 0x1D90, 0x30DF00E4,
+ 0x1D90, 0x30E000E3,
+ 0x1D90, 0x30E100E2,
+ 0x1D90, 0x30E200A6,
+ 0x1D90, 0x30E300A5,
+ 0x1D90, 0x30E400A4,
+ 0x1D90, 0x30E500A3,
+ 0x1D90, 0x30E600A2,
+ 0x1D90, 0x30E70086,
+ 0x1D90, 0x30E80085,
+ 0x1D90, 0x30E90084,
+ 0x1D90, 0x30EA0083,
+ 0x1D90, 0x30EB0082,
+ 0x1D90, 0x30EC0067,
+ 0x1D90, 0x30ED0066,
+ 0x1D90, 0x30EE0065,
+ 0x1D90, 0x30EF0064,
+ 0x1D90, 0x30F00063,
+ 0x1D90, 0x30F10045,
0x1D90, 0x30F20044,
0x1D90, 0x30F30043,
0x1D90, 0x30F40042,
@@ -263,12 +790,14 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x30F70023,
0x1D90, 0x30F80022,
0x1D90, 0x30F90021,
- 0x1D90, 0x30FA0020,
- 0x1D90, 0x30FB0003,
- 0x1D90, 0x30FC0002,
- 0x1D90, 0x30FD0001,
- 0x1D90, 0x30FE0000,
+ 0x1D90, 0x30FA0005,
+ 0x1D90, 0x30FB0004,
+ 0x1D90, 0x30FC0003,
+ 0x1D90, 0x30FD0002,
+ 0x1D90, 0x30FE0001,
0x1D90, 0x30FF0000,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x310001FF,
0x1D90, 0x310101FF,
0x1D90, 0x310201FF,
@@ -333,6 +862,203 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x313D0045,
0x1D90, 0x313E0044,
0x1D90, 0x313F0043,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x310001FF,
+ 0x1D90, 0x310101FF,
+ 0x1D90, 0x310201FF,
+ 0x1D90, 0x310301FF,
+ 0x1D90, 0x310401FF,
+ 0x1D90, 0x310501FF,
+ 0x1D90, 0x310601FF,
+ 0x1D90, 0x310701FF,
+ 0x1D90, 0x310801FF,
+ 0x1D90, 0x310901FE,
+ 0x1D90, 0x310A01FD,
+ 0x1D90, 0x310B01FC,
+ 0x1D90, 0x310C01FB,
+ 0x1D90, 0x310D01FA,
+ 0x1D90, 0x310E01F9,
+ 0x1D90, 0x310F01F8,
+ 0x1D90, 0x311001F7,
+ 0x1D90, 0x311101F6,
+ 0x1D90, 0x311201F5,
+ 0x1D90, 0x311301F4,
+ 0x1D90, 0x311401F3,
+ 0x1D90, 0x311501F2,
+ 0x1D90, 0x311601F1,
+ 0x1D90, 0x311701F0,
+ 0x1D90, 0x311801EF,
+ 0x1D90, 0x311901EE,
+ 0x1D90, 0x311A01ED,
+ 0x1D90, 0x311B01EC,
+ 0x1D90, 0x311C01EB,
+ 0x1D90, 0x311D0192,
+ 0x1D90, 0x311E0191,
+ 0x1D90, 0x311F0190,
+ 0x1D90, 0x3120018F,
+ 0x1D90, 0x3121018E,
+ 0x1D90, 0x3122018D,
+ 0x1D90, 0x3123018C,
+ 0x1D90, 0x3124018B,
+ 0x1D90, 0x3125018A,
+ 0x1D90, 0x31260189,
+ 0x1D90, 0x31270188,
+ 0x1D90, 0x31280187,
+ 0x1D90, 0x31290186,
+ 0x1D90, 0x312A0185,
+ 0x1D90, 0x312B0149,
+ 0x1D90, 0x312C0148,
+ 0x1D90, 0x312D0147,
+ 0x1D90, 0x312E0146,
+ 0x1D90, 0x312F0145,
+ 0x1D90, 0x31300144,
+ 0x1D90, 0x31310143,
+ 0x1D90, 0x31320142,
+ 0x1D90, 0x31330141,
+ 0x1D90, 0x31340140,
+ 0x1D90, 0x313500C7,
+ 0x1D90, 0x313600C6,
+ 0x1D90, 0x313700C5,
+ 0x1D90, 0x313800C4,
+ 0x1D90, 0x313900C3,
+ 0x1D90, 0x313A0088,
+ 0x1D90, 0x313B0087,
+ 0x1D90, 0x313C0086,
+ 0x1D90, 0x313D0045,
+ 0x1D90, 0x313E0044,
+ 0x1D90, 0x313F0043,
+ 0xA0000000, 0x00000000,
+ 0x1D90, 0x310001FF,
+ 0x1D90, 0x310101FF,
+ 0x1D90, 0x310201FF,
+ 0x1D90, 0x310301FF,
+ 0x1D90, 0x310401FF,
+ 0x1D90, 0x310501FF,
+ 0x1D90, 0x310601FF,
+ 0x1D90, 0x310701FF,
+ 0x1D90, 0x310801FF,
+ 0x1D90, 0x310901FE,
+ 0x1D90, 0x310A01FD,
+ 0x1D90, 0x310B01FC,
+ 0x1D90, 0x310C01FB,
+ 0x1D90, 0x310D01FA,
+ 0x1D90, 0x310E01F9,
+ 0x1D90, 0x310F01F8,
+ 0x1D90, 0x311001F7,
+ 0x1D90, 0x311101F6,
+ 0x1D90, 0x311201F5,
+ 0x1D90, 0x311301F4,
+ 0x1D90, 0x311401F3,
+ 0x1D90, 0x311501F2,
+ 0x1D90, 0x311601F1,
+ 0x1D90, 0x311701F0,
+ 0x1D90, 0x311801EF,
+ 0x1D90, 0x311901EE,
+ 0x1D90, 0x311A01ED,
+ 0x1D90, 0x311B01EC,
+ 0x1D90, 0x311C01EB,
+ 0x1D90, 0x311D0192,
+ 0x1D90, 0x311E0191,
+ 0x1D90, 0x311F0190,
+ 0x1D90, 0x3120018F,
+ 0x1D90, 0x3121018E,
+ 0x1D90, 0x3122018D,
+ 0x1D90, 0x3123018C,
+ 0x1D90, 0x3124018B,
+ 0x1D90, 0x3125018A,
+ 0x1D90, 0x31260189,
+ 0x1D90, 0x31270188,
+ 0x1D90, 0x31280187,
+ 0x1D90, 0x31290186,
+ 0x1D90, 0x312A0185,
+ 0x1D90, 0x312B0149,
+ 0x1D90, 0x312C0148,
+ 0x1D90, 0x312D0147,
+ 0x1D90, 0x312E0146,
+ 0x1D90, 0x312F0145,
+ 0x1D90, 0x31300144,
+ 0x1D90, 0x31310143,
+ 0x1D90, 0x31320142,
+ 0x1D90, 0x31330141,
+ 0x1D90, 0x31340140,
+ 0x1D90, 0x313500C7,
+ 0x1D90, 0x313600C6,
+ 0x1D90, 0x313700C5,
+ 0x1D90, 0x313800C4,
+ 0x1D90, 0x313900C3,
+ 0x1D90, 0x313A0088,
+ 0x1D90, 0x313B0087,
+ 0x1D90, 0x313C0086,
+ 0x1D90, 0x313D0045,
+ 0x1D90, 0x313E0044,
+ 0x1D90, 0x313F0043,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x314001FF,
+ 0x1D90, 0x314101FF,
+ 0x1D90, 0x314201FF,
+ 0x1D90, 0x314301FF,
+ 0x1D90, 0x314401FF,
+ 0x1D90, 0x314501FF,
+ 0x1D90, 0x314601FF,
+ 0x1D90, 0x314701FE,
+ 0x1D90, 0x314801FD,
+ 0x1D90, 0x314901FC,
+ 0x1D90, 0x314A01FB,
+ 0x1D90, 0x314B01FA,
+ 0x1D90, 0x314C01F9,
+ 0x1D90, 0x314D01F8,
+ 0x1D90, 0x314E01F7,
+ 0x1D90, 0x314F01F6,
+ 0x1D90, 0x315001F5,
+ 0x1D90, 0x315101F4,
+ 0x1D90, 0x315201F3,
+ 0x1D90, 0x315301F2,
+ 0x1D90, 0x315401F1,
+ 0x1D90, 0x315501F0,
+ 0x1D90, 0x315601EF,
+ 0x1D90, 0x315701EE,
+ 0x1D90, 0x315801ED,
+ 0x1D90, 0x315901EC,
+ 0x1D90, 0x315A01EB,
+ 0x1D90, 0x315B01EA,
+ 0x1D90, 0x315C01E9,
+ 0x1D90, 0x315D018F,
+ 0x1D90, 0x315E018E,
+ 0x1D90, 0x315F018D,
+ 0x1D90, 0x3160018C,
+ 0x1D90, 0x3161018B,
+ 0x1D90, 0x3162018A,
+ 0x1D90, 0x31630189,
+ 0x1D90, 0x31640188,
+ 0x1D90, 0x31650187,
+ 0x1D90, 0x31660186,
+ 0x1D90, 0x31670185,
+ 0x1D90, 0x31680184,
+ 0x1D90, 0x31690183,
+ 0x1D90, 0x316A0182,
+ 0x1D90, 0x316B0149,
+ 0x1D90, 0x316C0148,
+ 0x1D90, 0x316D0147,
+ 0x1D90, 0x316E0146,
+ 0x1D90, 0x316F0145,
+ 0x1D90, 0x31700144,
+ 0x1D90, 0x31710143,
+ 0x1D90, 0x31720142,
+ 0x1D90, 0x31730141,
+ 0x1D90, 0x31740140,
+ 0x1D90, 0x317500C7,
+ 0x1D90, 0x317600C6,
+ 0x1D90, 0x317700C5,
+ 0x1D90, 0x317800C4,
+ 0x1D90, 0x317900C3,
+ 0x1D90, 0x317A0088,
+ 0x1D90, 0x317B0087,
+ 0x1D90, 0x317C0086,
+ 0x1D90, 0x317D0045,
+ 0x1D90, 0x317E0044,
+ 0x1D90, 0x317F0043,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x314001FF,
0x1D90, 0x314101FF,
0x1D90, 0x314201FF,
@@ -397,6 +1123,73 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x317D0045,
0x1D90, 0x317E0044,
0x1D90, 0x317F0043,
+ 0xA0000000, 0x00000000,
+ 0x1D90, 0x314001FF,
+ 0x1D90, 0x314101FF,
+ 0x1D90, 0x314201FF,
+ 0x1D90, 0x314301FF,
+ 0x1D90, 0x314401FF,
+ 0x1D90, 0x314501FF,
+ 0x1D90, 0x314601FF,
+ 0x1D90, 0x314701FE,
+ 0x1D90, 0x314801FD,
+ 0x1D90, 0x314901FC,
+ 0x1D90, 0x314A01FB,
+ 0x1D90, 0x314B01FA,
+ 0x1D90, 0x314C01F9,
+ 0x1D90, 0x314D01F8,
+ 0x1D90, 0x314E01F7,
+ 0x1D90, 0x314F01F6,
+ 0x1D90, 0x315001F5,
+ 0x1D90, 0x315101F4,
+ 0x1D90, 0x315201F3,
+ 0x1D90, 0x315301F2,
+ 0x1D90, 0x315401F1,
+ 0x1D90, 0x315501F0,
+ 0x1D90, 0x315601EF,
+ 0x1D90, 0x315701EE,
+ 0x1D90, 0x315801ED,
+ 0x1D90, 0x315901EC,
+ 0x1D90, 0x315A01EB,
+ 0x1D90, 0x315B01EA,
+ 0x1D90, 0x315C01E9,
+ 0x1D90, 0x315D018F,
+ 0x1D90, 0x315E018E,
+ 0x1D90, 0x315F018D,
+ 0x1D90, 0x3160018C,
+ 0x1D90, 0x3161018B,
+ 0x1D90, 0x3162018A,
+ 0x1D90, 0x31630189,
+ 0x1D90, 0x31640188,
+ 0x1D90, 0x31650187,
+ 0x1D90, 0x31660186,
+ 0x1D90, 0x31670185,
+ 0x1D90, 0x31680184,
+ 0x1D90, 0x31690183,
+ 0x1D90, 0x316A0182,
+ 0x1D90, 0x316B0149,
+ 0x1D90, 0x316C0148,
+ 0x1D90, 0x316D0147,
+ 0x1D90, 0x316E0146,
+ 0x1D90, 0x316F0145,
+ 0x1D90, 0x31700144,
+ 0x1D90, 0x31710143,
+ 0x1D90, 0x31720142,
+ 0x1D90, 0x31730141,
+ 0x1D90, 0x31740140,
+ 0x1D90, 0x317500C7,
+ 0x1D90, 0x317600C6,
+ 0x1D90, 0x317700C5,
+ 0x1D90, 0x317800C4,
+ 0x1D90, 0x317900C3,
+ 0x1D90, 0x317A0088,
+ 0x1D90, 0x317B0087,
+ 0x1D90, 0x317C0086,
+ 0x1D90, 0x317D0045,
+ 0x1D90, 0x317E0044,
+ 0x1D90, 0x317F0043,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x318001FE,
0x1D90, 0x318101FD,
0x1D90, 0x318201FC,
@@ -461,8 +1254,147 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x31BD0003,
0x1D90, 0x31BE0002,
0x1D90, 0x31BF0001,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x318001FE,
+ 0x1D90, 0x318101FD,
+ 0x1D90, 0x318201FC,
+ 0x1D90, 0x318301FB,
+ 0x1D90, 0x318401FA,
+ 0x1D90, 0x318501F9,
+ 0x1D90, 0x318601F8,
+ 0x1D90, 0x318701F7,
+ 0x1D90, 0x318801F6,
+ 0x1D90, 0x318901F5,
+ 0x1D90, 0x318A01F4,
+ 0x1D90, 0x318B01F3,
+ 0x1D90, 0x318C01F2,
+ 0x1D90, 0x318D01F1,
+ 0x1D90, 0x318E01F0,
+ 0x1D90, 0x318F01EF,
+ 0x1D90, 0x319001EE,
+ 0x1D90, 0x319101ED,
+ 0x1D90, 0x319201EC,
+ 0x1D90, 0x319301EB,
+ 0x1D90, 0x319401EA,
+ 0x1D90, 0x319501E9,
+ 0x1D90, 0x319601E7,
+ 0x1D90, 0x319701E6,
+ 0x1D90, 0x319801E5,
+ 0x1D90, 0x319901E4,
+ 0x1D90, 0x319A01A8,
+ 0x1D90, 0x319B01A7,
+ 0x1D90, 0x319C01A6,
+ 0x1D90, 0x319D01A5,
+ 0x1D90, 0x319E0185,
+ 0x1D90, 0x319F0184,
+ 0x1D90, 0x31A00183,
+ 0x1D90, 0x31A10182,
+ 0x1D90, 0x31A20149,
+ 0x1D90, 0x31A30148,
+ 0x1D90, 0x31A40147,
+ 0x1D90, 0x31A50145,
+ 0x1D90, 0x31A60144,
+ 0x1D90, 0x31A70143,
+ 0x1D90, 0x31A80142,
+ 0x1D90, 0x31A900E6,
+ 0x1D90, 0x31AA00E5,
+ 0x1D90, 0x31AB00C9,
+ 0x1D90, 0x31AC00C8,
+ 0x1D90, 0x31AD00C7,
+ 0x1D90, 0x31AE00C6,
+ 0x1D90, 0x31AF00C5,
+ 0x1D90, 0x31B000C4,
+ 0x1D90, 0x31B100C3,
+ 0x1D90, 0x31B20088,
+ 0x1D90, 0x31B30087,
+ 0x1D90, 0x31B40086,
+ 0x1D90, 0x31B50085,
+ 0x1D90, 0x31B60026,
+ 0x1D90, 0x31B70025,
+ 0x1D90, 0x31B80024,
+ 0x1D90, 0x31B90023,
+ 0x1D90, 0x31BA0022,
+ 0x1D90, 0x31BB0021,
+ 0x1D90, 0x31BC0020,
+ 0x1D90, 0x31BD0003,
+ 0x1D90, 0x31BE0002,
+ 0x1D90, 0x31BF0001,
+ 0xA0000000, 0x00000000,
+ 0x1D90, 0x318001FE,
+ 0x1D90, 0x318101FD,
+ 0x1D90, 0x318201FC,
+ 0x1D90, 0x318301FB,
+ 0x1D90, 0x318401FA,
+ 0x1D90, 0x318501F9,
+ 0x1D90, 0x318601F8,
+ 0x1D90, 0x318701F7,
+ 0x1D90, 0x318801F6,
+ 0x1D90, 0x318901F5,
+ 0x1D90, 0x318A01F4,
+ 0x1D90, 0x318B01F3,
+ 0x1D90, 0x318C01F2,
+ 0x1D90, 0x318D01F1,
+ 0x1D90, 0x318E01F0,
+ 0x1D90, 0x318F01EF,
+ 0x1D90, 0x319001EE,
+ 0x1D90, 0x319101ED,
+ 0x1D90, 0x319201EC,
+ 0x1D90, 0x319301EB,
+ 0x1D90, 0x319401EA,
+ 0x1D90, 0x319501E9,
+ 0x1D90, 0x319601E7,
+ 0x1D90, 0x319701E6,
+ 0x1D90, 0x319801E5,
+ 0x1D90, 0x319901E4,
+ 0x1D90, 0x319A01A8,
+ 0x1D90, 0x319B01A7,
+ 0x1D90, 0x319C01A6,
+ 0x1D90, 0x319D01A5,
+ 0x1D90, 0x319E0185,
+ 0x1D90, 0x319F0184,
+ 0x1D90, 0x31A00183,
+ 0x1D90, 0x31A10182,
+ 0x1D90, 0x31A20149,
+ 0x1D90, 0x31A30148,
+ 0x1D90, 0x31A40147,
+ 0x1D90, 0x31A50145,
+ 0x1D90, 0x31A60144,
+ 0x1D90, 0x31A70143,
+ 0x1D90, 0x31A80142,
+ 0x1D90, 0x31A900E6,
+ 0x1D90, 0x31AA00E5,
+ 0x1D90, 0x31AB00C9,
+ 0x1D90, 0x31AC00C8,
+ 0x1D90, 0x31AD00C7,
+ 0x1D90, 0x31AE00C6,
+ 0x1D90, 0x31AF00C5,
+ 0x1D90, 0x31B000C4,
+ 0x1D90, 0x31B100C3,
+ 0x1D90, 0x31B20088,
+ 0x1D90, 0x31B30087,
+ 0x1D90, 0x31B40086,
+ 0x1D90, 0x31B50085,
+ 0x1D90, 0x31B60026,
+ 0x1D90, 0x31B70025,
+ 0x1D90, 0x31B80024,
+ 0x1D90, 0x31B90023,
+ 0x1D90, 0x31BA0022,
+ 0x1D90, 0x31BB0021,
+ 0x1D90, 0x31BC0020,
+ 0x1D90, 0x31BD0003,
+ 0x1D90, 0x31BE0002,
+ 0x1D90, 0x31BF0001,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D70, 0x22222222,
+ 0x1D70, 0x20202020,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D70, 0x22222222,
+ 0x1D70, 0x20202020,
+ 0xA0000000, 0x00000000,
0x1D70, 0x22222222,
0x1D70, 0x20202020,
+ 0xB0000000, 0x00000000,
};
RTW_DECL_TABLE_PHY_COND(rtw8822c_agc, rtw_phy_cfg_agc);
@@ -732,7 +1664,7 @@ static const u32 rtw8822c_bb[] = {
0xC18, 0x00087672,
0xC1C, 0x15260000,
0xC20, 0x00000000,
- 0xC24, 0x40600000,
+ 0xC24, 0x406000FF,
0xC28, 0x06400F76,
0xC2C, 0xE30020E1,
0xC30, 0x140C9494,
@@ -861,9 +1793,29 @@ static const u32 rtw8822c_bb[] = {
0x1828, 0x000004FD,
0x182C, 0x00000000,
0x1834, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1838, 0x20100000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1838, 0x20100000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1838, 0x20100000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1838, 0x20100000,
+ 0xA0000000, 0x00000000,
0x1838, 0x20000000,
+ 0xB0000000, 0x00000000,
0x183C, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1840, 0x00002300,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1840, 0x00002300,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1840, 0x00002300,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1840, 0x00002300,
+ 0xA0000000, 0x00000000,
0x1840, 0x00000000,
+ 0xB0000000, 0x00000000,
0x1844, 0x00000000,
0x1848, 0x00000000,
0x184C, 0x00000000,
@@ -874,13 +1826,33 @@ static const u32 rtw8822c_bb[] = {
0x1860, 0xF0040FF8,
0x1864, 0x7F000000,
0x1868, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x186C, 0x0000FF02,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x186C, 0x0000FF02,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x186C, 0x0000FF02,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x186C, 0x0000FF02,
+ 0xA0000000, 0x00000000,
0x186C, 0x0000FF00,
+ 0xB0000000, 0x00000000,
0x1870, 0x00000000,
0x1874, 0x00000000,
0x1878, 0x00000000,
0x187C, 0x00000000,
0x1880, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1884, 0x03B00000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1884, 0x03B00000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1884, 0x03B00000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1884, 0x03B00000,
+ 0xA0000000, 0x00000000,
0x1884, 0x02B00000,
+ 0xB0000000, 0x00000000,
0x1888, 0x00000000,
0x188C, 0x00000000,
0x1890, 0x00000000,
@@ -999,7 +1971,7 @@ static const u32 rtw8822c_bb[] = {
0x1C58, 0x00000000,
0x1C5C, 0xFFFFFFFF,
0x1C60, 0x0F030032,
- 0x1C64, 0x360F0000,
+ 0x1C64, 0x360F0008,
0x1C68, 0x007F0000,
0x1C6C, 0x00010000,
0x1C70, 0x00037FFE,
@@ -1010,8 +1982,22 @@ static const u32 rtw8822c_bb[] = {
0x1C84, 0x245120D4,
0x1C88, 0xC8400483,
0x1C8C, 0x40005A20,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1C94, 0x00000B0E,
+ 0x1C98, 0x00450000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1C94, 0x00000B0E,
+ 0x1C98, 0x00450000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1C94, 0x00000B0E,
+ 0x1C98, 0x00450000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1C94, 0x00000B0E,
+ 0x1C98, 0x00450000,
+ 0xA0000000, 0x00000000,
0x1C94, 0x00000000,
0x1C98, 0x00000000,
+ 0xB0000000, 0x00000000,
0x1C9C, 0x00000000,
0x1CA0, 0x00000000,
0x1CA4, 0x20000000,
@@ -1125,7 +2111,7 @@ static const u32 rtw8822c_bb[] = {
0x1E60, 0x00000000,
0x1E64, 0xF3A00001,
0x1E68, 0x0028846E,
- 0x1E6C, 0x40374906,
+ 0x1E6C, 0x40274906,
0x1E70, 0x00001000,
0x1E74, 0x00000000,
0x1E78, 0x00000000,
@@ -1344,10 +2330,30 @@ static const u32 rtw8822c_bb[] = {
0x4128, 0x000004FD,
0x412C, 0x00000000,
0x4134, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4138, 0x20100000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4138, 0x20100000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4138, 0x20100000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4138, 0x20100000,
+ 0xA0000000, 0x00000000,
0x4138, 0x20000000,
+ 0xB0000000, 0x00000000,
0x413C, 0x00000000,
0x4140, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4144, 0x00002030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4144, 0x00002030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4144, 0x00002030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4144, 0x00002030,
+ 0xA0000000, 0x00000000,
0x4144, 0x00000000,
+ 0xB0000000, 0x00000000,
0x4148, 0x00000000,
0x414C, 0x00000000,
0x4150, 0x00000000,
@@ -1357,13 +2363,33 @@ static const u32 rtw8822c_bb[] = {
0x4160, 0xF0040FF8,
0x4164, 0x7F000000,
0x4168, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x416C, 0x00008002,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x416C, 0x00008002,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x416C, 0x00008002,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x416C, 0x00008002,
+ 0xA0000000, 0x00000000,
0x416C, 0x00008000,
+ 0xB0000000, 0x00000000,
0x4170, 0x00000000,
0x4174, 0x00000000,
0x4178, 0x00000000,
0x417C, 0x00000000,
0x4180, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4184, 0x03B00000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4184, 0x03B00000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4184, 0x03B00000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4184, 0x03B00000,
+ 0xA0000000, 0x00000000,
0x4184, 0x02B00000,
+ 0xB0000000, 0x00000000,
0x4188, 0x00000000,
0x418C, 0x00000000,
0x4190, 0x00000000,
@@ -1483,7 +2509,7 @@ static const u32 rtw8822c_bb[] = {
0x1AC4, 0x00000000,
0x1AC8, 0x00000807,
0x1ACC, 0x00000707,
- 0x1AD0, 0xA33529AD,
+ 0x1AD0, 0xA33529CE,
0x1AD4, 0x0D8D8452,
0x1AD8, 0x08024024,
0x1ADC, 0x000D0001,
@@ -1757,56 +2783,55 @@ static const u32 rtw8822c_bb[] = {
0x1D94, 0x40FF0000,
0xC0C, 0x02F1D8B7,
0x1EE8, 0x00000000,
-
};
RTW_DECL_TABLE_PHY_COND(rtw8822c_bb, rtw_phy_cfg_bb);
static const struct rtw_phy_pg_cfg_pair rtw8822c_bb_pg_type0[] = {
{ 0, 0, 0, 0x00000c20, 0xffffffff, 0x484c5054, },
- { 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585858, },
{ 0, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50, },
- { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x50545858, },
{ 0, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c, },
- { 0, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x50545858, },
{ 0, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c, },
- { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x50545858, },
{ 0, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c, },
- { 0, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x5858383c, },
{ 0, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054, },
{ 0, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044, },
{ 0, 1, 0, 0x00000e20, 0xffffffff, 0x484c5054, },
- { 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585858, },
{ 0, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50, },
- { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x50545858, },
{ 0, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c, },
- { 0, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x50545858, },
{ 0, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c, },
- { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x50545858, },
{ 0, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c, },
- { 0, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x5858383c, },
{ 0, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054, },
{ 0, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044, },
- { 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585858, },
{ 1, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50, },
- { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x50545858, },
{ 1, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c, },
- { 1, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x50545858, },
{ 1, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c, },
- { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x50545858, },
{ 1, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c, },
- { 1, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x5858383c, },
{ 1, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054, },
{ 1, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044, },
- { 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585858, },
{ 1, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50, },
- { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x50545858, },
{ 1, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c, },
- { 1, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x50545858, },
{ 1, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c, },
- { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x50545858, },
{ 1, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c, },
- { 1, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x5858383c, },
{ 1, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054, },
{ 1, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044, },
};
@@ -1834,6 +2859,26 @@ static const u32 rtw8822c_rf_a[] = {
0x08E, 0x000A5540,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
0xA0000000, 0x00000000,
0x08E, 0x000A5540,
0xB0000000, 0x00000000,
@@ -1856,6 +2901,26 @@ static const u32 rtw8822c_rf_a[] = {
0x085, 0x0006A06C,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x085, 0x0006A06C,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
0xA0000000, 0x00000000,
0x085, 0x0006A06C,
0xB0000000, 0x00000000,
@@ -1931,6 +2996,96 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000002,
0x03F, 0x0000002A,
0x0EE, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
0xA0000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
@@ -2149,6 +3304,266 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000004,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
@@ -2378,6 +3793,256 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000014,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773E8,
@@ -2606,6 +4271,256 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000024,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773E8,
@@ -2834,6 +4749,256 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000034,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773E8,
@@ -3062,6 +5227,256 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000044,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773E8,
@@ -3290,6 +5705,256 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000054,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773E8,
@@ -3334,6 +5999,26 @@ static const u32 rtw8822c_rf_a[] = {
0x0EF, 0x00000000,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
0xA0000000, 0x00000000,
0x0EF, 0x00000000,
0xB0000000, 0x00000000,
@@ -3378,8 +6063,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -3498,7 +6330,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -3525,8 +6357,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -3645,7 +6624,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -3672,8 +6651,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -3792,7 +6918,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -3819,8 +6945,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -3939,7 +7212,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -3966,8 +7239,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -4086,7 +7506,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -4113,8 +7533,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -4233,7 +7800,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -4260,8 +7827,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -4380,7 +8094,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -4407,8 +8121,449 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -4695,6 +8850,26 @@ static const u32 rtw8822c_rf_a[] = {
0x063, 0x00000002,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
0xA0000000, 0x00000000,
0x063, 0x00000C02,
0xB0000000, 0x00000000,
@@ -4915,6 +9090,276 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x00017239,
0x030, 0x00018209,
0x030, 0x00019239,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
0xA0000000, 0x00000000,
0x030, 0x00000233,
0x030, 0x00001233,
@@ -5049,6 +9494,136 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x00009334,
0x030, 0x0000A334,
0x030, 0x0000B334,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
0xA0000000, 0x00000000,
0x030, 0x00000232,
0x030, 0x00001232,
@@ -5076,6 +9651,99 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x0000C330,
0x0EF, 0x00000000,
0x0EE, 0x00010000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0xA0000000, 0x00000000,
0x033, 0x00000200,
0x03F, 0x0000006A,
0x033, 0x00000201,
@@ -5098,6 +9766,100 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000CF4,
0x033, 0x0000020A,
0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0xA0000000, 0x00000000,
0x033, 0x00000280,
0x03F, 0x0000006A,
0x033, 0x00000281,
@@ -5120,6 +9882,104 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000CF4,
0x033, 0x0000028A,
0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0xA0000000, 0x00000000,
0x033, 0x00000300,
0x03F, 0x0000006A,
0x033, 0x00000301,
@@ -5143,6 +10003,7 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x0000030A,
0x03F, 0x00000CF7,
0x0EE, 0x00000000,
+ 0xB0000000, 0x00000000,
0x051, 0x0003C800,
0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
@@ -5160,6 +10021,26 @@ static const u32 rtw8822c_rf_a[] = {
0x052, 0x000902CA,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
0xA0000000, 0x00000000,
0x052, 0x000942CA,
0xB0000000, 0x00000000,
@@ -5185,6 +10066,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5206,6 +10107,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5227,6 +10148,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5248,6 +10189,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5269,6 +10230,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5290,6 +10271,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5311,6 +10312,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5332,6 +10353,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5353,6 +10394,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5374,6 +10435,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5395,6 +10476,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5416,6 +10517,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5437,6 +10558,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5458,6 +10599,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5479,6 +10640,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5493,13 +10674,33 @@ static const u32 rtw8822c_rf_a[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5514,13 +10715,33 @@ static const u32 rtw8822c_rf_a[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5535,13 +10756,33 @@ static const u32 rtw8822c_rf_a[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x00031E46,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x00031E46,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5563,6 +10804,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5584,6 +10845,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5605,6 +10886,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5626,6 +10927,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5647,6 +10968,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5668,6 +11009,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5689,6 +11050,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5710,6 +11091,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5731,6 +11132,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5752,6 +11173,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5773,6 +11214,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5794,6 +11255,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5815,6 +11296,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5836,6 +11337,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5857,6 +11378,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5878,6 +11419,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5899,6 +11460,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5920,6 +11501,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5941,6 +11542,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5962,6 +11583,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5983,6 +11624,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -6004,6 +11665,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -6025,6 +11706,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -6046,6 +11747,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -6067,6 +11788,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00021E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00021E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -6256,6 +11997,236 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF4,
0x033, 0x0000006A,
0x03F, 0x00000DF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000487,
@@ -6464,6 +12435,236 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF4,
0x033, 0x0000002A,
0x03F, 0x00000DF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000487,
@@ -6489,7 +12690,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF7,
0xB0000000, 0x00000000,
0x0EE, 0x00000000,
- 0x05C, 0x000FCC00,
+ 0x05C, 0x000FC000,
0x067, 0x0000A505,
0x0D3, 0x00000542,
0x043, 0x00005000,
@@ -6513,6 +12714,26 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0xA0000000, 0x00000000,
0x0B3, 0x0007C760,
0xB0000000, 0x00000000,
@@ -6522,6 +12743,18 @@ static const u32 rtw8822c_rf_a[] = {
0x0B6, 0x000387F8,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0B6, 0x000387F8,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
0xA0000000, 0x00000000,
0x0B6, 0x000187F8,
0xB0000000, 0x00000000,
@@ -6552,12 +12785,33 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0xA0000000, 0x00000000,
0x0B3, 0x0007C700,
0xB0000000, 0x00000000,
0x018, 0x0001B124,
0xFFE, 0x00000000,
0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x0007C760,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
@@ -6574,6 +12828,26 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0xA0000000, 0x00000000,
0x0B3, 0x0007C760,
0xB0000000, 0x00000000,
@@ -6605,6 +12879,26 @@ static const u32 rtw8822c_rf_a[] = {
0x0DD, 0x00000540,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0DD, 0x00000540,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
0xA0000000, 0x00000000,
0x0DD, 0x00000500,
0xB0000000, 0x00000000,
@@ -6714,7 +13008,37 @@ static const u32 rtw8822c_rf_b[] = {
0x093, 0x0008483F,
0x0EF, 0x00080000,
0x033, 0x00000001,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0xA0000000, 0x00000000,
0x03F, 0x00091230,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0DE, 0x00000020,
0x81000001, 0x00000000, 0x40000000, 0x00000000,
@@ -6733,6 +13057,26 @@ static const u32 rtw8822c_rf_b[] = {
0x08E, 0x000A5540,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
0xA0000000, 0x00000000,
0x08E, 0x000A5540,
0xB0000000, 0x00000000,
@@ -6812,6 +13156,96 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000002,
0x03F, 0x0000002A,
0x0EE, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
0xA0000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
@@ -7030,6 +13464,266 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000004,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
@@ -7259,6 +13953,256 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000014,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773E8,
@@ -7487,6 +14431,256 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000024,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773E8,
@@ -7715,6 +14909,256 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000034,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773E8,
@@ -7943,6 +15387,256 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000044,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773E8,
@@ -8171,6 +15865,256 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000054,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773E8,
@@ -8215,6 +16159,26 @@ static const u32 rtw8822c_rf_b[] = {
0x0EF, 0x00000000,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
0xA0000000, 0x00000000,
0x0EF, 0x00000000,
0xB0000000, 0x00000000,
@@ -8257,10 +16221,10 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
- 0x03F, 0x0002F81C,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
- 0x03E, 0x00001C86,
- 0x03F, 0x00020000,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -8404,10 +16368,157 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
- 0x03F, 0x0002F81C,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -8526,7 +16637,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -8551,10 +16662,304 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -8673,7 +17078,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -8698,10 +17103,304 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -8820,7 +17519,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -8847,8 +17546,155 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -8967,7 +17813,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -8992,10 +17838,157 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
- 0x03F, 0x0002F81C,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -9114,7 +18107,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -9139,10 +18132,304 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -9261,7 +18548,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -9288,8 +18575,155 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -9576,6 +19010,26 @@ static const u32 rtw8822c_rf_b[] = {
0x063, 0x00000002,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
0xA0000000, 0x00000000,
0x063, 0x00000C02,
0xB0000000, 0x00000000,
@@ -9796,6 +19250,276 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x00017239,
0x030, 0x00018209,
0x030, 0x00019239,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
0xA0000000, 0x00000000,
0x030, 0x00000233,
0x030, 0x00001233,
@@ -9930,6 +19654,136 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x00009334,
0x030, 0x0000A334,
0x030, 0x0000B334,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
0xA0000000, 0x00000000,
0x030, 0x00000232,
0x030, 0x00001232,
@@ -9957,6 +19811,99 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x0000C330,
0x0EF, 0x00000000,
0x0EE, 0x00010000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0xA0000000, 0x00000000,
0x033, 0x00000200,
0x03F, 0x0000006A,
0x033, 0x00000201,
@@ -9979,6 +19926,100 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000CF4,
0x033, 0x0000020A,
0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0xA0000000, 0x00000000,
0x033, 0x00000280,
0x03F, 0x0000006A,
0x033, 0x00000281,
@@ -10001,6 +20042,104 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000CF4,
0x033, 0x0000028A,
0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0xA0000000, 0x00000000,
0x033, 0x00000300,
0x03F, 0x0000006A,
0x033, 0x00000301,
@@ -10024,6 +20163,7 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x0000030A,
0x03F, 0x00000CF7,
0x0EE, 0x00000000,
+ 0xB0000000, 0x00000000,
0x051, 0x0003C800,
0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
@@ -10041,6 +20181,26 @@ static const u32 rtw8822c_rf_b[] = {
0x052, 0x000902CA,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
0xA0000000, 0x00000000,
0x052, 0x000942C0,
0xB0000000, 0x00000000,
@@ -10057,6 +20217,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10076,6 +20256,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10088,6 +20288,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10107,6 +20327,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10128,6 +20368,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10140,6 +20400,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10159,6 +20439,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10171,6 +20471,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10190,6 +20510,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10211,6 +20551,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10223,6 +20583,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10242,6 +20622,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10254,6 +20654,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10273,6 +20693,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10294,6 +20734,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10306,6 +20766,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10325,6 +20805,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10337,6 +20837,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10356,6 +20876,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10377,6 +20917,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10389,6 +20949,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10408,6 +20988,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10420,6 +21020,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10439,6 +21059,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10460,6 +21100,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10472,6 +21132,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10484,13 +21164,33 @@ static const u32 rtw8822c_rf_b[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10503,6 +21203,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10515,13 +21235,33 @@ static const u32 rtw8822c_rf_b[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10536,13 +21276,33 @@ static const u32 rtw8822c_rf_b[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x0002C246,
+ 0x03F, 0x0002CA46,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x0002C246,
+ 0x03F, 0x0002CA46,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x0002C246,
+ 0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x0002C246,
+ 0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10555,6 +21315,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10574,6 +21354,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10586,6 +21386,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10605,6 +21425,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10626,6 +21466,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10638,6 +21498,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10657,6 +21537,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10669,6 +21569,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10688,6 +21608,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10709,6 +21649,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10721,6 +21681,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10740,6 +21720,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10752,6 +21752,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10771,6 +21791,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10792,6 +21832,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10804,6 +21864,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10823,6 +21903,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10835,6 +21935,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10854,6 +21974,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10875,6 +22015,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10887,6 +22047,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10906,6 +22086,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10918,6 +22118,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10937,6 +22157,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10958,6 +22198,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10970,6 +22230,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10989,6 +22269,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11001,6 +22301,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11020,6 +22340,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11032,6 +22372,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000020,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11051,6 +22411,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11063,6 +22443,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11082,6 +22482,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11094,6 +22514,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11113,6 +22553,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11134,6 +22594,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11146,6 +22626,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11165,6 +22665,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11177,6 +22697,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11196,6 +22736,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11217,6 +22777,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11238,6 +22818,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0001CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11427,6 +23027,236 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF4,
0x033, 0x0000006A,
0x03F, 0x00000DF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000487,
@@ -11635,6 +23465,236 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF4,
0x033, 0x0000002A,
0x03F, 0x00000DF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000487,
@@ -11660,7 +23720,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF7,
0xB0000000, 0x00000000,
0x0EE, 0x00000000,
- 0x05C, 0x000FCC00,
+ 0x05C, 0x000FC000,
0x067, 0x0000A505,
0x0D3, 0x00000542,
0x043, 0x00005000,
@@ -11710,6 +23770,10 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000007,
0x03F, 0x00000002,
0x0EF, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000001,
+ 0x03F, 0x000916BF,
+ 0x0EF, 0x00000000,
};
RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_b, B);
@@ -11717,394 +23781,1961 @@ RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_b, B);
static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 0, 0, 0, 0, 1, 72, },
{ 2, 0, 0, 0, 1, 60, },
+ { 1, 0, 0, 0, 1, 68, },
+ { 3, 0, 0, 0, 1, 72, },
+ { 4, 0, 0, 0, 1, 76, },
+ { 5, 0, 0, 0, 1, 60, },
+ { 6, 0, 0, 0, 1, 72, },
+ { 7, 0, 0, 0, 1, 60, },
+ { 8, 0, 0, 0, 1, 72, },
+ { 9, 0, 0, 0, 1, 60, },
{ 0, 0, 0, 0, 2, 72, },
{ 2, 0, 0, 0, 2, 60, },
+ { 1, 0, 0, 0, 2, 68, },
+ { 3, 0, 0, 0, 2, 72, },
+ { 4, 0, 0, 0, 2, 76, },
+ { 5, 0, 0, 0, 2, 60, },
+ { 6, 0, 0, 0, 2, 72, },
+ { 7, 0, 0, 0, 2, 60, },
+ { 8, 0, 0, 0, 2, 72, },
+ { 9, 0, 0, 0, 2, 60, },
{ 0, 0, 0, 0, 3, 76, },
{ 2, 0, 0, 0, 3, 60, },
+ { 1, 0, 0, 0, 3, 68, },
+ { 3, 0, 0, 0, 3, 76, },
+ { 4, 0, 0, 0, 3, 76, },
+ { 5, 0, 0, 0, 3, 60, },
+ { 6, 0, 0, 0, 3, 76, },
+ { 7, 0, 0, 0, 3, 60, },
+ { 8, 0, 0, 0, 3, 76, },
+ { 9, 0, 0, 0, 3, 60, },
{ 0, 0, 0, 0, 4, 76, },
{ 2, 0, 0, 0, 4, 60, },
+ { 1, 0, 0, 0, 4, 68, },
+ { 3, 0, 0, 0, 4, 76, },
+ { 4, 0, 0, 0, 4, 76, },
+ { 5, 0, 0, 0, 4, 60, },
+ { 6, 0, 0, 0, 4, 76, },
+ { 7, 0, 0, 0, 4, 60, },
+ { 8, 0, 0, 0, 4, 76, },
+ { 9, 0, 0, 0, 4, 60, },
{ 0, 0, 0, 0, 5, 76, },
{ 2, 0, 0, 0, 5, 60, },
+ { 1, 0, 0, 0, 5, 68, },
+ { 3, 0, 0, 0, 5, 76, },
+ { 4, 0, 0, 0, 5, 76, },
+ { 5, 0, 0, 0, 5, 60, },
+ { 6, 0, 0, 0, 5, 76, },
+ { 7, 0, 0, 0, 5, 60, },
+ { 8, 0, 0, 0, 5, 76, },
+ { 9, 0, 0, 0, 5, 60, },
{ 0, 0, 0, 0, 6, 76, },
{ 2, 0, 0, 0, 6, 60, },
+ { 1, 0, 0, 0, 6, 68, },
+ { 3, 0, 0, 0, 6, 76, },
+ { 4, 0, 0, 0, 6, 76, },
+ { 5, 0, 0, 0, 6, 60, },
+ { 6, 0, 0, 0, 6, 76, },
+ { 7, 0, 0, 0, 6, 60, },
+ { 8, 0, 0, 0, 6, 76, },
+ { 9, 0, 0, 0, 6, 60, },
{ 0, 0, 0, 0, 7, 76, },
{ 2, 0, 0, 0, 7, 60, },
+ { 1, 0, 0, 0, 7, 68, },
+ { 3, 0, 0, 0, 7, 76, },
+ { 4, 0, 0, 0, 7, 76, },
+ { 5, 0, 0, 0, 7, 60, },
+ { 6, 0, 0, 0, 7, 76, },
+ { 7, 0, 0, 0, 7, 60, },
+ { 8, 0, 0, 0, 7, 76, },
+ { 9, 0, 0, 0, 7, 60, },
{ 0, 0, 0, 0, 8, 76, },
{ 2, 0, 0, 0, 8, 60, },
+ { 1, 0, 0, 0, 8, 68, },
+ { 3, 0, 0, 0, 8, 76, },
+ { 4, 0, 0, 0, 8, 76, },
+ { 5, 0, 0, 0, 8, 60, },
+ { 6, 0, 0, 0, 8, 76, },
+ { 7, 0, 0, 0, 8, 60, },
+ { 8, 0, 0, 0, 8, 76, },
+ { 9, 0, 0, 0, 8, 60, },
{ 0, 0, 0, 0, 9, 76, },
{ 2, 0, 0, 0, 9, 60, },
+ { 1, 0, 0, 0, 9, 68, },
+ { 3, 0, 0, 0, 9, 76, },
+ { 4, 0, 0, 0, 9, 76, },
+ { 5, 0, 0, 0, 9, 60, },
+ { 6, 0, 0, 0, 9, 76, },
+ { 7, 0, 0, 0, 9, 60, },
+ { 8, 0, 0, 0, 9, 76, },
+ { 9, 0, 0, 0, 9, 60, },
{ 0, 0, 0, 0, 10, 72, },
{ 2, 0, 0, 0, 10, 60, },
+ { 1, 0, 0, 0, 10, 68, },
+ { 3, 0, 0, 0, 10, 72, },
+ { 4, 0, 0, 0, 10, 76, },
+ { 5, 0, 0, 0, 10, 60, },
+ { 6, 0, 0, 0, 10, 72, },
+ { 7, 0, 0, 0, 10, 60, },
+ { 8, 0, 0, 0, 10, 72, },
+ { 9, 0, 0, 0, 10, 60, },
{ 0, 0, 0, 0, 11, 72, },
{ 2, 0, 0, 0, 11, 60, },
+ { 1, 0, 0, 0, 11, 68, },
+ { 3, 0, 0, 0, 11, 72, },
+ { 4, 0, 0, 0, 11, 76, },
+ { 5, 0, 0, 0, 11, 60, },
+ { 6, 0, 0, 0, 11, 72, },
+ { 7, 0, 0, 0, 11, 60, },
+ { 8, 0, 0, 0, 11, 72, },
+ { 9, 0, 0, 0, 11, 60, },
{ 0, 0, 0, 0, 12, 52, },
{ 2, 0, 0, 0, 12, 60, },
+ { 1, 0, 0, 0, 12, 68, },
+ { 3, 0, 0, 0, 12, 52, },
+ { 4, 0, 0, 0, 12, 76, },
+ { 5, 0, 0, 0, 12, 60, },
+ { 6, 0, 0, 0, 12, 52, },
+ { 7, 0, 0, 0, 12, 60, },
+ { 8, 0, 0, 0, 12, 52, },
+ { 9, 0, 0, 0, 12, 60, },
{ 0, 0, 0, 0, 13, 48, },
{ 2, 0, 0, 0, 13, 60, },
+ { 1, 0, 0, 0, 13, 68, },
+ { 3, 0, 0, 0, 13, 48, },
+ { 4, 0, 0, 0, 13, 76, },
+ { 5, 0, 0, 0, 13, 60, },
+ { 6, 0, 0, 0, 13, 48, },
+ { 7, 0, 0, 0, 13, 60, },
+ { 8, 0, 0, 0, 13, 48, },
+ { 9, 0, 0, 0, 13, 60, },
{ 0, 0, 0, 0, 14, 127, },
{ 2, 0, 0, 0, 14, 127, },
+ { 1, 0, 0, 0, 14, 68, },
+ { 3, 0, 0, 0, 14, 127, },
+ { 4, 0, 0, 0, 14, 127, },
+ { 5, 0, 0, 0, 14, 127, },
+ { 6, 0, 0, 0, 14, 127, },
+ { 7, 0, 0, 0, 14, 127, },
+ { 8, 0, 0, 0, 14, 127, },
+ { 9, 0, 0, 0, 14, 127, },
{ 0, 0, 0, 1, 1, 52, },
{ 2, 0, 0, 1, 1, 60, },
+ { 1, 0, 0, 1, 1, 76, },
+ { 3, 0, 0, 1, 1, 52, },
+ { 4, 0, 0, 1, 1, 76, },
+ { 5, 0, 0, 1, 1, 60, },
+ { 6, 0, 0, 1, 1, 52, },
+ { 7, 0, 0, 1, 1, 60, },
+ { 8, 0, 0, 1, 1, 52, },
+ { 9, 0, 0, 1, 1, 60, },
{ 0, 0, 0, 1, 2, 60, },
{ 2, 0, 0, 1, 2, 60, },
+ { 1, 0, 0, 1, 2, 76, },
+ { 3, 0, 0, 1, 2, 60, },
+ { 4, 0, 0, 1, 2, 76, },
+ { 5, 0, 0, 1, 2, 60, },
+ { 6, 0, 0, 1, 2, 60, },
+ { 7, 0, 0, 1, 2, 60, },
+ { 8, 0, 0, 1, 2, 60, },
+ { 9, 0, 0, 1, 2, 60, },
{ 0, 0, 0, 1, 3, 64, },
{ 2, 0, 0, 1, 3, 60, },
+ { 1, 0, 0, 1, 3, 76, },
+ { 3, 0, 0, 1, 3, 64, },
+ { 4, 0, 0, 1, 3, 76, },
+ { 5, 0, 0, 1, 3, 60, },
+ { 6, 0, 0, 1, 3, 64, },
+ { 7, 0, 0, 1, 3, 60, },
+ { 8, 0, 0, 1, 3, 64, },
+ { 9, 0, 0, 1, 3, 60, },
{ 0, 0, 0, 1, 4, 68, },
{ 2, 0, 0, 1, 4, 60, },
+ { 1, 0, 0, 1, 4, 76, },
+ { 3, 0, 0, 1, 4, 68, },
+ { 4, 0, 0, 1, 4, 76, },
+ { 5, 0, 0, 1, 4, 60, },
+ { 6, 0, 0, 1, 4, 68, },
+ { 7, 0, 0, 1, 4, 60, },
+ { 8, 0, 0, 1, 4, 68, },
+ { 9, 0, 0, 1, 4, 60, },
{ 0, 0, 0, 1, 5, 76, },
{ 2, 0, 0, 1, 5, 60, },
+ { 1, 0, 0, 1, 5, 76, },
+ { 3, 0, 0, 1, 5, 76, },
+ { 4, 0, 0, 1, 5, 76, },
+ { 5, 0, 0, 1, 5, 60, },
+ { 6, 0, 0, 1, 5, 76, },
+ { 7, 0, 0, 1, 5, 60, },
+ { 8, 0, 0, 1, 5, 76, },
+ { 9, 0, 0, 1, 5, 60, },
{ 0, 0, 0, 1, 6, 76, },
{ 2, 0, 0, 1, 6, 60, },
+ { 1, 0, 0, 1, 6, 76, },
+ { 3, 0, 0, 1, 6, 76, },
+ { 4, 0, 0, 1, 6, 76, },
+ { 5, 0, 0, 1, 6, 60, },
+ { 6, 0, 0, 1, 6, 76, },
+ { 7, 0, 0, 1, 6, 60, },
+ { 8, 0, 0, 1, 6, 76, },
+ { 9, 0, 0, 1, 6, 60, },
{ 0, 0, 0, 1, 7, 76, },
{ 2, 0, 0, 1, 7, 60, },
+ { 1, 0, 0, 1, 7, 76, },
+ { 3, 0, 0, 1, 7, 76, },
+ { 4, 0, 0, 1, 7, 76, },
+ { 5, 0, 0, 1, 7, 60, },
+ { 6, 0, 0, 1, 7, 76, },
+ { 7, 0, 0, 1, 7, 60, },
+ { 8, 0, 0, 1, 7, 76, },
+ { 9, 0, 0, 1, 7, 60, },
{ 0, 0, 0, 1, 8, 68, },
{ 2, 0, 0, 1, 8, 60, },
+ { 1, 0, 0, 1, 8, 76, },
+ { 3, 0, 0, 1, 8, 68, },
+ { 4, 0, 0, 1, 8, 76, },
+ { 5, 0, 0, 1, 8, 60, },
+ { 6, 0, 0, 1, 8, 68, },
+ { 7, 0, 0, 1, 8, 60, },
+ { 8, 0, 0, 1, 8, 68, },
+ { 9, 0, 0, 1, 8, 60, },
{ 0, 0, 0, 1, 9, 64, },
{ 2, 0, 0, 1, 9, 60, },
+ { 1, 0, 0, 1, 9, 76, },
+ { 3, 0, 0, 1, 9, 64, },
+ { 4, 0, 0, 1, 9, 76, },
+ { 5, 0, 0, 1, 9, 60, },
+ { 6, 0, 0, 1, 9, 64, },
+ { 7, 0, 0, 1, 9, 60, },
+ { 8, 0, 0, 1, 9, 64, },
+ { 9, 0, 0, 1, 9, 60, },
{ 0, 0, 0, 1, 10, 60, },
{ 2, 0, 0, 1, 10, 60, },
+ { 1, 0, 0, 1, 10, 76, },
+ { 3, 0, 0, 1, 10, 60, },
+ { 4, 0, 0, 1, 10, 76, },
+ { 5, 0, 0, 1, 10, 60, },
+ { 6, 0, 0, 1, 10, 60, },
+ { 7, 0, 0, 1, 10, 60, },
+ { 8, 0, 0, 1, 10, 60, },
+ { 9, 0, 0, 1, 10, 60, },
{ 0, 0, 0, 1, 11, 52, },
{ 2, 0, 0, 1, 11, 60, },
+ { 1, 0, 0, 1, 11, 76, },
+ { 3, 0, 0, 1, 11, 52, },
+ { 4, 0, 0, 1, 11, 76, },
+ { 5, 0, 0, 1, 11, 60, },
+ { 6, 0, 0, 1, 11, 52, },
+ { 7, 0, 0, 1, 11, 60, },
+ { 8, 0, 0, 1, 11, 52, },
+ { 9, 0, 0, 1, 11, 60, },
{ 0, 0, 0, 1, 12, 40, },
{ 2, 0, 0, 1, 12, 60, },
+ { 1, 0, 0, 1, 12, 76, },
+ { 3, 0, 0, 1, 12, 40, },
+ { 4, 0, 0, 1, 12, 76, },
+ { 5, 0, 0, 1, 12, 60, },
+ { 6, 0, 0, 1, 12, 40, },
+ { 7, 0, 0, 1, 12, 60, },
+ { 8, 0, 0, 1, 12, 40, },
+ { 9, 0, 0, 1, 12, 60, },
{ 0, 0, 0, 1, 13, 28, },
{ 2, 0, 0, 1, 13, 60, },
+ { 1, 0, 0, 1, 13, 76, },
+ { 3, 0, 0, 1, 13, 28, },
+ { 4, 0, 0, 1, 13, 70, },
+ { 5, 0, 0, 1, 13, 60, },
+ { 6, 0, 0, 1, 13, 28, },
+ { 7, 0, 0, 1, 13, 60, },
+ { 8, 0, 0, 1, 13, 28, },
+ { 9, 0, 0, 1, 13, 60, },
{ 0, 0, 0, 1, 14, 127, },
{ 2, 0, 0, 1, 14, 127, },
+ { 1, 0, 0, 1, 14, 127, },
+ { 3, 0, 0, 1, 14, 127, },
+ { 4, 0, 0, 1, 14, 127, },
+ { 5, 0, 0, 1, 14, 127, },
+ { 6, 0, 0, 1, 14, 127, },
+ { 7, 0, 0, 1, 14, 127, },
+ { 8, 0, 0, 1, 14, 127, },
+ { 9, 0, 0, 1, 14, 127, },
{ 0, 0, 0, 2, 1, 52, },
{ 2, 0, 0, 2, 1, 60, },
+ { 1, 0, 0, 2, 1, 76, },
+ { 3, 0, 0, 2, 1, 52, },
+ { 4, 0, 0, 2, 1, 76, },
+ { 5, 0, 0, 2, 1, 60, },
+ { 6, 0, 0, 2, 1, 52, },
+ { 7, 0, 0, 2, 1, 60, },
+ { 8, 0, 0, 2, 1, 52, },
+ { 9, 0, 0, 2, 1, 60, },
{ 0, 0, 0, 2, 2, 60, },
{ 2, 0, 0, 2, 2, 60, },
+ { 1, 0, 0, 2, 2, 76, },
+ { 3, 0, 0, 2, 2, 60, },
+ { 4, 0, 0, 2, 2, 76, },
+ { 5, 0, 0, 2, 2, 60, },
+ { 6, 0, 0, 2, 2, 60, },
+ { 7, 0, 0, 2, 2, 60, },
+ { 8, 0, 0, 2, 2, 60, },
+ { 9, 0, 0, 2, 2, 60, },
{ 0, 0, 0, 2, 3, 64, },
{ 2, 0, 0, 2, 3, 60, },
+ { 1, 0, 0, 2, 3, 76, },
+ { 3, 0, 0, 2, 3, 64, },
+ { 4, 0, 0, 2, 3, 76, },
+ { 5, 0, 0, 2, 3, 60, },
+ { 6, 0, 0, 2, 3, 64, },
+ { 7, 0, 0, 2, 3, 60, },
+ { 8, 0, 0, 2, 3, 64, },
+ { 9, 0, 0, 2, 3, 60, },
{ 0, 0, 0, 2, 4, 68, },
{ 2, 0, 0, 2, 4, 60, },
+ { 1, 0, 0, 2, 4, 76, },
+ { 3, 0, 0, 2, 4, 68, },
+ { 4, 0, 0, 2, 4, 76, },
+ { 5, 0, 0, 2, 4, 60, },
+ { 6, 0, 0, 2, 4, 68, },
+ { 7, 0, 0, 2, 4, 60, },
+ { 8, 0, 0, 2, 4, 68, },
+ { 9, 0, 0, 2, 4, 60, },
{ 0, 0, 0, 2, 5, 76, },
{ 2, 0, 0, 2, 5, 60, },
+ { 1, 0, 0, 2, 5, 76, },
+ { 3, 0, 0, 2, 5, 76, },
+ { 4, 0, 0, 2, 5, 76, },
+ { 5, 0, 0, 2, 5, 60, },
+ { 6, 0, 0, 2, 5, 76, },
+ { 7, 0, 0, 2, 5, 60, },
+ { 8, 0, 0, 2, 5, 76, },
+ { 9, 0, 0, 2, 5, 60, },
{ 0, 0, 0, 2, 6, 76, },
{ 2, 0, 0, 2, 6, 60, },
+ { 1, 0, 0, 2, 6, 76, },
+ { 3, 0, 0, 2, 6, 76, },
+ { 4, 0, 0, 2, 6, 76, },
+ { 5, 0, 0, 2, 6, 60, },
+ { 6, 0, 0, 2, 6, 76, },
+ { 7, 0, 0, 2, 6, 60, },
+ { 8, 0, 0, 2, 6, 76, },
+ { 9, 0, 0, 2, 6, 60, },
{ 0, 0, 0, 2, 7, 76, },
{ 2, 0, 0, 2, 7, 60, },
+ { 1, 0, 0, 2, 7, 76, },
+ { 3, 0, 0, 2, 7, 76, },
+ { 4, 0, 0, 2, 7, 76, },
+ { 5, 0, 0, 2, 7, 60, },
+ { 6, 0, 0, 2, 7, 76, },
+ { 7, 0, 0, 2, 7, 60, },
+ { 8, 0, 0, 2, 7, 76, },
+ { 9, 0, 0, 2, 7, 60, },
{ 0, 0, 0, 2, 8, 68, },
{ 2, 0, 0, 2, 8, 60, },
+ { 1, 0, 0, 2, 8, 76, },
+ { 3, 0, 0, 2, 8, 68, },
+ { 4, 0, 0, 2, 8, 76, },
+ { 5, 0, 0, 2, 8, 60, },
+ { 6, 0, 0, 2, 8, 68, },
+ { 7, 0, 0, 2, 8, 60, },
+ { 8, 0, 0, 2, 8, 68, },
+ { 9, 0, 0, 2, 8, 60, },
{ 0, 0, 0, 2, 9, 64, },
{ 2, 0, 0, 2, 9, 60, },
+ { 1, 0, 0, 2, 9, 76, },
+ { 3, 0, 0, 2, 9, 64, },
+ { 4, 0, 0, 2, 9, 76, },
+ { 5, 0, 0, 2, 9, 60, },
+ { 6, 0, 0, 2, 9, 64, },
+ { 7, 0, 0, 2, 9, 60, },
+ { 8, 0, 0, 2, 9, 64, },
+ { 9, 0, 0, 2, 9, 60, },
{ 0, 0, 0, 2, 10, 60, },
{ 2, 0, 0, 2, 10, 60, },
+ { 1, 0, 0, 2, 10, 76, },
+ { 3, 0, 0, 2, 10, 60, },
+ { 4, 0, 0, 2, 10, 76, },
+ { 5, 0, 0, 2, 10, 60, },
+ { 6, 0, 0, 2, 10, 60, },
+ { 7, 0, 0, 2, 10, 60, },
+ { 8, 0, 0, 2, 10, 60, },
+ { 9, 0, 0, 2, 10, 60, },
{ 0, 0, 0, 2, 11, 52, },
{ 2, 0, 0, 2, 11, 60, },
+ { 1, 0, 0, 2, 11, 76, },
+ { 3, 0, 0, 2, 11, 52, },
+ { 4, 0, 0, 2, 11, 76, },
+ { 5, 0, 0, 2, 11, 60, },
+ { 6, 0, 0, 2, 11, 52, },
+ { 7, 0, 0, 2, 11, 60, },
+ { 8, 0, 0, 2, 11, 52, },
+ { 9, 0, 0, 2, 11, 60, },
{ 0, 0, 0, 2, 12, 40, },
{ 2, 0, 0, 2, 12, 60, },
+ { 1, 0, 0, 2, 12, 76, },
+ { 3, 0, 0, 2, 12, 40, },
+ { 4, 0, 0, 2, 12, 76, },
+ { 5, 0, 0, 2, 12, 60, },
+ { 6, 0, 0, 2, 12, 40, },
+ { 7, 0, 0, 2, 12, 60, },
+ { 8, 0, 0, 2, 12, 40, },
+ { 9, 0, 0, 2, 12, 60, },
{ 0, 0, 0, 2, 13, 28, },
{ 2, 0, 0, 2, 13, 60, },
+ { 1, 0, 0, 2, 13, 76, },
+ { 3, 0, 0, 2, 13, 28, },
+ { 4, 0, 0, 2, 13, 72, },
+ { 5, 0, 0, 2, 13, 60, },
+ { 6, 0, 0, 2, 13, 28, },
+ { 7, 0, 0, 2, 13, 60, },
+ { 8, 0, 0, 2, 13, 28, },
+ { 9, 0, 0, 2, 13, 60, },
{ 0, 0, 0, 2, 14, 127, },
{ 2, 0, 0, 2, 14, 127, },
+ { 1, 0, 0, 2, 14, 127, },
+ { 3, 0, 0, 2, 14, 127, },
+ { 4, 0, 0, 2, 14, 127, },
+ { 5, 0, 0, 2, 14, 127, },
+ { 6, 0, 0, 2, 14, 127, },
+ { 7, 0, 0, 2, 14, 127, },
+ { 8, 0, 0, 2, 14, 127, },
+ { 9, 0, 0, 2, 14, 127, },
{ 0, 0, 0, 3, 1, 52, },
{ 2, 0, 0, 3, 1, 36, },
+ { 1, 0, 0, 3, 1, 66, },
+ { 3, 0, 0, 3, 1, 52, },
+ { 4, 0, 0, 3, 1, 68, },
+ { 5, 0, 0, 3, 1, 36, },
+ { 6, 0, 0, 3, 1, 52, },
+ { 7, 0, 0, 3, 1, 36, },
+ { 8, 0, 0, 3, 1, 52, },
+ { 9, 0, 0, 3, 1, 36, },
{ 0, 0, 0, 3, 2, 60, },
{ 2, 0, 0, 3, 2, 36, },
+ { 1, 0, 0, 3, 2, 66, },
+ { 3, 0, 0, 3, 2, 60, },
+ { 4, 0, 0, 3, 2, 70, },
+ { 5, 0, 0, 3, 2, 36, },
+ { 6, 0, 0, 3, 2, 60, },
+ { 7, 0, 0, 3, 2, 36, },
+ { 8, 0, 0, 3, 2, 60, },
+ { 9, 0, 0, 3, 2, 36, },
{ 0, 0, 0, 3, 3, 64, },
{ 2, 0, 0, 3, 3, 36, },
+ { 1, 0, 0, 3, 3, 66, },
+ { 3, 0, 0, 3, 3, 64, },
+ { 4, 0, 0, 3, 3, 70, },
+ { 5, 0, 0, 3, 3, 36, },
+ { 6, 0, 0, 3, 3, 64, },
+ { 7, 0, 0, 3, 3, 36, },
+ { 8, 0, 0, 3, 3, 64, },
+ { 9, 0, 0, 3, 3, 36, },
{ 0, 0, 0, 3, 4, 68, },
{ 2, 0, 0, 3, 4, 36, },
+ { 1, 0, 0, 3, 4, 66, },
+ { 3, 0, 0, 3, 4, 68, },
+ { 4, 0, 0, 3, 4, 70, },
+ { 5, 0, 0, 3, 4, 36, },
+ { 6, 0, 0, 3, 4, 68, },
+ { 7, 0, 0, 3, 4, 36, },
+ { 8, 0, 0, 3, 4, 68, },
+ { 9, 0, 0, 3, 4, 36, },
{ 0, 0, 0, 3, 5, 76, },
{ 2, 0, 0, 3, 5, 36, },
+ { 1, 0, 0, 3, 5, 66, },
+ { 3, 0, 0, 3, 5, 76, },
+ { 4, 0, 0, 3, 5, 70, },
+ { 5, 0, 0, 3, 5, 36, },
+ { 6, 0, 0, 3, 5, 76, },
+ { 7, 0, 0, 3, 5, 36, },
+ { 8, 0, 0, 3, 5, 76, },
+ { 9, 0, 0, 3, 5, 36, },
{ 0, 0, 0, 3, 6, 76, },
{ 2, 0, 0, 3, 6, 36, },
+ { 1, 0, 0, 3, 6, 66, },
+ { 3, 0, 0, 3, 6, 76, },
+ { 4, 0, 0, 3, 6, 70, },
+ { 5, 0, 0, 3, 6, 36, },
+ { 6, 0, 0, 3, 6, 76, },
+ { 7, 0, 0, 3, 6, 36, },
+ { 8, 0, 0, 3, 6, 76, },
+ { 9, 0, 0, 3, 6, 36, },
{ 0, 0, 0, 3, 7, 76, },
{ 2, 0, 0, 3, 7, 36, },
+ { 1, 0, 0, 3, 7, 66, },
+ { 3, 0, 0, 3, 7, 76, },
+ { 4, 0, 0, 3, 7, 70, },
+ { 5, 0, 0, 3, 7, 36, },
+ { 6, 0, 0, 3, 7, 76, },
+ { 7, 0, 0, 3, 7, 36, },
+ { 8, 0, 0, 3, 7, 76, },
+ { 9, 0, 0, 3, 7, 36, },
{ 0, 0, 0, 3, 8, 68, },
{ 2, 0, 0, 3, 8, 36, },
+ { 1, 0, 0, 3, 8, 66, },
+ { 3, 0, 0, 3, 8, 68, },
+ { 4, 0, 0, 3, 8, 70, },
+ { 5, 0, 0, 3, 8, 36, },
+ { 6, 0, 0, 3, 8, 68, },
+ { 7, 0, 0, 3, 8, 36, },
+ { 8, 0, 0, 3, 8, 68, },
+ { 9, 0, 0, 3, 8, 36, },
{ 0, 0, 0, 3, 9, 64, },
{ 2, 0, 0, 3, 9, 36, },
+ { 1, 0, 0, 3, 9, 66, },
+ { 3, 0, 0, 3, 9, 64, },
+ { 4, 0, 0, 3, 9, 70, },
+ { 5, 0, 0, 3, 9, 36, },
+ { 6, 0, 0, 3, 9, 64, },
+ { 7, 0, 0, 3, 9, 36, },
+ { 8, 0, 0, 3, 9, 64, },
+ { 9, 0, 0, 3, 9, 36, },
{ 0, 0, 0, 3, 10, 60, },
{ 2, 0, 0, 3, 10, 36, },
+ { 1, 0, 0, 3, 10, 66, },
+ { 3, 0, 0, 3, 10, 60, },
+ { 4, 0, 0, 3, 10, 70, },
+ { 5, 0, 0, 3, 10, 36, },
+ { 6, 0, 0, 3, 10, 60, },
+ { 7, 0, 0, 3, 10, 36, },
+ { 8, 0, 0, 3, 10, 60, },
+ { 9, 0, 0, 3, 10, 36, },
{ 0, 0, 0, 3, 11, 52, },
{ 2, 0, 0, 3, 11, 36, },
+ { 1, 0, 0, 3, 11, 66, },
+ { 3, 0, 0, 3, 11, 52, },
+ { 4, 0, 0, 3, 11, 70, },
+ { 5, 0, 0, 3, 11, 36, },
+ { 6, 0, 0, 3, 11, 52, },
+ { 7, 0, 0, 3, 11, 36, },
+ { 8, 0, 0, 3, 11, 52, },
+ { 9, 0, 0, 3, 11, 36, },
{ 0, 0, 0, 3, 12, 40, },
{ 2, 0, 0, 3, 12, 36, },
+ { 1, 0, 0, 3, 12, 66, },
+ { 3, 0, 0, 3, 12, 40, },
+ { 4, 0, 0, 3, 12, 70, },
+ { 5, 0, 0, 3, 12, 36, },
+ { 6, 0, 0, 3, 12, 40, },
+ { 7, 0, 0, 3, 12, 36, },
+ { 8, 0, 0, 3, 12, 40, },
+ { 9, 0, 0, 3, 12, 36, },
{ 0, 0, 0, 3, 13, 28, },
{ 2, 0, 0, 3, 13, 36, },
+ { 1, 0, 0, 3, 13, 66, },
+ { 3, 0, 0, 3, 13, 28, },
+ { 4, 0, 0, 3, 13, 62, },
+ { 5, 0, 0, 3, 13, 36, },
+ { 6, 0, 0, 3, 13, 28, },
+ { 7, 0, 0, 3, 13, 36, },
+ { 8, 0, 0, 3, 13, 28, },
+ { 9, 0, 0, 3, 13, 36, },
{ 0, 0, 0, 3, 14, 127, },
{ 2, 0, 0, 3, 14, 127, },
+ { 1, 0, 0, 3, 14, 127, },
+ { 3, 0, 0, 3, 14, 127, },
+ { 4, 0, 0, 3, 14, 127, },
+ { 5, 0, 0, 3, 14, 127, },
+ { 6, 0, 0, 3, 14, 127, },
+ { 7, 0, 0, 3, 14, 127, },
+ { 8, 0, 0, 3, 14, 127, },
+ { 9, 0, 0, 3, 14, 127, },
{ 0, 0, 1, 2, 1, 127, },
{ 2, 0, 1, 2, 1, 127, },
+ { 1, 0, 1, 2, 1, 127, },
+ { 3, 0, 1, 2, 1, 127, },
+ { 4, 0, 1, 2, 1, 127, },
+ { 5, 0, 1, 2, 1, 127, },
+ { 6, 0, 1, 2, 1, 127, },
+ { 7, 0, 1, 2, 1, 127, },
+ { 8, 0, 1, 2, 1, 127, },
+ { 9, 0, 1, 2, 1, 127, },
{ 0, 0, 1, 2, 2, 127, },
{ 2, 0, 1, 2, 2, 127, },
+ { 1, 0, 1, 2, 2, 127, },
+ { 3, 0, 1, 2, 2, 127, },
+ { 4, 0, 1, 2, 2, 127, },
+ { 5, 0, 1, 2, 2, 127, },
+ { 6, 0, 1, 2, 2, 127, },
+ { 7, 0, 1, 2, 2, 127, },
+ { 8, 0, 1, 2, 2, 127, },
+ { 9, 0, 1, 2, 2, 127, },
{ 0, 0, 1, 2, 3, 52, },
{ 2, 0, 1, 2, 3, 60, },
+ { 1, 0, 1, 2, 3, 72, },
+ { 3, 0, 1, 2, 3, 52, },
+ { 4, 0, 1, 2, 3, 72, },
+ { 5, 0, 1, 2, 3, 60, },
+ { 6, 0, 1, 2, 3, 52, },
+ { 7, 0, 1, 2, 3, 60, },
+ { 8, 0, 1, 2, 3, 52, },
+ { 9, 0, 1, 2, 3, 60, },
{ 0, 0, 1, 2, 4, 52, },
{ 2, 0, 1, 2, 4, 60, },
+ { 1, 0, 1, 2, 4, 72, },
+ { 3, 0, 1, 2, 4, 52, },
+ { 4, 0, 1, 2, 4, 72, },
+ { 5, 0, 1, 2, 4, 60, },
+ { 6, 0, 1, 2, 4, 52, },
+ { 7, 0, 1, 2, 4, 60, },
+ { 8, 0, 1, 2, 4, 52, },
+ { 9, 0, 1, 2, 4, 60, },
{ 0, 0, 1, 2, 5, 60, },
{ 2, 0, 1, 2, 5, 60, },
+ { 1, 0, 1, 2, 5, 72, },
+ { 3, 0, 1, 2, 5, 60, },
+ { 4, 0, 1, 2, 5, 72, },
+ { 5, 0, 1, 2, 5, 60, },
+ { 6, 0, 1, 2, 5, 60, },
+ { 7, 0, 1, 2, 5, 60, },
+ { 8, 0, 1, 2, 5, 60, },
+ { 9, 0, 1, 2, 5, 60, },
{ 0, 0, 1, 2, 6, 64, },
{ 2, 0, 1, 2, 6, 60, },
+ { 1, 0, 1, 2, 6, 72, },
+ { 3, 0, 1, 2, 6, 64, },
+ { 4, 0, 1, 2, 6, 72, },
+ { 5, 0, 1, 2, 6, 60, },
+ { 6, 0, 1, 2, 6, 64, },
+ { 7, 0, 1, 2, 6, 60, },
+ { 8, 0, 1, 2, 6, 64, },
+ { 9, 0, 1, 2, 6, 60, },
{ 0, 0, 1, 2, 7, 60, },
{ 2, 0, 1, 2, 7, 60, },
+ { 1, 0, 1, 2, 7, 72, },
+ { 3, 0, 1, 2, 7, 60, },
+ { 4, 0, 1, 2, 7, 72, },
+ { 5, 0, 1, 2, 7, 60, },
+ { 6, 0, 1, 2, 7, 60, },
+ { 7, 0, 1, 2, 7, 60, },
+ { 8, 0, 1, 2, 7, 60, },
+ { 9, 0, 1, 2, 7, 60, },
{ 0, 0, 1, 2, 8, 52, },
{ 2, 0, 1, 2, 8, 60, },
+ { 1, 0, 1, 2, 8, 72, },
+ { 3, 0, 1, 2, 8, 52, },
+ { 4, 0, 1, 2, 8, 72, },
+ { 5, 0, 1, 2, 8, 60, },
+ { 6, 0, 1, 2, 8, 52, },
+ { 7, 0, 1, 2, 8, 60, },
+ { 8, 0, 1, 2, 8, 52, },
+ { 9, 0, 1, 2, 8, 60, },
{ 0, 0, 1, 2, 9, 52, },
{ 2, 0, 1, 2, 9, 60, },
+ { 1, 0, 1, 2, 9, 72, },
+ { 3, 0, 1, 2, 9, 52, },
+ { 4, 0, 1, 2, 9, 72, },
+ { 5, 0, 1, 2, 9, 60, },
+ { 6, 0, 1, 2, 9, 52, },
+ { 7, 0, 1, 2, 9, 60, },
+ { 8, 0, 1, 2, 9, 52, },
+ { 9, 0, 1, 2, 9, 60, },
{ 0, 0, 1, 2, 10, 40, },
{ 2, 0, 1, 2, 10, 60, },
+ { 1, 0, 1, 2, 10, 72, },
+ { 3, 0, 1, 2, 10, 40, },
+ { 4, 0, 1, 2, 10, 72, },
+ { 5, 0, 1, 2, 10, 60, },
+ { 6, 0, 1, 2, 10, 40, },
+ { 7, 0, 1, 2, 10, 60, },
+ { 8, 0, 1, 2, 10, 40, },
+ { 9, 0, 1, 2, 10, 60, },
{ 0, 0, 1, 2, 11, 28, },
{ 2, 0, 1, 2, 11, 60, },
+ { 1, 0, 1, 2, 11, 72, },
+ { 3, 0, 1, 2, 11, 28, },
+ { 4, 0, 1, 2, 11, 70, },
+ { 5, 0, 1, 2, 11, 60, },
+ { 6, 0, 1, 2, 11, 28, },
+ { 7, 0, 1, 2, 11, 60, },
+ { 8, 0, 1, 2, 11, 28, },
+ { 9, 0, 1, 2, 11, 60, },
{ 0, 0, 1, 2, 12, 127, },
{ 2, 0, 1, 2, 12, 127, },
+ { 1, 0, 1, 2, 12, 127, },
+ { 3, 0, 1, 2, 12, 127, },
+ { 4, 0, 1, 2, 12, 127, },
+ { 5, 0, 1, 2, 12, 127, },
+ { 6, 0, 1, 2, 12, 127, },
+ { 7, 0, 1, 2, 12, 127, },
+ { 8, 0, 1, 2, 12, 127, },
+ { 9, 0, 1, 2, 12, 127, },
{ 0, 0, 1, 2, 13, 127, },
{ 2, 0, 1, 2, 13, 127, },
+ { 1, 0, 1, 2, 13, 127, },
+ { 3, 0, 1, 2, 13, 127, },
+ { 4, 0, 1, 2, 13, 127, },
+ { 5, 0, 1, 2, 13, 127, },
+ { 6, 0, 1, 2, 13, 127, },
+ { 7, 0, 1, 2, 13, 127, },
+ { 8, 0, 1, 2, 13, 127, },
+ { 9, 0, 1, 2, 13, 127, },
{ 0, 0, 1, 2, 14, 127, },
{ 2, 0, 1, 2, 14, 127, },
+ { 1, 0, 1, 2, 14, 127, },
+ { 3, 0, 1, 2, 14, 127, },
+ { 4, 0, 1, 2, 14, 127, },
+ { 5, 0, 1, 2, 14, 127, },
+ { 6, 0, 1, 2, 14, 127, },
+ { 7, 0, 1, 2, 14, 127, },
+ { 8, 0, 1, 2, 14, 127, },
+ { 9, 0, 1, 2, 14, 127, },
{ 0, 0, 1, 3, 1, 127, },
{ 2, 0, 1, 3, 1, 127, },
+ { 1, 0, 1, 3, 1, 127, },
+ { 3, 0, 1, 3, 1, 127, },
+ { 4, 0, 1, 3, 1, 127, },
+ { 5, 0, 1, 3, 1, 127, },
+ { 6, 0, 1, 3, 1, 127, },
+ { 7, 0, 1, 3, 1, 127, },
+ { 8, 0, 1, 3, 1, 127, },
+ { 9, 0, 1, 3, 1, 127, },
{ 0, 0, 1, 3, 2, 127, },
{ 2, 0, 1, 3, 2, 127, },
+ { 1, 0, 1, 3, 2, 127, },
+ { 3, 0, 1, 3, 2, 127, },
+ { 4, 0, 1, 3, 2, 127, },
+ { 5, 0, 1, 3, 2, 127, },
+ { 6, 0, 1, 3, 2, 127, },
+ { 7, 0, 1, 3, 2, 127, },
+ { 8, 0, 1, 3, 2, 127, },
+ { 9, 0, 1, 3, 2, 127, },
{ 0, 0, 1, 3, 3, 48, },
{ 2, 0, 1, 3, 3, 36, },
+ { 1, 0, 1, 3, 3, 66, },
+ { 3, 0, 1, 3, 3, 48, },
+ { 4, 0, 1, 3, 3, 66, },
+ { 5, 0, 1, 3, 3, 36, },
+ { 6, 0, 1, 3, 3, 48, },
+ { 7, 0, 1, 3, 3, 36, },
+ { 8, 0, 1, 3, 3, 48, },
+ { 9, 0, 1, 3, 3, 36, },
{ 0, 0, 1, 3, 4, 48, },
{ 2, 0, 1, 3, 4, 36, },
+ { 1, 0, 1, 3, 4, 66, },
+ { 3, 0, 1, 3, 4, 48, },
+ { 4, 0, 1, 3, 4, 70, },
+ { 5, 0, 1, 3, 4, 36, },
+ { 6, 0, 1, 3, 4, 48, },
+ { 7, 0, 1, 3, 4, 36, },
+ { 8, 0, 1, 3, 4, 48, },
+ { 9, 0, 1, 3, 4, 36, },
{ 0, 0, 1, 3, 5, 60, },
{ 2, 0, 1, 3, 5, 36, },
+ { 1, 0, 1, 3, 5, 66, },
+ { 3, 0, 1, 3, 5, 60, },
+ { 4, 0, 1, 3, 5, 70, },
+ { 5, 0, 1, 3, 5, 36, },
+ { 6, 0, 1, 3, 5, 60, },
+ { 7, 0, 1, 3, 5, 36, },
+ { 8, 0, 1, 3, 5, 60, },
+ { 9, 0, 1, 3, 5, 36, },
{ 0, 0, 1, 3, 6, 64, },
{ 2, 0, 1, 3, 6, 36, },
+ { 1, 0, 1, 3, 6, 66, },
+ { 3, 0, 1, 3, 6, 64, },
+ { 4, 0, 1, 3, 6, 70, },
+ { 5, 0, 1, 3, 6, 36, },
+ { 6, 0, 1, 3, 6, 64, },
+ { 7, 0, 1, 3, 6, 36, },
+ { 8, 0, 1, 3, 6, 64, },
+ { 9, 0, 1, 3, 6, 36, },
{ 0, 0, 1, 3, 7, 60, },
{ 2, 0, 1, 3, 7, 36, },
+ { 1, 0, 1, 3, 7, 66, },
+ { 3, 0, 1, 3, 7, 60, },
+ { 4, 0, 1, 3, 7, 70, },
+ { 5, 0, 1, 3, 7, 36, },
+ { 6, 0, 1, 3, 7, 60, },
+ { 7, 0, 1, 3, 7, 36, },
+ { 8, 0, 1, 3, 7, 60, },
+ { 9, 0, 1, 3, 7, 36, },
{ 0, 0, 1, 3, 8, 52, },
{ 2, 0, 1, 3, 8, 36, },
+ { 1, 0, 1, 3, 8, 66, },
+ { 3, 0, 1, 3, 8, 52, },
+ { 4, 0, 1, 3, 8, 70, },
+ { 5, 0, 1, 3, 8, 36, },
+ { 6, 0, 1, 3, 8, 52, },
+ { 7, 0, 1, 3, 8, 36, },
+ { 8, 0, 1, 3, 8, 52, },
+ { 9, 0, 1, 3, 8, 36, },
{ 0, 0, 1, 3, 9, 52, },
{ 2, 0, 1, 3, 9, 36, },
+ { 1, 0, 1, 3, 9, 66, },
+ { 3, 0, 1, 3, 9, 52, },
+ { 4, 0, 1, 3, 9, 70, },
+ { 5, 0, 1, 3, 9, 36, },
+ { 6, 0, 1, 3, 9, 52, },
+ { 7, 0, 1, 3, 9, 36, },
+ { 8, 0, 1, 3, 9, 52, },
+ { 9, 0, 1, 3, 9, 36, },
{ 0, 0, 1, 3, 10, 40, },
{ 2, 0, 1, 3, 10, 36, },
+ { 1, 0, 1, 3, 10, 66, },
+ { 3, 0, 1, 3, 10, 40, },
+ { 4, 0, 1, 3, 10, 70, },
+ { 5, 0, 1, 3, 10, 36, },
+ { 6, 0, 1, 3, 10, 40, },
+ { 7, 0, 1, 3, 10, 36, },
+ { 8, 0, 1, 3, 10, 40, },
+ { 9, 0, 1, 3, 10, 36, },
{ 0, 0, 1, 3, 11, 26, },
{ 2, 0, 1, 3, 11, 36, },
+ { 1, 0, 1, 3, 11, 66, },
+ { 3, 0, 1, 3, 11, 26, },
+ { 4, 0, 1, 3, 11, 66, },
+ { 5, 0, 1, 3, 11, 36, },
+ { 6, 0, 1, 3, 11, 26, },
+ { 7, 0, 1, 3, 11, 36, },
+ { 8, 0, 1, 3, 11, 26, },
+ { 9, 0, 1, 3, 11, 36, },
{ 0, 0, 1, 3, 12, 127, },
{ 2, 0, 1, 3, 12, 127, },
+ { 1, 0, 1, 3, 12, 127, },
+ { 3, 0, 1, 3, 12, 127, },
+ { 4, 0, 1, 3, 12, 127, },
+ { 5, 0, 1, 3, 12, 127, },
+ { 6, 0, 1, 3, 12, 127, },
+ { 7, 0, 1, 3, 12, 127, },
+ { 8, 0, 1, 3, 12, 127, },
+ { 9, 0, 1, 3, 12, 127, },
{ 0, 0, 1, 3, 13, 127, },
{ 2, 0, 1, 3, 13, 127, },
+ { 1, 0, 1, 3, 13, 127, },
+ { 3, 0, 1, 3, 13, 127, },
+ { 4, 0, 1, 3, 13, 127, },
+ { 5, 0, 1, 3, 13, 127, },
+ { 6, 0, 1, 3, 13, 127, },
+ { 7, 0, 1, 3, 13, 127, },
+ { 8, 0, 1, 3, 13, 127, },
+ { 9, 0, 1, 3, 13, 127, },
{ 0, 0, 1, 3, 14, 127, },
{ 2, 0, 1, 3, 14, 127, },
+ { 1, 0, 1, 3, 14, 127, },
+ { 3, 0, 1, 3, 14, 127, },
+ { 4, 0, 1, 3, 14, 127, },
+ { 5, 0, 1, 3, 14, 127, },
+ { 6, 0, 1, 3, 14, 127, },
+ { 7, 0, 1, 3, 14, 127, },
+ { 8, 0, 1, 3, 14, 127, },
+ { 9, 0, 1, 3, 14, 127, },
{ 0, 1, 0, 1, 36, 74, },
{ 2, 1, 0, 1, 36, 62, },
+ { 1, 1, 0, 1, 36, 60, },
+ { 3, 1, 0, 1, 36, 62, },
+ { 4, 1, 0, 1, 36, 76, },
+ { 5, 1, 0, 1, 36, 62, },
+ { 6, 1, 0, 1, 36, 64, },
+ { 7, 1, 0, 1, 36, 54, },
+ { 8, 1, 0, 1, 36, 62, },
+ { 9, 1, 0, 1, 36, 62, },
{ 0, 1, 0, 1, 40, 76, },
{ 2, 1, 0, 1, 40, 62, },
+ { 1, 1, 0, 1, 40, 62, },
+ { 3, 1, 0, 1, 40, 62, },
+ { 4, 1, 0, 1, 40, 76, },
+ { 5, 1, 0, 1, 40, 62, },
+ { 6, 1, 0, 1, 40, 64, },
+ { 7, 1, 0, 1, 40, 54, },
+ { 8, 1, 0, 1, 40, 62, },
+ { 9, 1, 0, 1, 40, 62, },
{ 0, 1, 0, 1, 44, 76, },
{ 2, 1, 0, 1, 44, 62, },
+ { 1, 1, 0, 1, 44, 62, },
+ { 3, 1, 0, 1, 44, 62, },
+ { 4, 1, 0, 1, 44, 76, },
+ { 5, 1, 0, 1, 44, 62, },
+ { 6, 1, 0, 1, 44, 64, },
+ { 7, 1, 0, 1, 44, 54, },
+ { 8, 1, 0, 1, 44, 62, },
+ { 9, 1, 0, 1, 44, 62, },
{ 0, 1, 0, 1, 48, 76, },
{ 2, 1, 0, 1, 48, 62, },
+ { 1, 1, 0, 1, 48, 62, },
+ { 3, 1, 0, 1, 48, 62, },
+ { 4, 1, 0, 1, 48, 54, },
+ { 5, 1, 0, 1, 48, 62, },
+ { 6, 1, 0, 1, 48, 64, },
+ { 7, 1, 0, 1, 48, 54, },
+ { 8, 1, 0, 1, 48, 62, },
+ { 9, 1, 0, 1, 48, 62, },
{ 0, 1, 0, 1, 52, 76, },
{ 2, 1, 0, 1, 52, 62, },
+ { 1, 1, 0, 1, 52, 62, },
+ { 3, 1, 0, 1, 52, 64, },
+ { 4, 1, 0, 1, 52, 76, },
+ { 5, 1, 0, 1, 52, 62, },
+ { 6, 1, 0, 1, 52, 76, },
+ { 7, 1, 0, 1, 52, 54, },
+ { 8, 1, 0, 1, 52, 76, },
+ { 9, 1, 0, 1, 52, 62, },
{ 0, 1, 0, 1, 56, 76, },
{ 2, 1, 0, 1, 56, 62, },
+ { 1, 1, 0, 1, 56, 62, },
+ { 3, 1, 0, 1, 56, 64, },
+ { 4, 1, 0, 1, 56, 76, },
+ { 5, 1, 0, 1, 56, 62, },
+ { 6, 1, 0, 1, 56, 76, },
+ { 7, 1, 0, 1, 56, 54, },
+ { 8, 1, 0, 1, 56, 76, },
+ { 9, 1, 0, 1, 56, 62, },
{ 0, 1, 0, 1, 60, 76, },
{ 2, 1, 0, 1, 60, 62, },
+ { 1, 1, 0, 1, 60, 62, },
+ { 3, 1, 0, 1, 60, 64, },
+ { 4, 1, 0, 1, 60, 76, },
+ { 5, 1, 0, 1, 60, 62, },
+ { 6, 1, 0, 1, 60, 76, },
+ { 7, 1, 0, 1, 60, 54, },
+ { 8, 1, 0, 1, 60, 76, },
+ { 9, 1, 0, 1, 60, 62, },
{ 0, 1, 0, 1, 64, 74, },
{ 2, 1, 0, 1, 64, 62, },
+ { 1, 1, 0, 1, 64, 60, },
+ { 3, 1, 0, 1, 64, 64, },
+ { 4, 1, 0, 1, 64, 76, },
+ { 5, 1, 0, 1, 64, 62, },
+ { 6, 1, 0, 1, 64, 74, },
+ { 7, 1, 0, 1, 64, 54, },
+ { 8, 1, 0, 1, 64, 74, },
+ { 9, 1, 0, 1, 64, 62, },
{ 0, 1, 0, 1, 100, 72, },
{ 2, 1, 0, 1, 100, 62, },
+ { 1, 1, 0, 1, 100, 76, },
+ { 3, 1, 0, 1, 100, 72, },
+ { 4, 1, 0, 1, 100, 76, },
+ { 5, 1, 0, 1, 100, 62, },
+ { 6, 1, 0, 1, 100, 72, },
+ { 7, 1, 0, 1, 100, 54, },
+ { 8, 1, 0, 1, 100, 72, },
+ { 9, 1, 0, 1, 100, 127, },
{ 0, 1, 0, 1, 104, 76, },
{ 2, 1, 0, 1, 104, 62, },
+ { 1, 1, 0, 1, 104, 76, },
+ { 3, 1, 0, 1, 104, 76, },
+ { 4, 1, 0, 1, 104, 76, },
+ { 5, 1, 0, 1, 104, 62, },
+ { 6, 1, 0, 1, 104, 76, },
+ { 7, 1, 0, 1, 104, 54, },
+ { 8, 1, 0, 1, 104, 76, },
+ { 9, 1, 0, 1, 104, 127, },
{ 0, 1, 0, 1, 108, 76, },
{ 2, 1, 0, 1, 108, 62, },
+ { 1, 1, 0, 1, 108, 76, },
+ { 3, 1, 0, 1, 108, 76, },
+ { 4, 1, 0, 1, 108, 76, },
+ { 5, 1, 0, 1, 108, 62, },
+ { 6, 1, 0, 1, 108, 76, },
+ { 7, 1, 0, 1, 108, 54, },
+ { 8, 1, 0, 1, 108, 76, },
+ { 9, 1, 0, 1, 108, 127, },
{ 0, 1, 0, 1, 112, 76, },
{ 2, 1, 0, 1, 112, 62, },
+ { 1, 1, 0, 1, 112, 76, },
+ { 3, 1, 0, 1, 112, 76, },
+ { 4, 1, 0, 1, 112, 76, },
+ { 5, 1, 0, 1, 112, 62, },
+ { 6, 1, 0, 1, 112, 76, },
+ { 7, 1, 0, 1, 112, 54, },
+ { 8, 1, 0, 1, 112, 76, },
+ { 9, 1, 0, 1, 112, 127, },
{ 0, 1, 0, 1, 116, 76, },
{ 2, 1, 0, 1, 116, 62, },
+ { 1, 1, 0, 1, 116, 76, },
+ { 3, 1, 0, 1, 116, 76, },
+ { 4, 1, 0, 1, 116, 76, },
+ { 5, 1, 0, 1, 116, 62, },
+ { 6, 1, 0, 1, 116, 76, },
+ { 7, 1, 0, 1, 116, 54, },
+ { 8, 1, 0, 1, 116, 76, },
+ { 9, 1, 0, 1, 116, 127, },
{ 0, 1, 0, 1, 120, 76, },
{ 2, 1, 0, 1, 120, 62, },
+ { 1, 1, 0, 1, 120, 76, },
+ { 3, 1, 0, 1, 120, 127, },
+ { 4, 1, 0, 1, 120, 76, },
+ { 5, 1, 0, 1, 120, 127, },
+ { 6, 1, 0, 1, 120, 76, },
+ { 7, 1, 0, 1, 120, 54, },
+ { 8, 1, 0, 1, 120, 76, },
+ { 9, 1, 0, 1, 120, 127, },
{ 0, 1, 0, 1, 124, 76, },
{ 2, 1, 0, 1, 124, 62, },
+ { 1, 1, 0, 1, 124, 76, },
+ { 3, 1, 0, 1, 124, 127, },
+ { 4, 1, 0, 1, 124, 76, },
+ { 5, 1, 0, 1, 124, 127, },
+ { 6, 1, 0, 1, 124, 76, },
+ { 7, 1, 0, 1, 124, 54, },
+ { 8, 1, 0, 1, 124, 76, },
+ { 9, 1, 0, 1, 124, 127, },
{ 0, 1, 0, 1, 128, 76, },
{ 2, 1, 0, 1, 128, 62, },
+ { 1, 1, 0, 1, 128, 76, },
+ { 3, 1, 0, 1, 128, 127, },
+ { 4, 1, 0, 1, 128, 76, },
+ { 5, 1, 0, 1, 128, 127, },
+ { 6, 1, 0, 1, 128, 76, },
+ { 7, 1, 0, 1, 128, 54, },
+ { 8, 1, 0, 1, 128, 76, },
+ { 9, 1, 0, 1, 128, 127, },
{ 0, 1, 0, 1, 132, 76, },
{ 2, 1, 0, 1, 132, 62, },
+ { 1, 1, 0, 1, 132, 76, },
+ { 3, 1, 0, 1, 132, 76, },
+ { 4, 1, 0, 1, 132, 76, },
+ { 5, 1, 0, 1, 132, 62, },
+ { 6, 1, 0, 1, 132, 76, },
+ { 7, 1, 0, 1, 132, 54, },
+ { 8, 1, 0, 1, 132, 76, },
+ { 9, 1, 0, 1, 132, 127, },
{ 0, 1, 0, 1, 136, 76, },
{ 2, 1, 0, 1, 136, 62, },
+ { 1, 1, 0, 1, 136, 76, },
+ { 3, 1, 0, 1, 136, 76, },
+ { 4, 1, 0, 1, 136, 76, },
+ { 5, 1, 0, 1, 136, 62, },
+ { 6, 1, 0, 1, 136, 76, },
+ { 7, 1, 0, 1, 136, 54, },
+ { 8, 1, 0, 1, 136, 76, },
+ { 9, 1, 0, 1, 136, 127, },
{ 0, 1, 0, 1, 140, 72, },
{ 2, 1, 0, 1, 140, 62, },
+ { 1, 1, 0, 1, 140, 76, },
+ { 3, 1, 0, 1, 140, 72, },
+ { 4, 1, 0, 1, 140, 76, },
+ { 5, 1, 0, 1, 140, 62, },
+ { 6, 1, 0, 1, 140, 72, },
+ { 7, 1, 0, 1, 140, 54, },
+ { 8, 1, 0, 1, 140, 72, },
+ { 9, 1, 0, 1, 140, 127, },
{ 0, 1, 0, 1, 144, 76, },
{ 2, 1, 0, 1, 144, 127, },
+ { 1, 1, 0, 1, 144, 127, },
+ { 3, 1, 0, 1, 144, 76, },
+ { 4, 1, 0, 1, 144, 76, },
+ { 5, 1, 0, 1, 144, 127, },
+ { 6, 1, 0, 1, 144, 76, },
+ { 7, 1, 0, 1, 144, 127, },
+ { 8, 1, 0, 1, 144, 76, },
+ { 9, 1, 0, 1, 144, 127, },
{ 0, 1, 0, 1, 149, 76, },
{ 2, 1, 0, 1, 149, -128, },
+ { 1, 1, 0, 1, 149, 127, },
+ { 3, 1, 0, 1, 149, 76, },
+ { 4, 1, 0, 1, 149, 74, },
+ { 5, 1, 0, 1, 149, 76, },
+ { 6, 1, 0, 1, 149, 76, },
+ { 7, 1, 0, 1, 149, 54, },
+ { 8, 1, 0, 1, 149, 76, },
+ { 9, 1, 0, 1, 149, -128, },
{ 0, 1, 0, 1, 153, 76, },
{ 2, 1, 0, 1, 153, -128, },
+ { 1, 1, 0, 1, 153, 127, },
+ { 3, 1, 0, 1, 153, 76, },
+ { 4, 1, 0, 1, 153, 74, },
+ { 5, 1, 0, 1, 153, 76, },
+ { 6, 1, 0, 1, 153, 76, },
+ { 7, 1, 0, 1, 153, 54, },
+ { 8, 1, 0, 1, 153, 76, },
+ { 9, 1, 0, 1, 153, -128, },
{ 0, 1, 0, 1, 157, 76, },
{ 2, 1, 0, 1, 157, -128, },
+ { 1, 1, 0, 1, 157, 127, },
+ { 3, 1, 0, 1, 157, 76, },
+ { 4, 1, 0, 1, 157, 74, },
+ { 5, 1, 0, 1, 157, 76, },
+ { 6, 1, 0, 1, 157, 76, },
+ { 7, 1, 0, 1, 157, 54, },
+ { 8, 1, 0, 1, 157, 76, },
+ { 9, 1, 0, 1, 157, -128, },
{ 0, 1, 0, 1, 161, 76, },
{ 2, 1, 0, 1, 161, -128, },
+ { 1, 1, 0, 1, 161, 127, },
+ { 3, 1, 0, 1, 161, 76, },
+ { 4, 1, 0, 1, 161, 74, },
+ { 5, 1, 0, 1, 161, 76, },
+ { 6, 1, 0, 1, 161, 76, },
+ { 7, 1, 0, 1, 161, 54, },
+ { 8, 1, 0, 1, 161, 76, },
+ { 9, 1, 0, 1, 161, -128, },
{ 0, 1, 0, 1, 165, 76, },
{ 2, 1, 0, 1, 165, -128, },
+ { 1, 1, 0, 1, 165, 127, },
+ { 3, 1, 0, 1, 165, 76, },
+ { 4, 1, 0, 1, 165, 74, },
+ { 5, 1, 0, 1, 165, 76, },
+ { 6, 1, 0, 1, 165, 76, },
+ { 7, 1, 0, 1, 165, 54, },
+ { 8, 1, 0, 1, 165, 76, },
+ { 9, 1, 0, 1, 165, -128, },
{ 0, 1, 0, 2, 36, 72, },
{ 2, 1, 0, 2, 36, 62, },
+ { 1, 1, 0, 2, 36, 62, },
+ { 3, 1, 0, 2, 36, 62, },
+ { 4, 1, 0, 2, 36, 76, },
+ { 5, 1, 0, 2, 36, 62, },
+ { 6, 1, 0, 2, 36, 64, },
+ { 7, 1, 0, 2, 36, 54, },
+ { 8, 1, 0, 2, 36, 62, },
+ { 9, 1, 0, 2, 36, 62, },
{ 0, 1, 0, 2, 40, 76, },
{ 2, 1, 0, 2, 40, 62, },
+ { 1, 1, 0, 2, 40, 62, },
+ { 3, 1, 0, 2, 40, 62, },
+ { 4, 1, 0, 2, 40, 76, },
+ { 5, 1, 0, 2, 40, 62, },
+ { 6, 1, 0, 2, 40, 64, },
+ { 7, 1, 0, 2, 40, 54, },
+ { 8, 1, 0, 2, 40, 62, },
+ { 9, 1, 0, 2, 40, 62, },
{ 0, 1, 0, 2, 44, 76, },
{ 2, 1, 0, 2, 44, 62, },
+ { 1, 1, 0, 2, 44, 62, },
+ { 3, 1, 0, 2, 44, 62, },
+ { 4, 1, 0, 2, 44, 76, },
+ { 5, 1, 0, 2, 44, 62, },
+ { 6, 1, 0, 2, 44, 64, },
+ { 7, 1, 0, 2, 44, 54, },
+ { 8, 1, 0, 2, 44, 62, },
+ { 9, 1, 0, 2, 44, 62, },
{ 0, 1, 0, 2, 48, 76, },
{ 2, 1, 0, 2, 48, 62, },
+ { 1, 1, 0, 2, 48, 62, },
+ { 3, 1, 0, 2, 48, 62, },
+ { 4, 1, 0, 2, 48, 54, },
+ { 5, 1, 0, 2, 48, 62, },
+ { 6, 1, 0, 2, 48, 64, },
+ { 7, 1, 0, 2, 48, 54, },
+ { 8, 1, 0, 2, 48, 62, },
+ { 9, 1, 0, 2, 48, 62, },
{ 0, 1, 0, 2, 52, 76, },
{ 2, 1, 0, 2, 52, 62, },
+ { 1, 1, 0, 2, 52, 62, },
+ { 3, 1, 0, 2, 52, 64, },
+ { 4, 1, 0, 2, 52, 76, },
+ { 5, 1, 0, 2, 52, 62, },
+ { 6, 1, 0, 2, 52, 76, },
+ { 7, 1, 0, 2, 52, 54, },
+ { 8, 1, 0, 2, 52, 76, },
+ { 9, 1, 0, 2, 52, 62, },
{ 0, 1, 0, 2, 56, 76, },
{ 2, 1, 0, 2, 56, 62, },
+ { 1, 1, 0, 2, 56, 62, },
+ { 3, 1, 0, 2, 56, 64, },
+ { 4, 1, 0, 2, 56, 76, },
+ { 5, 1, 0, 2, 56, 62, },
+ { 6, 1, 0, 2, 56, 76, },
+ { 7, 1, 0, 2, 56, 54, },
+ { 8, 1, 0, 2, 56, 76, },
+ { 9, 1, 0, 2, 56, 62, },
{ 0, 1, 0, 2, 60, 76, },
{ 2, 1, 0, 2, 60, 62, },
+ { 1, 1, 0, 2, 60, 62, },
+ { 3, 1, 0, 2, 60, 64, },
+ { 4, 1, 0, 2, 60, 76, },
+ { 5, 1, 0, 2, 60, 62, },
+ { 6, 1, 0, 2, 60, 76, },
+ { 7, 1, 0, 2, 60, 54, },
+ { 8, 1, 0, 2, 60, 76, },
+ { 9, 1, 0, 2, 60, 62, },
{ 0, 1, 0, 2, 64, 74, },
{ 2, 1, 0, 2, 64, 62, },
+ { 1, 1, 0, 2, 64, 60, },
+ { 3, 1, 0, 2, 64, 64, },
+ { 4, 1, 0, 2, 64, 74, },
+ { 5, 1, 0, 2, 64, 62, },
+ { 6, 1, 0, 2, 64, 74, },
+ { 7, 1, 0, 2, 64, 54, },
+ { 8, 1, 0, 2, 64, 74, },
+ { 9, 1, 0, 2, 64, 62, },
{ 0, 1, 0, 2, 100, 70, },
{ 2, 1, 0, 2, 100, 62, },
+ { 1, 1, 0, 2, 100, 76, },
+ { 3, 1, 0, 2, 100, 70, },
+ { 4, 1, 0, 2, 100, 76, },
+ { 5, 1, 0, 2, 100, 62, },
+ { 6, 1, 0, 2, 100, 70, },
+ { 7, 1, 0, 2, 100, 54, },
+ { 8, 1, 0, 2, 100, 70, },
+ { 9, 1, 0, 2, 100, 127, },
{ 0, 1, 0, 2, 104, 76, },
{ 2, 1, 0, 2, 104, 62, },
+ { 1, 1, 0, 2, 104, 76, },
+ { 3, 1, 0, 2, 104, 76, },
+ { 4, 1, 0, 2, 104, 76, },
+ { 5, 1, 0, 2, 104, 62, },
+ { 6, 1, 0, 2, 104, 76, },
+ { 7, 1, 0, 2, 104, 54, },
+ { 8, 1, 0, 2, 104, 76, },
+ { 9, 1, 0, 2, 104, 127, },
{ 0, 1, 0, 2, 108, 76, },
{ 2, 1, 0, 2, 108, 62, },
+ { 1, 1, 0, 2, 108, 76, },
+ { 3, 1, 0, 2, 108, 76, },
+ { 4, 1, 0, 2, 108, 76, },
+ { 5, 1, 0, 2, 108, 62, },
+ { 6, 1, 0, 2, 108, 76, },
+ { 7, 1, 0, 2, 108, 54, },
+ { 8, 1, 0, 2, 108, 76, },
+ { 9, 1, 0, 2, 108, 127, },
{ 0, 1, 0, 2, 112, 76, },
{ 2, 1, 0, 2, 112, 62, },
+ { 1, 1, 0, 2, 112, 76, },
+ { 3, 1, 0, 2, 112, 76, },
+ { 4, 1, 0, 2, 112, 76, },
+ { 5, 1, 0, 2, 112, 62, },
+ { 6, 1, 0, 2, 112, 76, },
+ { 7, 1, 0, 2, 112, 54, },
+ { 8, 1, 0, 2, 112, 76, },
+ { 9, 1, 0, 2, 112, 127, },
{ 0, 1, 0, 2, 116, 76, },
{ 2, 1, 0, 2, 116, 62, },
+ { 1, 1, 0, 2, 116, 76, },
+ { 3, 1, 0, 2, 116, 76, },
+ { 4, 1, 0, 2, 116, 76, },
+ { 5, 1, 0, 2, 116, 62, },
+ { 6, 1, 0, 2, 116, 76, },
+ { 7, 1, 0, 2, 116, 54, },
+ { 8, 1, 0, 2, 116, 76, },
+ { 9, 1, 0, 2, 116, 127, },
{ 0, 1, 0, 2, 120, 76, },
{ 2, 1, 0, 2, 120, 62, },
+ { 1, 1, 0, 2, 120, 76, },
+ { 3, 1, 0, 2, 120, 127, },
+ { 4, 1, 0, 2, 120, 76, },
+ { 5, 1, 0, 2, 120, 127, },
+ { 6, 1, 0, 2, 120, 76, },
+ { 7, 1, 0, 2, 120, 54, },
+ { 8, 1, 0, 2, 120, 76, },
+ { 9, 1, 0, 2, 120, 127, },
{ 0, 1, 0, 2, 124, 76, },
{ 2, 1, 0, 2, 124, 62, },
+ { 1, 1, 0, 2, 124, 76, },
+ { 3, 1, 0, 2, 124, 127, },
+ { 4, 1, 0, 2, 124, 76, },
+ { 5, 1, 0, 2, 124, 127, },
+ { 6, 1, 0, 2, 124, 76, },
+ { 7, 1, 0, 2, 124, 54, },
+ { 8, 1, 0, 2, 124, 76, },
+ { 9, 1, 0, 2, 124, 127, },
{ 0, 1, 0, 2, 128, 76, },
{ 2, 1, 0, 2, 128, 62, },
+ { 1, 1, 0, 2, 128, 76, },
+ { 3, 1, 0, 2, 128, 127, },
+ { 4, 1, 0, 2, 128, 76, },
+ { 5, 1, 0, 2, 128, 127, },
+ { 6, 1, 0, 2, 128, 76, },
+ { 7, 1, 0, 2, 128, 54, },
+ { 8, 1, 0, 2, 128, 76, },
+ { 9, 1, 0, 2, 128, 127, },
{ 0, 1, 0, 2, 132, 76, },
{ 2, 1, 0, 2, 132, 62, },
+ { 1, 1, 0, 2, 132, 76, },
+ { 3, 1, 0, 2, 132, 76, },
+ { 4, 1, 0, 2, 132, 76, },
+ { 5, 1, 0, 2, 132, 62, },
+ { 6, 1, 0, 2, 132, 76, },
+ { 7, 1, 0, 2, 132, 54, },
+ { 8, 1, 0, 2, 132, 76, },
+ { 9, 1, 0, 2, 132, 127, },
{ 0, 1, 0, 2, 136, 76, },
{ 2, 1, 0, 2, 136, 62, },
+ { 1, 1, 0, 2, 136, 76, },
+ { 3, 1, 0, 2, 136, 76, },
+ { 4, 1, 0, 2, 136, 76, },
+ { 5, 1, 0, 2, 136, 62, },
+ { 6, 1, 0, 2, 136, 76, },
+ { 7, 1, 0, 2, 136, 54, },
+ { 8, 1, 0, 2, 136, 76, },
+ { 9, 1, 0, 2, 136, 127, },
{ 0, 1, 0, 2, 140, 70, },
{ 2, 1, 0, 2, 140, 62, },
+ { 1, 1, 0, 2, 140, 76, },
+ { 3, 1, 0, 2, 140, 70, },
+ { 4, 1, 0, 2, 140, 76, },
+ { 5, 1, 0, 2, 140, 62, },
+ { 6, 1, 0, 2, 140, 70, },
+ { 7, 1, 0, 2, 140, 54, },
+ { 8, 1, 0, 2, 140, 70, },
+ { 9, 1, 0, 2, 140, 127, },
{ 0, 1, 0, 2, 144, 76, },
{ 2, 1, 0, 2, 144, 127, },
+ { 1, 1, 0, 2, 144, 127, },
+ { 3, 1, 0, 2, 144, 76, },
+ { 4, 1, 0, 2, 144, 76, },
+ { 5, 1, 0, 2, 144, 127, },
+ { 6, 1, 0, 2, 144, 76, },
+ { 7, 1, 0, 2, 144, 127, },
+ { 8, 1, 0, 2, 144, 76, },
+ { 9, 1, 0, 2, 144, 127, },
{ 0, 1, 0, 2, 149, 76, },
{ 2, 1, 0, 2, 149, -128, },
+ { 1, 1, 0, 2, 149, 127, },
+ { 3, 1, 0, 2, 149, 76, },
+ { 4, 1, 0, 2, 149, 74, },
+ { 5, 1, 0, 2, 149, 76, },
+ { 6, 1, 0, 2, 149, 76, },
+ { 7, 1, 0, 2, 149, 54, },
+ { 8, 1, 0, 2, 149, 76, },
+ { 9, 1, 0, 2, 149, -128, },
{ 0, 1, 0, 2, 153, 76, },
{ 2, 1, 0, 2, 153, -128, },
+ { 1, 1, 0, 2, 153, 127, },
+ { 3, 1, 0, 2, 153, 76, },
+ { 4, 1, 0, 2, 153, 74, },
+ { 5, 1, 0, 2, 153, 76, },
+ { 6, 1, 0, 2, 153, 76, },
+ { 7, 1, 0, 2, 153, 54, },
+ { 8, 1, 0, 2, 153, 76, },
+ { 9, 1, 0, 2, 153, -128, },
{ 0, 1, 0, 2, 157, 76, },
{ 2, 1, 0, 2, 157, -128, },
+ { 1, 1, 0, 2, 157, 127, },
+ { 3, 1, 0, 2, 157, 76, },
+ { 4, 1, 0, 2, 157, 74, },
+ { 5, 1, 0, 2, 157, 76, },
+ { 6, 1, 0, 2, 157, 76, },
+ { 7, 1, 0, 2, 157, 54, },
+ { 8, 1, 0, 2, 157, 76, },
+ { 9, 1, 0, 2, 157, -128, },
{ 0, 1, 0, 2, 161, 76, },
{ 2, 1, 0, 2, 161, -128, },
+ { 1, 1, 0, 2, 161, 127, },
+ { 3, 1, 0, 2, 161, 76, },
+ { 4, 1, 0, 2, 161, 74, },
+ { 5, 1, 0, 2, 161, 76, },
+ { 6, 1, 0, 2, 161, 76, },
+ { 7, 1, 0, 2, 161, 54, },
+ { 8, 1, 0, 2, 161, 76, },
+ { 9, 1, 0, 2, 161, -128, },
{ 0, 1, 0, 2, 165, 76, },
{ 2, 1, 0, 2, 165, -128, },
+ { 1, 1, 0, 2, 165, 127, },
+ { 3, 1, 0, 2, 165, 76, },
+ { 4, 1, 0, 2, 165, 74, },
+ { 5, 1, 0, 2, 165, 76, },
+ { 6, 1, 0, 2, 165, 76, },
+ { 7, 1, 0, 2, 165, 54, },
+ { 8, 1, 0, 2, 165, 76, },
+ { 9, 1, 0, 2, 165, -128, },
{ 0, 1, 0, 3, 36, 68, },
{ 2, 1, 0, 3, 36, 38, },
+ { 1, 1, 0, 3, 36, 50, },
+ { 3, 1, 0, 3, 36, 38, },
+ { 4, 1, 0, 3, 36, 66, },
+ { 5, 1, 0, 3, 36, 38, },
+ { 6, 1, 0, 3, 36, 52, },
+ { 7, 1, 0, 3, 36, 30, },
+ { 8, 1, 0, 3, 36, 50, },
+ { 9, 1, 0, 3, 36, 38, },
{ 0, 1, 0, 3, 40, 68, },
{ 2, 1, 0, 3, 40, 38, },
+ { 1, 1, 0, 3, 40, 50, },
+ { 3, 1, 0, 3, 40, 38, },
+ { 4, 1, 0, 3, 40, 66, },
+ { 5, 1, 0, 3, 40, 38, },
+ { 6, 1, 0, 3, 40, 52, },
+ { 7, 1, 0, 3, 40, 30, },
+ { 8, 1, 0, 3, 40, 50, },
+ { 9, 1, 0, 3, 40, 38, },
{ 0, 1, 0, 3, 44, 68, },
{ 2, 1, 0, 3, 44, 38, },
+ { 1, 1, 0, 3, 44, 50, },
+ { 3, 1, 0, 3, 44, 38, },
+ { 4, 1, 0, 3, 44, 66, },
+ { 5, 1, 0, 3, 44, 38, },
+ { 6, 1, 0, 3, 44, 52, },
+ { 7, 1, 0, 3, 44, 30, },
+ { 8, 1, 0, 3, 44, 50, },
+ { 9, 1, 0, 3, 44, 38, },
{ 0, 1, 0, 3, 48, 68, },
{ 2, 1, 0, 3, 48, 38, },
+ { 1, 1, 0, 3, 48, 50, },
+ { 3, 1, 0, 3, 48, 38, },
+ { 4, 1, 0, 3, 48, 36, },
+ { 5, 1, 0, 3, 48, 38, },
+ { 6, 1, 0, 3, 48, 52, },
+ { 7, 1, 0, 3, 48, 30, },
+ { 8, 1, 0, 3, 48, 50, },
+ { 9, 1, 0, 3, 48, 38, },
{ 0, 1, 0, 3, 52, 68, },
{ 2, 1, 0, 3, 52, 38, },
+ { 1, 1, 0, 3, 52, 50, },
+ { 3, 1, 0, 3, 52, 40, },
+ { 4, 1, 0, 3, 52, 66, },
+ { 5, 1, 0, 3, 52, 38, },
+ { 6, 1, 0, 3, 52, 68, },
+ { 7, 1, 0, 3, 52, 30, },
+ { 8, 1, 0, 3, 52, 68, },
+ { 9, 1, 0, 3, 52, 38, },
{ 0, 1, 0, 3, 56, 68, },
{ 2, 1, 0, 3, 56, 38, },
+ { 1, 1, 0, 3, 56, 50, },
+ { 3, 1, 0, 3, 56, 40, },
+ { 4, 1, 0, 3, 56, 66, },
+ { 5, 1, 0, 3, 56, 38, },
+ { 6, 1, 0, 3, 56, 68, },
+ { 7, 1, 0, 3, 56, 30, },
+ { 8, 1, 0, 3, 56, 68, },
+ { 9, 1, 0, 3, 56, 38, },
{ 0, 1, 0, 3, 60, 66, },
{ 2, 1, 0, 3, 60, 38, },
+ { 1, 1, 0, 3, 60, 50, },
+ { 3, 1, 0, 3, 60, 40, },
+ { 4, 1, 0, 3, 60, 66, },
+ { 5, 1, 0, 3, 60, 38, },
+ { 6, 1, 0, 3, 60, 66, },
+ { 7, 1, 0, 3, 60, 30, },
+ { 8, 1, 0, 3, 60, 66, },
+ { 9, 1, 0, 3, 60, 38, },
{ 0, 1, 0, 3, 64, 68, },
{ 2, 1, 0, 3, 64, 38, },
+ { 1, 1, 0, 3, 64, 50, },
+ { 3, 1, 0, 3, 64, 40, },
+ { 4, 1, 0, 3, 64, 66, },
+ { 5, 1, 0, 3, 64, 38, },
+ { 6, 1, 0, 3, 64, 68, },
+ { 7, 1, 0, 3, 64, 30, },
+ { 8, 1, 0, 3, 64, 68, },
+ { 9, 1, 0, 3, 64, 38, },
{ 0, 1, 0, 3, 100, 60, },
{ 2, 1, 0, 3, 100, 38, },
+ { 1, 1, 0, 3, 100, 70, },
+ { 3, 1, 0, 3, 100, 60, },
+ { 4, 1, 0, 3, 100, 64, },
+ { 5, 1, 0, 3, 100, 38, },
+ { 6, 1, 0, 3, 100, 60, },
+ { 7, 1, 0, 3, 100, 30, },
+ { 8, 1, 0, 3, 100, 60, },
+ { 9, 1, 0, 3, 100, 127, },
{ 0, 1, 0, 3, 104, 68, },
{ 2, 1, 0, 3, 104, 38, },
+ { 1, 1, 0, 3, 104, 70, },
+ { 3, 1, 0, 3, 104, 68, },
+ { 4, 1, 0, 3, 104, 64, },
+ { 5, 1, 0, 3, 104, 38, },
+ { 6, 1, 0, 3, 104, 68, },
+ { 7, 1, 0, 3, 104, 30, },
+ { 8, 1, 0, 3, 104, 68, },
+ { 9, 1, 0, 3, 104, 127, },
{ 0, 1, 0, 3, 108, 68, },
{ 2, 1, 0, 3, 108, 38, },
+ { 1, 1, 0, 3, 108, 70, },
+ { 3, 1, 0, 3, 108, 68, },
+ { 4, 1, 0, 3, 108, 64, },
+ { 5, 1, 0, 3, 108, 38, },
+ { 6, 1, 0, 3, 108, 68, },
+ { 7, 1, 0, 3, 108, 30, },
+ { 8, 1, 0, 3, 108, 68, },
+ { 9, 1, 0, 3, 108, 127, },
{ 0, 1, 0, 3, 112, 68, },
{ 2, 1, 0, 3, 112, 38, },
+ { 1, 1, 0, 3, 112, 70, },
+ { 3, 1, 0, 3, 112, 68, },
+ { 4, 1, 0, 3, 112, 64, },
+ { 5, 1, 0, 3, 112, 38, },
+ { 6, 1, 0, 3, 112, 68, },
+ { 7, 1, 0, 3, 112, 30, },
+ { 8, 1, 0, 3, 112, 68, },
+ { 9, 1, 0, 3, 112, 127, },
{ 0, 1, 0, 3, 116, 68, },
{ 2, 1, 0, 3, 116, 38, },
+ { 1, 1, 0, 3, 116, 70, },
+ { 3, 1, 0, 3, 116, 68, },
+ { 4, 1, 0, 3, 116, 64, },
+ { 5, 1, 0, 3, 116, 38, },
+ { 6, 1, 0, 3, 116, 68, },
+ { 7, 1, 0, 3, 116, 30, },
+ { 8, 1, 0, 3, 116, 68, },
+ { 9, 1, 0, 3, 116, 127, },
{ 0, 1, 0, 3, 120, 68, },
{ 2, 1, 0, 3, 120, 38, },
+ { 1, 1, 0, 3, 120, 70, },
+ { 3, 1, 0, 3, 120, 127, },
+ { 4, 1, 0, 3, 120, 64, },
+ { 5, 1, 0, 3, 120, 127, },
+ { 6, 1, 0, 3, 120, 68, },
+ { 7, 1, 0, 3, 120, 30, },
+ { 8, 1, 0, 3, 120, 68, },
+ { 9, 1, 0, 3, 120, 127, },
{ 0, 1, 0, 3, 124, 68, },
{ 2, 1, 0, 3, 124, 38, },
+ { 1, 1, 0, 3, 124, 70, },
+ { 3, 1, 0, 3, 124, 127, },
+ { 4, 1, 0, 3, 124, 64, },
+ { 5, 1, 0, 3, 124, 127, },
+ { 6, 1, 0, 3, 124, 68, },
+ { 7, 1, 0, 3, 124, 30, },
+ { 8, 1, 0, 3, 124, 68, },
+ { 9, 1, 0, 3, 124, 127, },
{ 0, 1, 0, 3, 128, 68, },
{ 2, 1, 0, 3, 128, 38, },
+ { 1, 1, 0, 3, 128, 70, },
+ { 3, 1, 0, 3, 128, 127, },
+ { 4, 1, 0, 3, 128, 64, },
+ { 5, 1, 0, 3, 128, 127, },
+ { 6, 1, 0, 3, 128, 68, },
+ { 7, 1, 0, 3, 128, 30, },
+ { 8, 1, 0, 3, 128, 68, },
+ { 9, 1, 0, 3, 128, 127, },
{ 0, 1, 0, 3, 132, 68, },
{ 2, 1, 0, 3, 132, 38, },
+ { 1, 1, 0, 3, 132, 70, },
+ { 3, 1, 0, 3, 132, 68, },
+ { 4, 1, 0, 3, 132, 64, },
+ { 5, 1, 0, 3, 132, 38, },
+ { 6, 1, 0, 3, 132, 68, },
+ { 7, 1, 0, 3, 132, 30, },
+ { 8, 1, 0, 3, 132, 68, },
+ { 9, 1, 0, 3, 132, 127, },
{ 0, 1, 0, 3, 136, 68, },
{ 2, 1, 0, 3, 136, 38, },
+ { 1, 1, 0, 3, 136, 70, },
+ { 3, 1, 0, 3, 136, 68, },
+ { 4, 1, 0, 3, 136, 64, },
+ { 5, 1, 0, 3, 136, 38, },
+ { 6, 1, 0, 3, 136, 68, },
+ { 7, 1, 0, 3, 136, 30, },
+ { 8, 1, 0, 3, 136, 68, },
+ { 9, 1, 0, 3, 136, 127, },
{ 0, 1, 0, 3, 140, 60, },
{ 2, 1, 0, 3, 140, 38, },
+ { 1, 1, 0, 3, 140, 70, },
+ { 3, 1, 0, 3, 140, 60, },
+ { 4, 1, 0, 3, 140, 64, },
+ { 5, 1, 0, 3, 140, 38, },
+ { 6, 1, 0, 3, 140, 60, },
+ { 7, 1, 0, 3, 140, 30, },
+ { 8, 1, 0, 3, 140, 60, },
+ { 9, 1, 0, 3, 140, 127, },
{ 0, 1, 0, 3, 144, 68, },
{ 2, 1, 0, 3, 144, 127, },
+ { 1, 1, 0, 3, 144, 127, },
+ { 3, 1, 0, 3, 144, 68, },
+ { 4, 1, 0, 3, 144, 64, },
+ { 5, 1, 0, 3, 144, 127, },
+ { 6, 1, 0, 3, 144, 68, },
+ { 7, 1, 0, 3, 144, 127, },
+ { 8, 1, 0, 3, 144, 68, },
+ { 9, 1, 0, 3, 144, 127, },
{ 0, 1, 0, 3, 149, 76, },
{ 2, 1, 0, 3, 149, -128, },
+ { 1, 1, 0, 3, 149, 127, },
+ { 3, 1, 0, 3, 149, 76, },
+ { 4, 1, 0, 3, 149, 60, },
+ { 5, 1, 0, 3, 149, 76, },
+ { 6, 1, 0, 3, 149, 76, },
+ { 7, 1, 0, 3, 149, 30, },
+ { 8, 1, 0, 3, 149, 72, },
+ { 9, 1, 0, 3, 149, -128, },
{ 0, 1, 0, 3, 153, 76, },
{ 2, 1, 0, 3, 153, -128, },
+ { 1, 1, 0, 3, 153, 127, },
+ { 3, 1, 0, 3, 153, 76, },
+ { 4, 1, 0, 3, 153, 60, },
+ { 5, 1, 0, 3, 153, 76, },
+ { 6, 1, 0, 3, 153, 76, },
+ { 7, 1, 0, 3, 153, 30, },
+ { 8, 1, 0, 3, 153, 76, },
+ { 9, 1, 0, 3, 153, -128, },
{ 0, 1, 0, 3, 157, 76, },
{ 2, 1, 0, 3, 157, -128, },
+ { 1, 1, 0, 3, 157, 127, },
+ { 3, 1, 0, 3, 157, 76, },
+ { 4, 1, 0, 3, 157, 60, },
+ { 5, 1, 0, 3, 157, 76, },
+ { 6, 1, 0, 3, 157, 76, },
+ { 7, 1, 0, 3, 157, 30, },
+ { 8, 1, 0, 3, 157, 76, },
+ { 9, 1, 0, 3, 157, -128, },
{ 0, 1, 0, 3, 161, 76, },
{ 2, 1, 0, 3, 161, -128, },
+ { 1, 1, 0, 3, 161, 127, },
+ { 3, 1, 0, 3, 161, 76, },
+ { 4, 1, 0, 3, 161, 60, },
+ { 5, 1, 0, 3, 161, 76, },
+ { 6, 1, 0, 3, 161, 76, },
+ { 7, 1, 0, 3, 161, 30, },
+ { 8, 1, 0, 3, 161, 76, },
+ { 9, 1, 0, 3, 161, -128, },
{ 0, 1, 0, 3, 165, 76, },
{ 2, 1, 0, 3, 165, -128, },
+ { 1, 1, 0, 3, 165, 127, },
+ { 3, 1, 0, 3, 165, 76, },
+ { 4, 1, 0, 3, 165, 60, },
+ { 5, 1, 0, 3, 165, 76, },
+ { 6, 1, 0, 3, 165, 76, },
+ { 7, 1, 0, 3, 165, 30, },
+ { 8, 1, 0, 3, 165, 76, },
+ { 9, 1, 0, 3, 165, -128, },
{ 0, 1, 1, 2, 38, 66, },
{ 2, 1, 1, 2, 38, 64, },
+ { 1, 1, 1, 2, 38, 62, },
+ { 3, 1, 1, 2, 38, 64, },
+ { 4, 1, 1, 2, 38, 72, },
+ { 5, 1, 1, 2, 38, 64, },
+ { 6, 1, 1, 2, 38, 64, },
+ { 7, 1, 1, 2, 38, 54, },
+ { 8, 1, 1, 2, 38, 62, },
+ { 9, 1, 1, 2, 38, 64, },
{ 0, 1, 1, 2, 46, 72, },
{ 2, 1, 1, 2, 46, 64, },
+ { 1, 1, 1, 2, 46, 62, },
+ { 3, 1, 1, 2, 46, 64, },
+ { 4, 1, 1, 2, 46, 60, },
+ { 5, 1, 1, 2, 46, 64, },
+ { 6, 1, 1, 2, 46, 64, },
+ { 7, 1, 1, 2, 46, 54, },
+ { 8, 1, 1, 2, 46, 62, },
+ { 9, 1, 1, 2, 46, 64, },
{ 0, 1, 1, 2, 54, 72, },
{ 2, 1, 1, 2, 54, 64, },
+ { 1, 1, 1, 2, 54, 62, },
+ { 3, 1, 1, 2, 54, 64, },
+ { 4, 1, 1, 2, 54, 72, },
+ { 5, 1, 1, 2, 54, 64, },
+ { 6, 1, 1, 2, 54, 72, },
+ { 7, 1, 1, 2, 54, 54, },
+ { 8, 1, 1, 2, 54, 72, },
+ { 9, 1, 1, 2, 54, 64, },
{ 0, 1, 1, 2, 62, 64, },
{ 2, 1, 1, 2, 62, 64, },
+ { 1, 1, 1, 2, 62, 62, },
+ { 3, 1, 1, 2, 62, 64, },
+ { 4, 1, 1, 2, 62, 70, },
+ { 5, 1, 1, 2, 62, 64, },
+ { 6, 1, 1, 2, 62, 64, },
+ { 7, 1, 1, 2, 62, 54, },
+ { 8, 1, 1, 2, 62, 64, },
+ { 9, 1, 1, 2, 62, 64, },
{ 0, 1, 1, 2, 102, 58, },
{ 2, 1, 1, 2, 102, 64, },
+ { 1, 1, 1, 2, 102, 72, },
+ { 3, 1, 1, 2, 102, 58, },
+ { 4, 1, 1, 2, 102, 72, },
+ { 5, 1, 1, 2, 102, 64, },
+ { 6, 1, 1, 2, 102, 58, },
+ { 7, 1, 1, 2, 102, 54, },
+ { 8, 1, 1, 2, 102, 58, },
+ { 9, 1, 1, 2, 102, 127, },
{ 0, 1, 1, 2, 110, 72, },
{ 2, 1, 1, 2, 110, 64, },
+ { 1, 1, 1, 2, 110, 72, },
+ { 3, 1, 1, 2, 110, 72, },
+ { 4, 1, 1, 2, 110, 72, },
+ { 5, 1, 1, 2, 110, 64, },
+ { 6, 1, 1, 2, 110, 72, },
+ { 7, 1, 1, 2, 110, 54, },
+ { 8, 1, 1, 2, 110, 72, },
+ { 9, 1, 1, 2, 110, 127, },
{ 0, 1, 1, 2, 118, 72, },
{ 2, 1, 1, 2, 118, 64, },
+ { 1, 1, 1, 2, 118, 72, },
+ { 3, 1, 1, 2, 118, 127, },
+ { 4, 1, 1, 2, 118, 72, },
+ { 5, 1, 1, 2, 118, 127, },
+ { 6, 1, 1, 2, 118, 72, },
+ { 7, 1, 1, 2, 118, 54, },
+ { 8, 1, 1, 2, 118, 72, },
+ { 9, 1, 1, 2, 118, 127, },
{ 0, 1, 1, 2, 126, 72, },
{ 2, 1, 1, 2, 126, 64, },
+ { 1, 1, 1, 2, 126, 72, },
+ { 3, 1, 1, 2, 126, 127, },
+ { 4, 1, 1, 2, 126, 72, },
+ { 5, 1, 1, 2, 126, 127, },
+ { 6, 1, 1, 2, 126, 72, },
+ { 7, 1, 1, 2, 126, 54, },
+ { 8, 1, 1, 2, 126, 72, },
+ { 9, 1, 1, 2, 126, 127, },
{ 0, 1, 1, 2, 134, 72, },
{ 2, 1, 1, 2, 134, 64, },
+ { 1, 1, 1, 2, 134, 72, },
+ { 3, 1, 1, 2, 134, 72, },
+ { 4, 1, 1, 2, 134, 72, },
+ { 5, 1, 1, 2, 134, 64, },
+ { 6, 1, 1, 2, 134, 72, },
+ { 7, 1, 1, 2, 134, 54, },
+ { 8, 1, 1, 2, 134, 72, },
+ { 9, 1, 1, 2, 134, 127, },
{ 0, 1, 1, 2, 142, 72, },
{ 2, 1, 1, 2, 142, 127, },
+ { 1, 1, 1, 2, 142, 127, },
+ { 3, 1, 1, 2, 142, 72, },
+ { 4, 1, 1, 2, 142, 72, },
+ { 5, 1, 1, 2, 142, 127, },
+ { 6, 1, 1, 2, 142, 72, },
+ { 7, 1, 1, 2, 142, 127, },
+ { 8, 1, 1, 2, 142, 72, },
+ { 9, 1, 1, 2, 142, 127, },
{ 0, 1, 1, 2, 151, 72, },
{ 2, 1, 1, 2, 151, -128, },
+ { 1, 1, 1, 2, 151, 127, },
+ { 3, 1, 1, 2, 151, 72, },
+ { 4, 1, 1, 2, 151, 72, },
+ { 5, 1, 1, 2, 151, 72, },
+ { 6, 1, 1, 2, 151, 72, },
+ { 7, 1, 1, 2, 151, 54, },
+ { 8, 1, 1, 2, 151, 72, },
+ { 9, 1, 1, 2, 151, -128, },
{ 0, 1, 1, 2, 159, 72, },
{ 2, 1, 1, 2, 159, -128, },
+ { 1, 1, 1, 2, 159, 127, },
+ { 3, 1, 1, 2, 159, 72, },
+ { 4, 1, 1, 2, 159, 72, },
+ { 5, 1, 1, 2, 159, 72, },
+ { 6, 1, 1, 2, 159, 72, },
+ { 7, 1, 1, 2, 159, 54, },
+ { 8, 1, 1, 2, 159, 72, },
+ { 9, 1, 1, 2, 159, -128, },
{ 0, 1, 1, 3, 38, 60, },
{ 2, 1, 1, 3, 38, 40, },
+ { 1, 1, 1, 3, 38, 50, },
+ { 3, 1, 1, 3, 38, 40, },
+ { 4, 1, 1, 3, 38, 62, },
+ { 5, 1, 1, 3, 38, 40, },
+ { 6, 1, 1, 3, 38, 52, },
+ { 7, 1, 1, 3, 38, 30, },
+ { 8, 1, 1, 3, 38, 50, },
+ { 9, 1, 1, 3, 38, 40, },
{ 0, 1, 1, 3, 46, 68, },
{ 2, 1, 1, 3, 46, 40, },
+ { 1, 1, 1, 3, 46, 50, },
+ { 3, 1, 1, 3, 46, 40, },
+ { 4, 1, 1, 3, 46, 46, },
+ { 5, 1, 1, 3, 46, 40, },
+ { 6, 1, 1, 3, 46, 52, },
+ { 7, 1, 1, 3, 46, 30, },
+ { 8, 1, 1, 3, 46, 50, },
+ { 9, 1, 1, 3, 46, 40, },
{ 0, 1, 1, 3, 54, 68, },
{ 2, 1, 1, 3, 54, 40, },
+ { 1, 1, 1, 3, 54, 50, },
+ { 3, 1, 1, 3, 54, 40, },
+ { 4, 1, 1, 3, 54, 62, },
+ { 5, 1, 1, 3, 54, 40, },
+ { 6, 1, 1, 3, 54, 68, },
+ { 7, 1, 1, 3, 54, 30, },
+ { 8, 1, 1, 3, 54, 68, },
+ { 9, 1, 1, 3, 54, 40, },
{ 0, 1, 1, 3, 62, 58, },
{ 2, 1, 1, 3, 62, 40, },
+ { 1, 1, 1, 3, 62, 48, },
+ { 3, 1, 1, 3, 62, 40, },
+ { 4, 1, 1, 3, 62, 58, },
+ { 5, 1, 1, 3, 62, 40, },
+ { 6, 1, 1, 3, 62, 58, },
+ { 7, 1, 1, 3, 62, 30, },
+ { 8, 1, 1, 3, 62, 58, },
+ { 9, 1, 1, 3, 62, 40, },
{ 0, 1, 1, 3, 102, 54, },
{ 2, 1, 1, 3, 102, 40, },
+ { 1, 1, 1, 3, 102, 70, },
+ { 3, 1, 1, 3, 102, 54, },
+ { 4, 1, 1, 3, 102, 64, },
+ { 5, 1, 1, 3, 102, 40, },
+ { 6, 1, 1, 3, 102, 54, },
+ { 7, 1, 1, 3, 102, 30, },
+ { 8, 1, 1, 3, 102, 54, },
+ { 9, 1, 1, 3, 102, 127, },
{ 0, 1, 1, 3, 110, 68, },
{ 2, 1, 1, 3, 110, 40, },
+ { 1, 1, 1, 3, 110, 70, },
+ { 3, 1, 1, 3, 110, 68, },
+ { 4, 1, 1, 3, 110, 64, },
+ { 5, 1, 1, 3, 110, 40, },
+ { 6, 1, 1, 3, 110, 68, },
+ { 7, 1, 1, 3, 110, 30, },
+ { 8, 1, 1, 3, 110, 68, },
+ { 9, 1, 1, 3, 110, 127, },
{ 0, 1, 1, 3, 118, 68, },
{ 2, 1, 1, 3, 118, 40, },
+ { 1, 1, 1, 3, 118, 70, },
+ { 3, 1, 1, 3, 118, 127, },
+ { 4, 1, 1, 3, 118, 64, },
+ { 5, 1, 1, 3, 118, 127, },
+ { 6, 1, 1, 3, 118, 68, },
+ { 7, 1, 1, 3, 118, 30, },
+ { 8, 1, 1, 3, 118, 68, },
+ { 9, 1, 1, 3, 118, 127, },
{ 0, 1, 1, 3, 126, 68, },
{ 2, 1, 1, 3, 126, 40, },
+ { 1, 1, 1, 3, 126, 70, },
+ { 3, 1, 1, 3, 126, 127, },
+ { 4, 1, 1, 3, 126, 64, },
+ { 5, 1, 1, 3, 126, 127, },
+ { 6, 1, 1, 3, 126, 68, },
+ { 7, 1, 1, 3, 126, 30, },
+ { 8, 1, 1, 3, 126, 68, },
+ { 9, 1, 1, 3, 126, 127, },
{ 0, 1, 1, 3, 134, 68, },
{ 2, 1, 1, 3, 134, 40, },
+ { 1, 1, 1, 3, 134, 70, },
+ { 3, 1, 1, 3, 134, 68, },
+ { 4, 1, 1, 3, 134, 64, },
+ { 5, 1, 1, 3, 134, 40, },
+ { 6, 1, 1, 3, 134, 68, },
+ { 7, 1, 1, 3, 134, 30, },
+ { 8, 1, 1, 3, 134, 68, },
+ { 9, 1, 1, 3, 134, 127, },
{ 0, 1, 1, 3, 142, 68, },
{ 2, 1, 1, 3, 142, 127, },
+ { 1, 1, 1, 3, 142, 127, },
+ { 3, 1, 1, 3, 142, 68, },
+ { 4, 1, 1, 3, 142, 64, },
+ { 5, 1, 1, 3, 142, 127, },
+ { 6, 1, 1, 3, 142, 68, },
+ { 7, 1, 1, 3, 142, 127, },
+ { 8, 1, 1, 3, 142, 68, },
+ { 9, 1, 1, 3, 142, 127, },
{ 0, 1, 1, 3, 151, 72, },
{ 2, 1, 1, 3, 151, -128, },
+ { 1, 1, 1, 3, 151, 127, },
+ { 3, 1, 1, 3, 151, 72, },
+ { 4, 1, 1, 3, 151, 66, },
+ { 5, 1, 1, 3, 151, 72, },
+ { 6, 1, 1, 3, 151, 72, },
+ { 7, 1, 1, 3, 151, 30, },
+ { 8, 1, 1, 3, 151, 68, },
+ { 9, 1, 1, 3, 151, -128, },
{ 0, 1, 1, 3, 159, 72, },
{ 2, 1, 1, 3, 159, -128, },
+ { 1, 1, 1, 3, 159, 127, },
+ { 3, 1, 1, 3, 159, 72, },
+ { 4, 1, 1, 3, 159, 66, },
+ { 5, 1, 1, 3, 159, 72, },
+ { 6, 1, 1, 3, 159, 72, },
+ { 7, 1, 1, 3, 159, 30, },
+ { 8, 1, 1, 3, 159, 72, },
+ { 9, 1, 1, 3, 159, -128, },
{ 0, 1, 2, 4, 42, 64, },
{ 2, 1, 2, 4, 42, 64, },
+ { 1, 1, 2, 4, 42, 64, },
+ { 3, 1, 2, 4, 42, 64, },
+ { 4, 1, 2, 4, 42, 68, },
+ { 5, 1, 2, 4, 42, 64, },
+ { 6, 1, 2, 4, 42, 64, },
+ { 7, 1, 2, 4, 42, 54, },
+ { 8, 1, 2, 4, 42, 62, },
+ { 9, 1, 2, 4, 42, 64, },
{ 0, 1, 2, 4, 58, 62, },
{ 2, 1, 2, 4, 58, 64, },
+ { 1, 1, 2, 4, 58, 64, },
+ { 3, 1, 2, 4, 58, 62, },
+ { 4, 1, 2, 4, 58, 64, },
+ { 5, 1, 2, 4, 58, 64, },
+ { 6, 1, 2, 4, 58, 62, },
+ { 7, 1, 2, 4, 58, 54, },
+ { 8, 1, 2, 4, 58, 62, },
+ { 9, 1, 2, 4, 58, 64, },
{ 0, 1, 2, 4, 106, 58, },
{ 2, 1, 2, 4, 106, 64, },
+ { 1, 1, 2, 4, 106, 72, },
+ { 3, 1, 2, 4, 106, 58, },
+ { 4, 1, 2, 4, 106, 66, },
+ { 5, 1, 2, 4, 106, 64, },
+ { 6, 1, 2, 4, 106, 58, },
+ { 7, 1, 2, 4, 106, 54, },
+ { 8, 1, 2, 4, 106, 58, },
+ { 9, 1, 2, 4, 106, 127, },
{ 0, 1, 2, 4, 122, 72, },
{ 2, 1, 2, 4, 122, 64, },
+ { 1, 1, 2, 4, 122, 72, },
+ { 3, 1, 2, 4, 122, 127, },
+ { 4, 1, 2, 4, 122, 68, },
+ { 5, 1, 2, 4, 122, 127, },
+ { 6, 1, 2, 4, 122, 72, },
+ { 7, 1, 2, 4, 122, 54, },
+ { 8, 1, 2, 4, 122, 72, },
+ { 9, 1, 2, 4, 122, 127, },
{ 0, 1, 2, 4, 138, 72, },
{ 2, 1, 2, 4, 138, 127, },
+ { 1, 1, 2, 4, 138, 127, },
+ { 3, 1, 2, 4, 138, 72, },
+ { 4, 1, 2, 4, 138, 68, },
+ { 5, 1, 2, 4, 138, 127, },
+ { 6, 1, 2, 4, 138, 72, },
+ { 7, 1, 2, 4, 138, 127, },
+ { 8, 1, 2, 4, 138, 72, },
+ { 9, 1, 2, 4, 138, 127, },
{ 0, 1, 2, 4, 155, 72, },
{ 2, 1, 2, 4, 155, -128, },
+ { 1, 1, 2, 4, 155, 127, },
+ { 3, 1, 2, 4, 155, 72, },
+ { 4, 1, 2, 4, 155, 68, },
+ { 5, 1, 2, 4, 155, 72, },
+ { 6, 1, 2, 4, 155, 72, },
+ { 7, 1, 2, 4, 155, 54, },
+ { 8, 1, 2, 4, 155, 68, },
+ { 9, 1, 2, 4, 155, -128, },
{ 0, 1, 2, 5, 42, 54, },
{ 2, 1, 2, 5, 42, 40, },
+ { 1, 1, 2, 5, 42, 50, },
+ { 3, 1, 2, 5, 42, 40, },
+ { 4, 1, 2, 5, 42, 58, },
+ { 5, 1, 2, 5, 42, 40, },
+ { 6, 1, 2, 5, 42, 52, },
+ { 7, 1, 2, 5, 42, 30, },
+ { 8, 1, 2, 5, 42, 50, },
+ { 9, 1, 2, 5, 42, 40, },
{ 0, 1, 2, 5, 58, 52, },
{ 2, 1, 2, 5, 58, 40, },
+ { 1, 1, 2, 5, 58, 50, },
+ { 3, 1, 2, 5, 58, 40, },
+ { 4, 1, 2, 5, 58, 56, },
+ { 5, 1, 2, 5, 58, 40, },
+ { 6, 1, 2, 5, 58, 52, },
+ { 7, 1, 2, 5, 58, 30, },
+ { 8, 1, 2, 5, 58, 52, },
+ { 9, 1, 2, 5, 58, 40, },
{ 0, 1, 2, 5, 106, 50, },
{ 2, 1, 2, 5, 106, 40, },
+ { 1, 1, 2, 5, 106, 72, },
+ { 3, 1, 2, 5, 106, 50, },
+ { 4, 1, 2, 5, 106, 56, },
+ { 5, 1, 2, 5, 106, 40, },
+ { 6, 1, 2, 5, 106, 50, },
+ { 7, 1, 2, 5, 106, 30, },
+ { 8, 1, 2, 5, 106, 50, },
+ { 9, 1, 2, 5, 106, 127, },
{ 0, 1, 2, 5, 122, 66, },
{ 2, 1, 2, 5, 122, 40, },
+ { 1, 1, 2, 5, 122, 72, },
+ { 3, 1, 2, 5, 122, 127, },
+ { 4, 1, 2, 5, 122, 56, },
+ { 5, 1, 2, 5, 122, 127, },
+ { 6, 1, 2, 5, 122, 66, },
+ { 7, 1, 2, 5, 122, 30, },
+ { 8, 1, 2, 5, 122, 66, },
+ { 9, 1, 2, 5, 122, 127, },
{ 0, 1, 2, 5, 138, 66, },
{ 2, 1, 2, 5, 138, 127, },
+ { 1, 1, 2, 5, 138, 127, },
+ { 3, 1, 2, 5, 138, 66, },
+ { 4, 1, 2, 5, 138, 58, },
+ { 5, 1, 2, 5, 138, 127, },
+ { 6, 1, 2, 5, 138, 66, },
+ { 7, 1, 2, 5, 138, 127, },
+ { 8, 1, 2, 5, 138, 66, },
+ { 9, 1, 2, 5, 138, 127, },
{ 0, 1, 2, 5, 155, 62, },
{ 2, 1, 2, 5, 155, -128, },
+ { 1, 1, 2, 5, 155, 127, },
+ { 3, 1, 2, 5, 155, 62, },
+ { 4, 1, 2, 5, 155, 58, },
+ { 5, 1, 2, 5, 155, 72, },
+ { 6, 1, 2, 5, 155, 62, },
+ { 7, 1, 2, 5, 155, 30, },
+ { 8, 1, 2, 5, 155, 62, },
+ { 9, 1, 2, 5, 155, -128, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
+ { 0, 0, 0, 0, 1, 72, },
+ { 2, 0, 0, 0, 1, 60, },
{ 1, 0, 0, 0, 1, 68, },
{ 3, 0, 0, 0, 1, 72, },
{ 4, 0, 0, 0, 1, 76, },
@@ -12112,6 +25743,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 1, 72, },
{ 7, 0, 0, 0, 1, 60, },
{ 8, 0, 0, 0, 1, 72, },
+ { 9, 0, 0, 0, 1, 60, },
+ { 0, 0, 0, 0, 2, 72, },
+ { 2, 0, 0, 0, 2, 60, },
{ 1, 0, 0, 0, 2, 68, },
{ 3, 0, 0, 0, 2, 72, },
{ 4, 0, 0, 0, 2, 76, },
@@ -12119,6 +25753,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 2, 72, },
{ 7, 0, 0, 0, 2, 60, },
{ 8, 0, 0, 0, 2, 72, },
+ { 9, 0, 0, 0, 2, 60, },
+ { 0, 0, 0, 0, 3, 76, },
+ { 2, 0, 0, 0, 3, 60, },
{ 1, 0, 0, 0, 3, 68, },
{ 3, 0, 0, 0, 3, 76, },
{ 4, 0, 0, 0, 3, 76, },
@@ -12126,6 +25763,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 3, 76, },
{ 7, 0, 0, 0, 3, 60, },
{ 8, 0, 0, 0, 3, 76, },
+ { 9, 0, 0, 0, 3, 60, },
+ { 0, 0, 0, 0, 4, 76, },
+ { 2, 0, 0, 0, 4, 60, },
{ 1, 0, 0, 0, 4, 68, },
{ 3, 0, 0, 0, 4, 76, },
{ 4, 0, 0, 0, 4, 76, },
@@ -12133,6 +25773,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 4, 76, },
{ 7, 0, 0, 0, 4, 60, },
{ 8, 0, 0, 0, 4, 76, },
+ { 9, 0, 0, 0, 4, 60, },
+ { 0, 0, 0, 0, 5, 76, },
+ { 2, 0, 0, 0, 5, 60, },
{ 1, 0, 0, 0, 5, 68, },
{ 3, 0, 0, 0, 5, 76, },
{ 4, 0, 0, 0, 5, 76, },
@@ -12140,6 +25783,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 5, 76, },
{ 7, 0, 0, 0, 5, 60, },
{ 8, 0, 0, 0, 5, 76, },
+ { 9, 0, 0, 0, 5, 60, },
+ { 0, 0, 0, 0, 6, 76, },
+ { 2, 0, 0, 0, 6, 60, },
{ 1, 0, 0, 0, 6, 68, },
{ 3, 0, 0, 0, 6, 76, },
{ 4, 0, 0, 0, 6, 76, },
@@ -12147,6 +25793,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 6, 76, },
{ 7, 0, 0, 0, 6, 60, },
{ 8, 0, 0, 0, 6, 76, },
+ { 9, 0, 0, 0, 6, 60, },
+ { 0, 0, 0, 0, 7, 76, },
+ { 2, 0, 0, 0, 7, 60, },
{ 1, 0, 0, 0, 7, 68, },
{ 3, 0, 0, 0, 7, 76, },
{ 4, 0, 0, 0, 7, 76, },
@@ -12154,6 +25803,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 7, 76, },
{ 7, 0, 0, 0, 7, 60, },
{ 8, 0, 0, 0, 7, 76, },
+ { 9, 0, 0, 0, 7, 60, },
+ { 0, 0, 0, 0, 8, 76, },
+ { 2, 0, 0, 0, 8, 60, },
{ 1, 0, 0, 0, 8, 68, },
{ 3, 0, 0, 0, 8, 76, },
{ 4, 0, 0, 0, 8, 76, },
@@ -12161,6 +25813,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 8, 76, },
{ 7, 0, 0, 0, 8, 60, },
{ 8, 0, 0, 0, 8, 76, },
+ { 9, 0, 0, 0, 8, 60, },
+ { 0, 0, 0, 0, 9, 76, },
+ { 2, 0, 0, 0, 9, 60, },
{ 1, 0, 0, 0, 9, 68, },
{ 3, 0, 0, 0, 9, 76, },
{ 4, 0, 0, 0, 9, 76, },
@@ -12168,6 +25823,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 9, 76, },
{ 7, 0, 0, 0, 9, 60, },
{ 8, 0, 0, 0, 9, 76, },
+ { 9, 0, 0, 0, 9, 60, },
+ { 0, 0, 0, 0, 10, 72, },
+ { 2, 0, 0, 0, 10, 60, },
{ 1, 0, 0, 0, 10, 68, },
{ 3, 0, 0, 0, 10, 72, },
{ 4, 0, 0, 0, 10, 76, },
@@ -12175,6 +25833,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 10, 72, },
{ 7, 0, 0, 0, 10, 60, },
{ 8, 0, 0, 0, 10, 72, },
+ { 9, 0, 0, 0, 10, 60, },
+ { 0, 0, 0, 0, 11, 72, },
+ { 2, 0, 0, 0, 11, 60, },
{ 1, 0, 0, 0, 11, 68, },
{ 3, 0, 0, 0, 11, 72, },
{ 4, 0, 0, 0, 11, 76, },
@@ -12182,6 +25843,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 11, 72, },
{ 7, 0, 0, 0, 11, 60, },
{ 8, 0, 0, 0, 11, 72, },
+ { 9, 0, 0, 0, 11, 60, },
+ { 0, 0, 0, 0, 12, 52, },
+ { 2, 0, 0, 0, 12, 60, },
{ 1, 0, 0, 0, 12, 68, },
{ 3, 0, 0, 0, 12, 52, },
{ 4, 0, 0, 0, 12, 76, },
@@ -12189,6 +25853,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 12, 52, },
{ 7, 0, 0, 0, 12, 60, },
{ 8, 0, 0, 0, 12, 52, },
+ { 9, 0, 0, 0, 12, 60, },
+ { 0, 0, 0, 0, 13, 48, },
+ { 2, 0, 0, 0, 13, 60, },
{ 1, 0, 0, 0, 13, 68, },
{ 3, 0, 0, 0, 13, 48, },
{ 4, 0, 0, 0, 13, 76, },
@@ -12196,6 +25863,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 13, 48, },
{ 7, 0, 0, 0, 13, 60, },
{ 8, 0, 0, 0, 13, 48, },
+ { 9, 0, 0, 0, 13, 60, },
+ { 0, 0, 0, 0, 14, 127, },
+ { 2, 0, 0, 0, 14, 127, },
{ 1, 0, 0, 0, 14, 68, },
{ 3, 0, 0, 0, 14, 127, },
{ 4, 0, 0, 0, 14, 127, },
@@ -12203,6 +25873,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 14, 127, },
{ 7, 0, 0, 0, 14, 127, },
{ 8, 0, 0, 0, 14, 127, },
+ { 9, 0, 0, 0, 14, 127, },
+ { 0, 0, 0, 1, 1, 52, },
+ { 2, 0, 0, 1, 1, 60, },
{ 1, 0, 0, 1, 1, 76, },
{ 3, 0, 0, 1, 1, 52, },
{ 4, 0, 0, 1, 1, 76, },
@@ -12210,6 +25883,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 1, 52, },
{ 7, 0, 0, 1, 1, 60, },
{ 8, 0, 0, 1, 1, 52, },
+ { 9, 0, 0, 1, 1, 60, },
+ { 0, 0, 0, 1, 2, 60, },
+ { 2, 0, 0, 1, 2, 60, },
{ 1, 0, 0, 1, 2, 76, },
{ 3, 0, 0, 1, 2, 60, },
{ 4, 0, 0, 1, 2, 76, },
@@ -12217,6 +25893,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 2, 60, },
{ 7, 0, 0, 1, 2, 60, },
{ 8, 0, 0, 1, 2, 60, },
+ { 9, 0, 0, 1, 2, 60, },
+ { 0, 0, 0, 1, 3, 64, },
+ { 2, 0, 0, 1, 3, 60, },
{ 1, 0, 0, 1, 3, 76, },
{ 3, 0, 0, 1, 3, 64, },
{ 4, 0, 0, 1, 3, 76, },
@@ -12224,6 +25903,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 3, 64, },
{ 7, 0, 0, 1, 3, 60, },
{ 8, 0, 0, 1, 3, 64, },
+ { 9, 0, 0, 1, 3, 60, },
+ { 0, 0, 0, 1, 4, 68, },
+ { 2, 0, 0, 1, 4, 60, },
{ 1, 0, 0, 1, 4, 76, },
{ 3, 0, 0, 1, 4, 68, },
{ 4, 0, 0, 1, 4, 76, },
@@ -12231,6 +25913,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 4, 68, },
{ 7, 0, 0, 1, 4, 60, },
{ 8, 0, 0, 1, 4, 68, },
+ { 9, 0, 0, 1, 4, 60, },
+ { 0, 0, 0, 1, 5, 76, },
+ { 2, 0, 0, 1, 5, 60, },
{ 1, 0, 0, 1, 5, 76, },
{ 3, 0, 0, 1, 5, 76, },
{ 4, 0, 0, 1, 5, 76, },
@@ -12238,6 +25923,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 5, 76, },
{ 7, 0, 0, 1, 5, 60, },
{ 8, 0, 0, 1, 5, 76, },
+ { 9, 0, 0, 1, 5, 60, },
+ { 0, 0, 0, 1, 6, 76, },
+ { 2, 0, 0, 1, 6, 60, },
{ 1, 0, 0, 1, 6, 76, },
{ 3, 0, 0, 1, 6, 76, },
{ 4, 0, 0, 1, 6, 76, },
@@ -12245,6 +25933,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 6, 76, },
{ 7, 0, 0, 1, 6, 60, },
{ 8, 0, 0, 1, 6, 76, },
+ { 9, 0, 0, 1, 6, 60, },
+ { 0, 0, 0, 1, 7, 76, },
+ { 2, 0, 0, 1, 7, 60, },
{ 1, 0, 0, 1, 7, 76, },
{ 3, 0, 0, 1, 7, 76, },
{ 4, 0, 0, 1, 7, 76, },
@@ -12252,6 +25943,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 7, 76, },
{ 7, 0, 0, 1, 7, 60, },
{ 8, 0, 0, 1, 7, 76, },
+ { 9, 0, 0, 1, 7, 60, },
+ { 0, 0, 0, 1, 8, 68, },
+ { 2, 0, 0, 1, 8, 60, },
{ 1, 0, 0, 1, 8, 76, },
{ 3, 0, 0, 1, 8, 68, },
{ 4, 0, 0, 1, 8, 76, },
@@ -12259,6 +25953,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 8, 68, },
{ 7, 0, 0, 1, 8, 60, },
{ 8, 0, 0, 1, 8, 68, },
+ { 9, 0, 0, 1, 8, 60, },
+ { 0, 0, 0, 1, 9, 64, },
+ { 2, 0, 0, 1, 9, 60, },
{ 1, 0, 0, 1, 9, 76, },
{ 3, 0, 0, 1, 9, 64, },
{ 4, 0, 0, 1, 9, 76, },
@@ -12266,6 +25963,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 9, 64, },
{ 7, 0, 0, 1, 9, 60, },
{ 8, 0, 0, 1, 9, 64, },
+ { 9, 0, 0, 1, 9, 60, },
+ { 0, 0, 0, 1, 10, 60, },
+ { 2, 0, 0, 1, 10, 60, },
{ 1, 0, 0, 1, 10, 76, },
{ 3, 0, 0, 1, 10, 60, },
{ 4, 0, 0, 1, 10, 76, },
@@ -12273,6 +25973,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 10, 60, },
{ 7, 0, 0, 1, 10, 60, },
{ 8, 0, 0, 1, 10, 60, },
+ { 9, 0, 0, 1, 10, 60, },
+ { 0, 0, 0, 1, 11, 52, },
+ { 2, 0, 0, 1, 11, 60, },
{ 1, 0, 0, 1, 11, 76, },
{ 3, 0, 0, 1, 11, 52, },
{ 4, 0, 0, 1, 11, 76, },
@@ -12280,6 +25983,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 11, 52, },
{ 7, 0, 0, 1, 11, 60, },
{ 8, 0, 0, 1, 11, 52, },
+ { 9, 0, 0, 1, 11, 60, },
+ { 0, 0, 0, 1, 12, 40, },
+ { 2, 0, 0, 1, 12, 60, },
{ 1, 0, 0, 1, 12, 76, },
{ 3, 0, 0, 1, 12, 40, },
{ 4, 0, 0, 1, 12, 76, },
@@ -12287,6 +25993,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 12, 40, },
{ 7, 0, 0, 1, 12, 60, },
{ 8, 0, 0, 1, 12, 40, },
+ { 9, 0, 0, 1, 12, 60, },
+ { 0, 0, 0, 1, 13, 28, },
+ { 2, 0, 0, 1, 13, 60, },
{ 1, 0, 0, 1, 13, 76, },
{ 3, 0, 0, 1, 13, 28, },
{ 4, 0, 0, 1, 13, 70, },
@@ -12294,6 +26003,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 13, 28, },
{ 7, 0, 0, 1, 13, 60, },
{ 8, 0, 0, 1, 13, 28, },
+ { 9, 0, 0, 1, 13, 60, },
+ { 0, 0, 0, 1, 14, 127, },
+ { 2, 0, 0, 1, 14, 127, },
{ 1, 0, 0, 1, 14, 127, },
{ 3, 0, 0, 1, 14, 127, },
{ 4, 0, 0, 1, 14, 127, },
@@ -12301,6 +26013,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 14, 127, },
{ 7, 0, 0, 1, 14, 127, },
{ 8, 0, 0, 1, 14, 127, },
+ { 9, 0, 0, 1, 14, 127, },
+ { 0, 0, 0, 2, 1, 52, },
+ { 2, 0, 0, 2, 1, 60, },
{ 1, 0, 0, 2, 1, 76, },
{ 3, 0, 0, 2, 1, 52, },
{ 4, 0, 0, 2, 1, 76, },
@@ -12308,6 +26023,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 1, 52, },
{ 7, 0, 0, 2, 1, 60, },
{ 8, 0, 0, 2, 1, 52, },
+ { 9, 0, 0, 2, 1, 60, },
+ { 0, 0, 0, 2, 2, 60, },
+ { 2, 0, 0, 2, 2, 60, },
{ 1, 0, 0, 2, 2, 76, },
{ 3, 0, 0, 2, 2, 60, },
{ 4, 0, 0, 2, 2, 76, },
@@ -12315,6 +26033,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 2, 60, },
{ 7, 0, 0, 2, 2, 60, },
{ 8, 0, 0, 2, 2, 60, },
+ { 9, 0, 0, 2, 2, 60, },
+ { 0, 0, 0, 2, 3, 64, },
+ { 2, 0, 0, 2, 3, 60, },
{ 1, 0, 0, 2, 3, 76, },
{ 3, 0, 0, 2, 3, 64, },
{ 4, 0, 0, 2, 3, 76, },
@@ -12322,6 +26043,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 3, 64, },
{ 7, 0, 0, 2, 3, 60, },
{ 8, 0, 0, 2, 3, 64, },
+ { 9, 0, 0, 2, 3, 60, },
+ { 0, 0, 0, 2, 4, 68, },
+ { 2, 0, 0, 2, 4, 60, },
{ 1, 0, 0, 2, 4, 76, },
{ 3, 0, 0, 2, 4, 68, },
{ 4, 0, 0, 2, 4, 76, },
@@ -12329,6 +26053,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 4, 68, },
{ 7, 0, 0, 2, 4, 60, },
{ 8, 0, 0, 2, 4, 68, },
+ { 9, 0, 0, 2, 4, 60, },
+ { 0, 0, 0, 2, 5, 76, },
+ { 2, 0, 0, 2, 5, 60, },
{ 1, 0, 0, 2, 5, 76, },
{ 3, 0, 0, 2, 5, 76, },
{ 4, 0, 0, 2, 5, 76, },
@@ -12336,6 +26063,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 5, 76, },
{ 7, 0, 0, 2, 5, 60, },
{ 8, 0, 0, 2, 5, 76, },
+ { 9, 0, 0, 2, 5, 60, },
+ { 0, 0, 0, 2, 6, 76, },
+ { 2, 0, 0, 2, 6, 60, },
{ 1, 0, 0, 2, 6, 76, },
{ 3, 0, 0, 2, 6, 76, },
{ 4, 0, 0, 2, 6, 76, },
@@ -12343,6 +26073,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 6, 76, },
{ 7, 0, 0, 2, 6, 60, },
{ 8, 0, 0, 2, 6, 76, },
+ { 9, 0, 0, 2, 6, 60, },
+ { 0, 0, 0, 2, 7, 76, },
+ { 2, 0, 0, 2, 7, 60, },
{ 1, 0, 0, 2, 7, 76, },
{ 3, 0, 0, 2, 7, 76, },
{ 4, 0, 0, 2, 7, 76, },
@@ -12350,6 +26083,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 7, 76, },
{ 7, 0, 0, 2, 7, 60, },
{ 8, 0, 0, 2, 7, 76, },
+ { 9, 0, 0, 2, 7, 60, },
+ { 0, 0, 0, 2, 8, 68, },
+ { 2, 0, 0, 2, 8, 60, },
{ 1, 0, 0, 2, 8, 76, },
{ 3, 0, 0, 2, 8, 68, },
{ 4, 0, 0, 2, 8, 76, },
@@ -12357,6 +26093,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 8, 68, },
{ 7, 0, 0, 2, 8, 60, },
{ 8, 0, 0, 2, 8, 68, },
+ { 9, 0, 0, 2, 8, 60, },
+ { 0, 0, 0, 2, 9, 64, },
+ { 2, 0, 0, 2, 9, 60, },
{ 1, 0, 0, 2, 9, 76, },
{ 3, 0, 0, 2, 9, 64, },
{ 4, 0, 0, 2, 9, 76, },
@@ -12364,6 +26103,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 9, 64, },
{ 7, 0, 0, 2, 9, 60, },
{ 8, 0, 0, 2, 9, 64, },
+ { 9, 0, 0, 2, 9, 60, },
+ { 0, 0, 0, 2, 10, 60, },
+ { 2, 0, 0, 2, 10, 60, },
{ 1, 0, 0, 2, 10, 76, },
{ 3, 0, 0, 2, 10, 60, },
{ 4, 0, 0, 2, 10, 76, },
@@ -12371,6 +26113,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 10, 60, },
{ 7, 0, 0, 2, 10, 60, },
{ 8, 0, 0, 2, 10, 60, },
+ { 9, 0, 0, 2, 10, 60, },
+ { 0, 0, 0, 2, 11, 52, },
+ { 2, 0, 0, 2, 11, 60, },
{ 1, 0, 0, 2, 11, 76, },
{ 3, 0, 0, 2, 11, 52, },
{ 4, 0, 0, 2, 11, 76, },
@@ -12378,6 +26123,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 11, 52, },
{ 7, 0, 0, 2, 11, 60, },
{ 8, 0, 0, 2, 11, 52, },
+ { 9, 0, 0, 2, 11, 60, },
+ { 0, 0, 0, 2, 12, 40, },
+ { 2, 0, 0, 2, 12, 60, },
{ 1, 0, 0, 2, 12, 76, },
{ 3, 0, 0, 2, 12, 40, },
{ 4, 0, 0, 2, 12, 76, },
@@ -12385,6 +26133,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 12, 40, },
{ 7, 0, 0, 2, 12, 60, },
{ 8, 0, 0, 2, 12, 40, },
+ { 9, 0, 0, 2, 12, 60, },
+ { 0, 0, 0, 2, 13, 28, },
+ { 2, 0, 0, 2, 13, 60, },
{ 1, 0, 0, 2, 13, 76, },
{ 3, 0, 0, 2, 13, 28, },
{ 4, 0, 0, 2, 13, 72, },
@@ -12392,6 +26143,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 13, 28, },
{ 7, 0, 0, 2, 13, 60, },
{ 8, 0, 0, 2, 13, 28, },
+ { 9, 0, 0, 2, 13, 60, },
+ { 0, 0, 0, 2, 14, 127, },
+ { 2, 0, 0, 2, 14, 127, },
{ 1, 0, 0, 2, 14, 127, },
{ 3, 0, 0, 2, 14, 127, },
{ 4, 0, 0, 2, 14, 127, },
@@ -12399,6 +26153,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 14, 127, },
{ 7, 0, 0, 2, 14, 127, },
{ 8, 0, 0, 2, 14, 127, },
+ { 9, 0, 0, 2, 14, 127, },
+ { 0, 0, 0, 3, 1, 52, },
+ { 2, 0, 0, 3, 1, 36, },
{ 1, 0, 0, 3, 1, 66, },
{ 3, 0, 0, 3, 1, 52, },
{ 4, 0, 0, 3, 1, 68, },
@@ -12406,6 +26163,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 1, 52, },
{ 7, 0, 0, 3, 1, 36, },
{ 8, 0, 0, 3, 1, 52, },
+ { 9, 0, 0, 3, 1, 36, },
+ { 0, 0, 0, 3, 2, 60, },
+ { 2, 0, 0, 3, 2, 36, },
{ 1, 0, 0, 3, 2, 66, },
{ 3, 0, 0, 3, 2, 60, },
{ 4, 0, 0, 3, 2, 70, },
@@ -12413,6 +26173,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 2, 60, },
{ 7, 0, 0, 3, 2, 36, },
{ 8, 0, 0, 3, 2, 60, },
+ { 9, 0, 0, 3, 2, 36, },
+ { 0, 0, 0, 3, 3, 64, },
+ { 2, 0, 0, 3, 3, 36, },
{ 1, 0, 0, 3, 3, 66, },
{ 3, 0, 0, 3, 3, 64, },
{ 4, 0, 0, 3, 3, 70, },
@@ -12420,6 +26183,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 3, 64, },
{ 7, 0, 0, 3, 3, 36, },
{ 8, 0, 0, 3, 3, 64, },
+ { 9, 0, 0, 3, 3, 36, },
+ { 0, 0, 0, 3, 4, 68, },
+ { 2, 0, 0, 3, 4, 36, },
{ 1, 0, 0, 3, 4, 66, },
{ 3, 0, 0, 3, 4, 68, },
{ 4, 0, 0, 3, 4, 70, },
@@ -12427,6 +26193,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 4, 68, },
{ 7, 0, 0, 3, 4, 36, },
{ 8, 0, 0, 3, 4, 68, },
+ { 9, 0, 0, 3, 4, 36, },
+ { 0, 0, 0, 3, 5, 76, },
+ { 2, 0, 0, 3, 5, 36, },
{ 1, 0, 0, 3, 5, 66, },
{ 3, 0, 0, 3, 5, 76, },
{ 4, 0, 0, 3, 5, 70, },
@@ -12434,6 +26203,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 5, 76, },
{ 7, 0, 0, 3, 5, 36, },
{ 8, 0, 0, 3, 5, 76, },
+ { 9, 0, 0, 3, 5, 36, },
+ { 0, 0, 0, 3, 6, 76, },
+ { 2, 0, 0, 3, 6, 36, },
{ 1, 0, 0, 3, 6, 66, },
{ 3, 0, 0, 3, 6, 76, },
{ 4, 0, 0, 3, 6, 70, },
@@ -12441,6 +26213,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 6, 76, },
{ 7, 0, 0, 3, 6, 36, },
{ 8, 0, 0, 3, 6, 76, },
+ { 9, 0, 0, 3, 6, 36, },
+ { 0, 0, 0, 3, 7, 76, },
+ { 2, 0, 0, 3, 7, 36, },
{ 1, 0, 0, 3, 7, 66, },
{ 3, 0, 0, 3, 7, 76, },
{ 4, 0, 0, 3, 7, 70, },
@@ -12448,6 +26223,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 7, 76, },
{ 7, 0, 0, 3, 7, 36, },
{ 8, 0, 0, 3, 7, 76, },
+ { 9, 0, 0, 3, 7, 36, },
+ { 0, 0, 0, 3, 8, 68, },
+ { 2, 0, 0, 3, 8, 36, },
{ 1, 0, 0, 3, 8, 66, },
{ 3, 0, 0, 3, 8, 68, },
{ 4, 0, 0, 3, 8, 70, },
@@ -12455,6 +26233,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 8, 68, },
{ 7, 0, 0, 3, 8, 36, },
{ 8, 0, 0, 3, 8, 68, },
+ { 9, 0, 0, 3, 8, 36, },
+ { 0, 0, 0, 3, 9, 64, },
+ { 2, 0, 0, 3, 9, 36, },
{ 1, 0, 0, 3, 9, 66, },
{ 3, 0, 0, 3, 9, 64, },
{ 4, 0, 0, 3, 9, 70, },
@@ -12462,6 +26243,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 9, 64, },
{ 7, 0, 0, 3, 9, 36, },
{ 8, 0, 0, 3, 9, 64, },
+ { 9, 0, 0, 3, 9, 36, },
+ { 0, 0, 0, 3, 10, 60, },
+ { 2, 0, 0, 3, 10, 36, },
{ 1, 0, 0, 3, 10, 66, },
{ 3, 0, 0, 3, 10, 60, },
{ 4, 0, 0, 3, 10, 70, },
@@ -12469,6 +26253,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 10, 60, },
{ 7, 0, 0, 3, 10, 36, },
{ 8, 0, 0, 3, 10, 60, },
+ { 9, 0, 0, 3, 10, 36, },
+ { 0, 0, 0, 3, 11, 52, },
+ { 2, 0, 0, 3, 11, 36, },
{ 1, 0, 0, 3, 11, 66, },
{ 3, 0, 0, 3, 11, 52, },
{ 4, 0, 0, 3, 11, 70, },
@@ -12476,6 +26263,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 11, 52, },
{ 7, 0, 0, 3, 11, 36, },
{ 8, 0, 0, 3, 11, 52, },
+ { 9, 0, 0, 3, 11, 36, },
+ { 0, 0, 0, 3, 12, 40, },
+ { 2, 0, 0, 3, 12, 36, },
{ 1, 0, 0, 3, 12, 66, },
{ 3, 0, 0, 3, 12, 40, },
{ 4, 0, 0, 3, 12, 70, },
@@ -12483,6 +26273,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 12, 40, },
{ 7, 0, 0, 3, 12, 36, },
{ 8, 0, 0, 3, 12, 40, },
+ { 9, 0, 0, 3, 12, 36, },
+ { 0, 0, 0, 3, 13, 28, },
+ { 2, 0, 0, 3, 13, 36, },
{ 1, 0, 0, 3, 13, 66, },
{ 3, 0, 0, 3, 13, 28, },
{ 4, 0, 0, 3, 13, 62, },
@@ -12490,6 +26283,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 13, 28, },
{ 7, 0, 0, 3, 13, 36, },
{ 8, 0, 0, 3, 13, 28, },
+ { 9, 0, 0, 3, 13, 36, },
+ { 0, 0, 0, 3, 14, 127, },
+ { 2, 0, 0, 3, 14, 127, },
{ 1, 0, 0, 3, 14, 127, },
{ 3, 0, 0, 3, 14, 127, },
{ 4, 0, 0, 3, 14, 127, },
@@ -12497,6 +26293,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 14, 127, },
{ 7, 0, 0, 3, 14, 127, },
{ 8, 0, 0, 3, 14, 127, },
+ { 9, 0, 0, 3, 14, 127, },
+ { 0, 0, 1, 2, 1, 127, },
+ { 2, 0, 1, 2, 1, 127, },
{ 1, 0, 1, 2, 1, 127, },
{ 3, 0, 1, 2, 1, 127, },
{ 4, 0, 1, 2, 1, 127, },
@@ -12504,6 +26303,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 1, 127, },
{ 7, 0, 1, 2, 1, 127, },
{ 8, 0, 1, 2, 1, 127, },
+ { 9, 0, 1, 2, 1, 127, },
+ { 0, 0, 1, 2, 2, 127, },
+ { 2, 0, 1, 2, 2, 127, },
{ 1, 0, 1, 2, 2, 127, },
{ 3, 0, 1, 2, 2, 127, },
{ 4, 0, 1, 2, 2, 127, },
@@ -12511,6 +26313,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 2, 127, },
{ 7, 0, 1, 2, 2, 127, },
{ 8, 0, 1, 2, 2, 127, },
+ { 9, 0, 1, 2, 2, 127, },
+ { 0, 0, 1, 2, 3, 52, },
+ { 2, 0, 1, 2, 3, 60, },
{ 1, 0, 1, 2, 3, 72, },
{ 3, 0, 1, 2, 3, 52, },
{ 4, 0, 1, 2, 3, 72, },
@@ -12518,6 +26323,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 3, 52, },
{ 7, 0, 1, 2, 3, 60, },
{ 8, 0, 1, 2, 3, 52, },
+ { 9, 0, 1, 2, 3, 60, },
+ { 0, 0, 1, 2, 4, 52, },
+ { 2, 0, 1, 2, 4, 60, },
{ 1, 0, 1, 2, 4, 72, },
{ 3, 0, 1, 2, 4, 52, },
{ 4, 0, 1, 2, 4, 72, },
@@ -12525,6 +26333,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 4, 52, },
{ 7, 0, 1, 2, 4, 60, },
{ 8, 0, 1, 2, 4, 52, },
+ { 9, 0, 1, 2, 4, 60, },
+ { 0, 0, 1, 2, 5, 60, },
+ { 2, 0, 1, 2, 5, 60, },
{ 1, 0, 1, 2, 5, 72, },
{ 3, 0, 1, 2, 5, 60, },
{ 4, 0, 1, 2, 5, 72, },
@@ -12532,6 +26343,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 5, 60, },
{ 7, 0, 1, 2, 5, 60, },
{ 8, 0, 1, 2, 5, 60, },
+ { 9, 0, 1, 2, 5, 60, },
+ { 0, 0, 1, 2, 6, 64, },
+ { 2, 0, 1, 2, 6, 60, },
{ 1, 0, 1, 2, 6, 72, },
{ 3, 0, 1, 2, 6, 64, },
{ 4, 0, 1, 2, 6, 72, },
@@ -12539,6 +26353,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 6, 64, },
{ 7, 0, 1, 2, 6, 60, },
{ 8, 0, 1, 2, 6, 64, },
+ { 9, 0, 1, 2, 6, 60, },
+ { 0, 0, 1, 2, 7, 60, },
+ { 2, 0, 1, 2, 7, 60, },
{ 1, 0, 1, 2, 7, 72, },
{ 3, 0, 1, 2, 7, 60, },
{ 4, 0, 1, 2, 7, 72, },
@@ -12546,6 +26363,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 7, 60, },
{ 7, 0, 1, 2, 7, 60, },
{ 8, 0, 1, 2, 7, 60, },
+ { 9, 0, 1, 2, 7, 60, },
+ { 0, 0, 1, 2, 8, 52, },
+ { 2, 0, 1, 2, 8, 60, },
{ 1, 0, 1, 2, 8, 72, },
{ 3, 0, 1, 2, 8, 52, },
{ 4, 0, 1, 2, 8, 72, },
@@ -12553,6 +26373,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 8, 52, },
{ 7, 0, 1, 2, 8, 60, },
{ 8, 0, 1, 2, 8, 52, },
+ { 9, 0, 1, 2, 8, 60, },
+ { 0, 0, 1, 2, 9, 52, },
+ { 2, 0, 1, 2, 9, 60, },
{ 1, 0, 1, 2, 9, 72, },
{ 3, 0, 1, 2, 9, 52, },
{ 4, 0, 1, 2, 9, 72, },
@@ -12560,6 +26383,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 9, 52, },
{ 7, 0, 1, 2, 9, 60, },
{ 8, 0, 1, 2, 9, 52, },
+ { 9, 0, 1, 2, 9, 60, },
+ { 0, 0, 1, 2, 10, 40, },
+ { 2, 0, 1, 2, 10, 60, },
{ 1, 0, 1, 2, 10, 72, },
{ 3, 0, 1, 2, 10, 40, },
{ 4, 0, 1, 2, 10, 72, },
@@ -12567,6 +26393,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 10, 40, },
{ 7, 0, 1, 2, 10, 60, },
{ 8, 0, 1, 2, 10, 40, },
+ { 9, 0, 1, 2, 10, 60, },
+ { 0, 0, 1, 2, 11, 28, },
+ { 2, 0, 1, 2, 11, 60, },
{ 1, 0, 1, 2, 11, 72, },
{ 3, 0, 1, 2, 11, 28, },
{ 4, 0, 1, 2, 11, 70, },
@@ -12574,6 +26403,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 11, 28, },
{ 7, 0, 1, 2, 11, 60, },
{ 8, 0, 1, 2, 11, 28, },
+ { 9, 0, 1, 2, 11, 60, },
+ { 0, 0, 1, 2, 12, 127, },
+ { 2, 0, 1, 2, 12, 127, },
{ 1, 0, 1, 2, 12, 127, },
{ 3, 0, 1, 2, 12, 127, },
{ 4, 0, 1, 2, 12, 127, },
@@ -12581,6 +26413,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 12, 127, },
{ 7, 0, 1, 2, 12, 127, },
{ 8, 0, 1, 2, 12, 127, },
+ { 9, 0, 1, 2, 12, 127, },
+ { 0, 0, 1, 2, 13, 127, },
+ { 2, 0, 1, 2, 13, 127, },
{ 1, 0, 1, 2, 13, 127, },
{ 3, 0, 1, 2, 13, 127, },
{ 4, 0, 1, 2, 13, 127, },
@@ -12588,6 +26423,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 13, 127, },
{ 7, 0, 1, 2, 13, 127, },
{ 8, 0, 1, 2, 13, 127, },
+ { 9, 0, 1, 2, 13, 127, },
+ { 0, 0, 1, 2, 14, 127, },
+ { 2, 0, 1, 2, 14, 127, },
{ 1, 0, 1, 2, 14, 127, },
{ 3, 0, 1, 2, 14, 127, },
{ 4, 0, 1, 2, 14, 127, },
@@ -12595,6 +26433,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 14, 127, },
{ 7, 0, 1, 2, 14, 127, },
{ 8, 0, 1, 2, 14, 127, },
+ { 9, 0, 1, 2, 14, 127, },
+ { 0, 0, 1, 3, 1, 127, },
+ { 2, 0, 1, 3, 1, 127, },
{ 1, 0, 1, 3, 1, 127, },
{ 3, 0, 1, 3, 1, 127, },
{ 4, 0, 1, 3, 1, 127, },
@@ -12602,6 +26443,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 1, 127, },
{ 7, 0, 1, 3, 1, 127, },
{ 8, 0, 1, 3, 1, 127, },
+ { 9, 0, 1, 3, 1, 127, },
+ { 0, 0, 1, 3, 2, 127, },
+ { 2, 0, 1, 3, 2, 127, },
{ 1, 0, 1, 3, 2, 127, },
{ 3, 0, 1, 3, 2, 127, },
{ 4, 0, 1, 3, 2, 127, },
@@ -12609,6 +26453,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 2, 127, },
{ 7, 0, 1, 3, 2, 127, },
{ 8, 0, 1, 3, 2, 127, },
+ { 9, 0, 1, 3, 2, 127, },
+ { 0, 0, 1, 3, 3, 48, },
+ { 2, 0, 1, 3, 3, 36, },
{ 1, 0, 1, 3, 3, 66, },
{ 3, 0, 1, 3, 3, 48, },
{ 4, 0, 1, 3, 3, 66, },
@@ -12616,6 +26463,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 3, 48, },
{ 7, 0, 1, 3, 3, 36, },
{ 8, 0, 1, 3, 3, 48, },
+ { 9, 0, 1, 3, 3, 36, },
+ { 0, 0, 1, 3, 4, 48, },
+ { 2, 0, 1, 3, 4, 36, },
{ 1, 0, 1, 3, 4, 66, },
{ 3, 0, 1, 3, 4, 48, },
{ 4, 0, 1, 3, 4, 70, },
@@ -12623,6 +26473,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 4, 48, },
{ 7, 0, 1, 3, 4, 36, },
{ 8, 0, 1, 3, 4, 48, },
+ { 9, 0, 1, 3, 4, 36, },
+ { 0, 0, 1, 3, 5, 60, },
+ { 2, 0, 1, 3, 5, 36, },
{ 1, 0, 1, 3, 5, 66, },
{ 3, 0, 1, 3, 5, 60, },
{ 4, 0, 1, 3, 5, 70, },
@@ -12630,6 +26483,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 5, 60, },
{ 7, 0, 1, 3, 5, 36, },
{ 8, 0, 1, 3, 5, 60, },
+ { 9, 0, 1, 3, 5, 36, },
+ { 0, 0, 1, 3, 6, 64, },
+ { 2, 0, 1, 3, 6, 36, },
{ 1, 0, 1, 3, 6, 66, },
{ 3, 0, 1, 3, 6, 64, },
{ 4, 0, 1, 3, 6, 70, },
@@ -12637,6 +26493,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 6, 64, },
{ 7, 0, 1, 3, 6, 36, },
{ 8, 0, 1, 3, 6, 64, },
+ { 9, 0, 1, 3, 6, 36, },
+ { 0, 0, 1, 3, 7, 60, },
+ { 2, 0, 1, 3, 7, 36, },
{ 1, 0, 1, 3, 7, 66, },
{ 3, 0, 1, 3, 7, 60, },
{ 4, 0, 1, 3, 7, 70, },
@@ -12644,6 +26503,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 7, 60, },
{ 7, 0, 1, 3, 7, 36, },
{ 8, 0, 1, 3, 7, 60, },
+ { 9, 0, 1, 3, 7, 36, },
+ { 0, 0, 1, 3, 8, 52, },
+ { 2, 0, 1, 3, 8, 36, },
{ 1, 0, 1, 3, 8, 66, },
{ 3, 0, 1, 3, 8, 52, },
{ 4, 0, 1, 3, 8, 70, },
@@ -12651,6 +26513,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 8, 52, },
{ 7, 0, 1, 3, 8, 36, },
{ 8, 0, 1, 3, 8, 52, },
+ { 9, 0, 1, 3, 8, 36, },
+ { 0, 0, 1, 3, 9, 52, },
+ { 2, 0, 1, 3, 9, 36, },
{ 1, 0, 1, 3, 9, 66, },
{ 3, 0, 1, 3, 9, 52, },
{ 4, 0, 1, 3, 9, 70, },
@@ -12658,6 +26523,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 9, 52, },
{ 7, 0, 1, 3, 9, 36, },
{ 8, 0, 1, 3, 9, 52, },
+ { 9, 0, 1, 3, 9, 36, },
+ { 0, 0, 1, 3, 10, 40, },
+ { 2, 0, 1, 3, 10, 36, },
{ 1, 0, 1, 3, 10, 66, },
{ 3, 0, 1, 3, 10, 40, },
{ 4, 0, 1, 3, 10, 70, },
@@ -12665,6 +26533,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 10, 40, },
{ 7, 0, 1, 3, 10, 36, },
{ 8, 0, 1, 3, 10, 40, },
+ { 9, 0, 1, 3, 10, 36, },
+ { 0, 0, 1, 3, 11, 26, },
+ { 2, 0, 1, 3, 11, 36, },
{ 1, 0, 1, 3, 11, 66, },
{ 3, 0, 1, 3, 11, 26, },
{ 4, 0, 1, 3, 11, 66, },
@@ -12672,6 +26543,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 11, 26, },
{ 7, 0, 1, 3, 11, 36, },
{ 8, 0, 1, 3, 11, 26, },
+ { 9, 0, 1, 3, 11, 36, },
+ { 0, 0, 1, 3, 12, 127, },
+ { 2, 0, 1, 3, 12, 127, },
{ 1, 0, 1, 3, 12, 127, },
{ 3, 0, 1, 3, 12, 127, },
{ 4, 0, 1, 3, 12, 127, },
@@ -12679,6 +26553,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 12, 127, },
{ 7, 0, 1, 3, 12, 127, },
{ 8, 0, 1, 3, 12, 127, },
+ { 9, 0, 1, 3, 12, 127, },
+ { 0, 0, 1, 3, 13, 127, },
+ { 2, 0, 1, 3, 13, 127, },
{ 1, 0, 1, 3, 13, 127, },
{ 3, 0, 1, 3, 13, 127, },
{ 4, 0, 1, 3, 13, 127, },
@@ -12686,6 +26563,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 13, 127, },
{ 7, 0, 1, 3, 13, 127, },
{ 8, 0, 1, 3, 13, 127, },
+ { 9, 0, 1, 3, 13, 127, },
+ { 0, 0, 1, 3, 14, 127, },
+ { 2, 0, 1, 3, 14, 127, },
{ 1, 0, 1, 3, 14, 127, },
{ 3, 0, 1, 3, 14, 127, },
{ 4, 0, 1, 3, 14, 127, },
@@ -12693,6 +26573,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 14, 127, },
{ 7, 0, 1, 3, 14, 127, },
{ 8, 0, 1, 3, 14, 127, },
+ { 9, 0, 1, 3, 14, 127, },
+ { 0, 1, 0, 1, 36, 74, },
+ { 2, 1, 0, 1, 36, 62, },
{ 1, 1, 0, 1, 36, 60, },
{ 3, 1, 0, 1, 36, 62, },
{ 4, 1, 0, 1, 36, 76, },
@@ -12700,6 +26583,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 36, 64, },
{ 7, 1, 0, 1, 36, 54, },
{ 8, 1, 0, 1, 36, 62, },
+ { 9, 1, 0, 1, 36, 62, },
+ { 0, 1, 0, 1, 40, 76, },
+ { 2, 1, 0, 1, 40, 62, },
{ 1, 1, 0, 1, 40, 62, },
{ 3, 1, 0, 1, 40, 62, },
{ 4, 1, 0, 1, 40, 76, },
@@ -12707,6 +26593,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 40, 64, },
{ 7, 1, 0, 1, 40, 54, },
{ 8, 1, 0, 1, 40, 62, },
+ { 9, 1, 0, 1, 40, 62, },
+ { 0, 1, 0, 1, 44, 76, },
+ { 2, 1, 0, 1, 44, 62, },
{ 1, 1, 0, 1, 44, 62, },
{ 3, 1, 0, 1, 44, 62, },
{ 4, 1, 0, 1, 44, 76, },
@@ -12714,13 +26603,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 44, 64, },
{ 7, 1, 0, 1, 44, 54, },
{ 8, 1, 0, 1, 44, 62, },
+ { 9, 1, 0, 1, 44, 62, },
+ { 0, 1, 0, 1, 48, 76, },
+ { 2, 1, 0, 1, 48, 62, },
{ 1, 1, 0, 1, 48, 62, },
{ 3, 1, 0, 1, 48, 62, },
- { 4, 1, 0, 1, 48, 76, },
+ { 4, 1, 0, 1, 48, 54, },
{ 5, 1, 0, 1, 48, 62, },
{ 6, 1, 0, 1, 48, 64, },
{ 7, 1, 0, 1, 48, 54, },
{ 8, 1, 0, 1, 48, 62, },
+ { 9, 1, 0, 1, 48, 62, },
+ { 0, 1, 0, 1, 52, 76, },
+ { 2, 1, 0, 1, 52, 62, },
{ 1, 1, 0, 1, 52, 62, },
{ 3, 1, 0, 1, 52, 64, },
{ 4, 1, 0, 1, 52, 76, },
@@ -12728,6 +26623,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 52, 76, },
{ 7, 1, 0, 1, 52, 54, },
{ 8, 1, 0, 1, 52, 76, },
+ { 9, 1, 0, 1, 52, 62, },
+ { 0, 1, 0, 1, 56, 76, },
+ { 2, 1, 0, 1, 56, 62, },
{ 1, 1, 0, 1, 56, 62, },
{ 3, 1, 0, 1, 56, 64, },
{ 4, 1, 0, 1, 56, 76, },
@@ -12735,6 +26633,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 56, 76, },
{ 7, 1, 0, 1, 56, 54, },
{ 8, 1, 0, 1, 56, 76, },
+ { 9, 1, 0, 1, 56, 62, },
+ { 0, 1, 0, 1, 60, 76, },
+ { 2, 1, 0, 1, 60, 62, },
{ 1, 1, 0, 1, 60, 62, },
{ 3, 1, 0, 1, 60, 64, },
{ 4, 1, 0, 1, 60, 76, },
@@ -12742,6 +26643,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 60, 76, },
{ 7, 1, 0, 1, 60, 54, },
{ 8, 1, 0, 1, 60, 76, },
+ { 9, 1, 0, 1, 60, 62, },
+ { 0, 1, 0, 1, 64, 74, },
+ { 2, 1, 0, 1, 64, 62, },
{ 1, 1, 0, 1, 64, 60, },
{ 3, 1, 0, 1, 64, 64, },
{ 4, 1, 0, 1, 64, 76, },
@@ -12749,6 +26653,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 64, 74, },
{ 7, 1, 0, 1, 64, 54, },
{ 8, 1, 0, 1, 64, 74, },
+ { 9, 1, 0, 1, 64, 62, },
+ { 0, 1, 0, 1, 100, 72, },
+ { 2, 1, 0, 1, 100, 62, },
{ 1, 1, 0, 1, 100, 76, },
{ 3, 1, 0, 1, 100, 72, },
{ 4, 1, 0, 1, 100, 76, },
@@ -12756,6 +26663,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 100, 72, },
{ 7, 1, 0, 1, 100, 54, },
{ 8, 1, 0, 1, 100, 72, },
+ { 9, 1, 0, 1, 100, 127, },
+ { 0, 1, 0, 1, 104, 76, },
+ { 2, 1, 0, 1, 104, 62, },
{ 1, 1, 0, 1, 104, 76, },
{ 3, 1, 0, 1, 104, 76, },
{ 4, 1, 0, 1, 104, 76, },
@@ -12763,6 +26673,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 104, 76, },
{ 7, 1, 0, 1, 104, 54, },
{ 8, 1, 0, 1, 104, 76, },
+ { 9, 1, 0, 1, 104, 127, },
+ { 0, 1, 0, 1, 108, 76, },
+ { 2, 1, 0, 1, 108, 62, },
{ 1, 1, 0, 1, 108, 76, },
{ 3, 1, 0, 1, 108, 76, },
{ 4, 1, 0, 1, 108, 76, },
@@ -12770,6 +26683,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 108, 76, },
{ 7, 1, 0, 1, 108, 54, },
{ 8, 1, 0, 1, 108, 76, },
+ { 9, 1, 0, 1, 108, 127, },
+ { 0, 1, 0, 1, 112, 76, },
+ { 2, 1, 0, 1, 112, 62, },
{ 1, 1, 0, 1, 112, 76, },
{ 3, 1, 0, 1, 112, 76, },
{ 4, 1, 0, 1, 112, 76, },
@@ -12777,6 +26693,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 112, 76, },
{ 7, 1, 0, 1, 112, 54, },
{ 8, 1, 0, 1, 112, 76, },
+ { 9, 1, 0, 1, 112, 127, },
+ { 0, 1, 0, 1, 116, 76, },
+ { 2, 1, 0, 1, 116, 62, },
{ 1, 1, 0, 1, 116, 76, },
{ 3, 1, 0, 1, 116, 76, },
{ 4, 1, 0, 1, 116, 76, },
@@ -12784,6 +26703,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 116, 76, },
{ 7, 1, 0, 1, 116, 54, },
{ 8, 1, 0, 1, 116, 76, },
+ { 9, 1, 0, 1, 116, 127, },
+ { 0, 1, 0, 1, 120, 76, },
+ { 2, 1, 0, 1, 120, 62, },
{ 1, 1, 0, 1, 120, 76, },
{ 3, 1, 0, 1, 120, 127, },
{ 4, 1, 0, 1, 120, 76, },
@@ -12791,6 +26713,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 120, 76, },
{ 7, 1, 0, 1, 120, 54, },
{ 8, 1, 0, 1, 120, 76, },
+ { 9, 1, 0, 1, 120, 127, },
+ { 0, 1, 0, 1, 124, 76, },
+ { 2, 1, 0, 1, 124, 62, },
{ 1, 1, 0, 1, 124, 76, },
{ 3, 1, 0, 1, 124, 127, },
{ 4, 1, 0, 1, 124, 76, },
@@ -12798,6 +26723,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 124, 76, },
{ 7, 1, 0, 1, 124, 54, },
{ 8, 1, 0, 1, 124, 76, },
+ { 9, 1, 0, 1, 124, 127, },
+ { 0, 1, 0, 1, 128, 76, },
+ { 2, 1, 0, 1, 128, 62, },
{ 1, 1, 0, 1, 128, 76, },
{ 3, 1, 0, 1, 128, 127, },
{ 4, 1, 0, 1, 128, 76, },
@@ -12805,6 +26733,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 128, 76, },
{ 7, 1, 0, 1, 128, 54, },
{ 8, 1, 0, 1, 128, 76, },
+ { 9, 1, 0, 1, 128, 127, },
+ { 0, 1, 0, 1, 132, 76, },
+ { 2, 1, 0, 1, 132, 62, },
{ 1, 1, 0, 1, 132, 76, },
{ 3, 1, 0, 1, 132, 76, },
{ 4, 1, 0, 1, 132, 76, },
@@ -12812,20 +26743,29 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 132, 76, },
{ 7, 1, 0, 1, 132, 54, },
{ 8, 1, 0, 1, 132, 76, },
+ { 9, 1, 0, 1, 132, 127, },
+ { 0, 1, 0, 1, 136, 76, },
+ { 2, 1, 0, 1, 136, 62, },
{ 1, 1, 0, 1, 136, 76, },
{ 3, 1, 0, 1, 136, 76, },
{ 4, 1, 0, 1, 136, 76, },
{ 5, 1, 0, 1, 136, 62, },
{ 6, 1, 0, 1, 136, 76, },
- { 7, 1, 0, 1, 136, 127, },
+ { 7, 1, 0, 1, 136, 54, },
{ 8, 1, 0, 1, 136, 76, },
+ { 9, 1, 0, 1, 136, 127, },
+ { 0, 1, 0, 1, 140, 72, },
+ { 2, 1, 0, 1, 140, 62, },
{ 1, 1, 0, 1, 140, 76, },
{ 3, 1, 0, 1, 140, 72, },
{ 4, 1, 0, 1, 140, 76, },
{ 5, 1, 0, 1, 140, 62, },
{ 6, 1, 0, 1, 140, 72, },
- { 7, 1, 0, 1, 140, 127, },
+ { 7, 1, 0, 1, 140, 54, },
{ 8, 1, 0, 1, 140, 72, },
+ { 9, 1, 0, 1, 140, 127, },
+ { 0, 1, 0, 1, 144, 76, },
+ { 2, 1, 0, 1, 144, 127, },
{ 1, 1, 0, 1, 144, 127, },
{ 3, 1, 0, 1, 144, 76, },
{ 4, 1, 0, 1, 144, 76, },
@@ -12833,6 +26773,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 144, 76, },
{ 7, 1, 0, 1, 144, 127, },
{ 8, 1, 0, 1, 144, 76, },
+ { 9, 1, 0, 1, 144, 127, },
+ { 0, 1, 0, 1, 149, 76, },
+ { 2, 1, 0, 1, 149, -128, },
{ 1, 1, 0, 1, 149, 127, },
{ 3, 1, 0, 1, 149, 76, },
{ 4, 1, 0, 1, 149, 74, },
@@ -12840,6 +26783,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 149, 76, },
{ 7, 1, 0, 1, 149, 54, },
{ 8, 1, 0, 1, 149, 76, },
+ { 9, 1, 0, 1, 149, -128, },
+ { 0, 1, 0, 1, 153, 76, },
+ { 2, 1, 0, 1, 153, -128, },
{ 1, 1, 0, 1, 153, 127, },
{ 3, 1, 0, 1, 153, 76, },
{ 4, 1, 0, 1, 153, 74, },
@@ -12847,6 +26793,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 153, 76, },
{ 7, 1, 0, 1, 153, 54, },
{ 8, 1, 0, 1, 153, 76, },
+ { 9, 1, 0, 1, 153, -128, },
+ { 0, 1, 0, 1, 157, 76, },
+ { 2, 1, 0, 1, 157, -128, },
{ 1, 1, 0, 1, 157, 127, },
{ 3, 1, 0, 1, 157, 76, },
{ 4, 1, 0, 1, 157, 74, },
@@ -12854,6 +26803,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 157, 76, },
{ 7, 1, 0, 1, 157, 54, },
{ 8, 1, 0, 1, 157, 76, },
+ { 9, 1, 0, 1, 157, -128, },
+ { 0, 1, 0, 1, 161, 76, },
+ { 2, 1, 0, 1, 161, -128, },
{ 1, 1, 0, 1, 161, 127, },
{ 3, 1, 0, 1, 161, 76, },
{ 4, 1, 0, 1, 161, 74, },
@@ -12861,6 +26813,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 161, 76, },
{ 7, 1, 0, 1, 161, 54, },
{ 8, 1, 0, 1, 161, 76, },
+ { 9, 1, 0, 1, 161, -128, },
+ { 0, 1, 0, 1, 165, 76, },
+ { 2, 1, 0, 1, 165, -128, },
{ 1, 1, 0, 1, 165, 127, },
{ 3, 1, 0, 1, 165, 76, },
{ 4, 1, 0, 1, 165, 74, },
@@ -12868,6 +26823,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 165, 76, },
{ 7, 1, 0, 1, 165, 54, },
{ 8, 1, 0, 1, 165, 76, },
+ { 9, 1, 0, 1, 165, -128, },
+ { 0, 1, 0, 2, 36, 72, },
+ { 2, 1, 0, 2, 36, 62, },
{ 1, 1, 0, 2, 36, 62, },
{ 3, 1, 0, 2, 36, 62, },
{ 4, 1, 0, 2, 36, 76, },
@@ -12875,6 +26833,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 36, 64, },
{ 7, 1, 0, 2, 36, 54, },
{ 8, 1, 0, 2, 36, 62, },
+ { 9, 1, 0, 2, 36, 62, },
+ { 0, 1, 0, 2, 40, 76, },
+ { 2, 1, 0, 2, 40, 62, },
{ 1, 1, 0, 2, 40, 62, },
{ 3, 1, 0, 2, 40, 62, },
{ 4, 1, 0, 2, 40, 76, },
@@ -12882,6 +26843,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 40, 64, },
{ 7, 1, 0, 2, 40, 54, },
{ 8, 1, 0, 2, 40, 62, },
+ { 9, 1, 0, 2, 40, 62, },
+ { 0, 1, 0, 2, 44, 76, },
+ { 2, 1, 0, 2, 44, 62, },
{ 1, 1, 0, 2, 44, 62, },
{ 3, 1, 0, 2, 44, 62, },
{ 4, 1, 0, 2, 44, 76, },
@@ -12889,13 +26853,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 44, 64, },
{ 7, 1, 0, 2, 44, 54, },
{ 8, 1, 0, 2, 44, 62, },
+ { 9, 1, 0, 2, 44, 62, },
+ { 0, 1, 0, 2, 48, 76, },
+ { 2, 1, 0, 2, 48, 62, },
{ 1, 1, 0, 2, 48, 62, },
{ 3, 1, 0, 2, 48, 62, },
- { 4, 1, 0, 2, 48, 76, },
+ { 4, 1, 0, 2, 48, 54, },
{ 5, 1, 0, 2, 48, 62, },
{ 6, 1, 0, 2, 48, 64, },
{ 7, 1, 0, 2, 48, 54, },
{ 8, 1, 0, 2, 48, 62, },
+ { 9, 1, 0, 2, 48, 62, },
+ { 0, 1, 0, 2, 52, 76, },
+ { 2, 1, 0, 2, 52, 62, },
{ 1, 1, 0, 2, 52, 62, },
{ 3, 1, 0, 2, 52, 64, },
{ 4, 1, 0, 2, 52, 76, },
@@ -12903,6 +26873,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 52, 76, },
{ 7, 1, 0, 2, 52, 54, },
{ 8, 1, 0, 2, 52, 76, },
+ { 9, 1, 0, 2, 52, 62, },
+ { 0, 1, 0, 2, 56, 76, },
+ { 2, 1, 0, 2, 56, 62, },
{ 1, 1, 0, 2, 56, 62, },
{ 3, 1, 0, 2, 56, 64, },
{ 4, 1, 0, 2, 56, 76, },
@@ -12910,6 +26883,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 56, 76, },
{ 7, 1, 0, 2, 56, 54, },
{ 8, 1, 0, 2, 56, 76, },
+ { 9, 1, 0, 2, 56, 62, },
+ { 0, 1, 0, 2, 60, 76, },
+ { 2, 1, 0, 2, 60, 62, },
{ 1, 1, 0, 2, 60, 62, },
{ 3, 1, 0, 2, 60, 64, },
{ 4, 1, 0, 2, 60, 76, },
@@ -12917,6 +26893,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 60, 76, },
{ 7, 1, 0, 2, 60, 54, },
{ 8, 1, 0, 2, 60, 76, },
+ { 9, 1, 0, 2, 60, 62, },
+ { 0, 1, 0, 2, 64, 74, },
+ { 2, 1, 0, 2, 64, 62, },
{ 1, 1, 0, 2, 64, 60, },
{ 3, 1, 0, 2, 64, 64, },
{ 4, 1, 0, 2, 64, 74, },
@@ -12924,6 +26903,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 64, 74, },
{ 7, 1, 0, 2, 64, 54, },
{ 8, 1, 0, 2, 64, 74, },
+ { 9, 1, 0, 2, 64, 62, },
+ { 0, 1, 0, 2, 100, 70, },
+ { 2, 1, 0, 2, 100, 62, },
{ 1, 1, 0, 2, 100, 76, },
{ 3, 1, 0, 2, 100, 70, },
{ 4, 1, 0, 2, 100, 76, },
@@ -12931,6 +26913,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 100, 70, },
{ 7, 1, 0, 2, 100, 54, },
{ 8, 1, 0, 2, 100, 70, },
+ { 9, 1, 0, 2, 100, 127, },
+ { 0, 1, 0, 2, 104, 76, },
+ { 2, 1, 0, 2, 104, 62, },
{ 1, 1, 0, 2, 104, 76, },
{ 3, 1, 0, 2, 104, 76, },
{ 4, 1, 0, 2, 104, 76, },
@@ -12938,6 +26923,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 104, 76, },
{ 7, 1, 0, 2, 104, 54, },
{ 8, 1, 0, 2, 104, 76, },
+ { 9, 1, 0, 2, 104, 127, },
+ { 0, 1, 0, 2, 108, 76, },
+ { 2, 1, 0, 2, 108, 62, },
{ 1, 1, 0, 2, 108, 76, },
{ 3, 1, 0, 2, 108, 76, },
{ 4, 1, 0, 2, 108, 76, },
@@ -12945,6 +26933,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 108, 76, },
{ 7, 1, 0, 2, 108, 54, },
{ 8, 1, 0, 2, 108, 76, },
+ { 9, 1, 0, 2, 108, 127, },
+ { 0, 1, 0, 2, 112, 76, },
+ { 2, 1, 0, 2, 112, 62, },
{ 1, 1, 0, 2, 112, 76, },
{ 3, 1, 0, 2, 112, 76, },
{ 4, 1, 0, 2, 112, 76, },
@@ -12952,6 +26943,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 112, 76, },
{ 7, 1, 0, 2, 112, 54, },
{ 8, 1, 0, 2, 112, 76, },
+ { 9, 1, 0, 2, 112, 127, },
+ { 0, 1, 0, 2, 116, 76, },
+ { 2, 1, 0, 2, 116, 62, },
{ 1, 1, 0, 2, 116, 76, },
{ 3, 1, 0, 2, 116, 76, },
{ 4, 1, 0, 2, 116, 76, },
@@ -12959,6 +26953,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 116, 76, },
{ 7, 1, 0, 2, 116, 54, },
{ 8, 1, 0, 2, 116, 76, },
+ { 9, 1, 0, 2, 116, 127, },
+ { 0, 1, 0, 2, 120, 76, },
+ { 2, 1, 0, 2, 120, 62, },
{ 1, 1, 0, 2, 120, 76, },
{ 3, 1, 0, 2, 120, 127, },
{ 4, 1, 0, 2, 120, 76, },
@@ -12966,6 +26963,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 120, 76, },
{ 7, 1, 0, 2, 120, 54, },
{ 8, 1, 0, 2, 120, 76, },
+ { 9, 1, 0, 2, 120, 127, },
+ { 0, 1, 0, 2, 124, 76, },
+ { 2, 1, 0, 2, 124, 62, },
{ 1, 1, 0, 2, 124, 76, },
{ 3, 1, 0, 2, 124, 127, },
{ 4, 1, 0, 2, 124, 76, },
@@ -12973,6 +26973,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 124, 76, },
{ 7, 1, 0, 2, 124, 54, },
{ 8, 1, 0, 2, 124, 76, },
+ { 9, 1, 0, 2, 124, 127, },
+ { 0, 1, 0, 2, 128, 76, },
+ { 2, 1, 0, 2, 128, 62, },
{ 1, 1, 0, 2, 128, 76, },
{ 3, 1, 0, 2, 128, 127, },
{ 4, 1, 0, 2, 128, 76, },
@@ -12980,6 +26983,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 128, 76, },
{ 7, 1, 0, 2, 128, 54, },
{ 8, 1, 0, 2, 128, 76, },
+ { 9, 1, 0, 2, 128, 127, },
+ { 0, 1, 0, 2, 132, 76, },
+ { 2, 1, 0, 2, 132, 62, },
{ 1, 1, 0, 2, 132, 76, },
{ 3, 1, 0, 2, 132, 76, },
{ 4, 1, 0, 2, 132, 76, },
@@ -12987,20 +26993,29 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 132, 76, },
{ 7, 1, 0, 2, 132, 54, },
{ 8, 1, 0, 2, 132, 76, },
+ { 9, 1, 0, 2, 132, 127, },
+ { 0, 1, 0, 2, 136, 76, },
+ { 2, 1, 0, 2, 136, 62, },
{ 1, 1, 0, 2, 136, 76, },
{ 3, 1, 0, 2, 136, 76, },
{ 4, 1, 0, 2, 136, 76, },
{ 5, 1, 0, 2, 136, 62, },
{ 6, 1, 0, 2, 136, 76, },
- { 7, 1, 0, 2, 136, 127, },
+ { 7, 1, 0, 2, 136, 54, },
{ 8, 1, 0, 2, 136, 76, },
+ { 9, 1, 0, 2, 136, 127, },
+ { 0, 1, 0, 2, 140, 70, },
+ { 2, 1, 0, 2, 140, 62, },
{ 1, 1, 0, 2, 140, 76, },
{ 3, 1, 0, 2, 140, 70, },
{ 4, 1, 0, 2, 140, 76, },
{ 5, 1, 0, 2, 140, 62, },
{ 6, 1, 0, 2, 140, 70, },
- { 7, 1, 0, 2, 140, 127, },
+ { 7, 1, 0, 2, 140, 54, },
{ 8, 1, 0, 2, 140, 70, },
+ { 9, 1, 0, 2, 140, 127, },
+ { 0, 1, 0, 2, 144, 76, },
+ { 2, 1, 0, 2, 144, 127, },
{ 1, 1, 0, 2, 144, 127, },
{ 3, 1, 0, 2, 144, 76, },
{ 4, 1, 0, 2, 144, 76, },
@@ -13008,6 +27023,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 144, 76, },
{ 7, 1, 0, 2, 144, 127, },
{ 8, 1, 0, 2, 144, 76, },
+ { 9, 1, 0, 2, 144, 127, },
+ { 0, 1, 0, 2, 149, 76, },
+ { 2, 1, 0, 2, 149, -128, },
{ 1, 1, 0, 2, 149, 127, },
{ 3, 1, 0, 2, 149, 76, },
{ 4, 1, 0, 2, 149, 74, },
@@ -13015,6 +27033,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 149, 76, },
{ 7, 1, 0, 2, 149, 54, },
{ 8, 1, 0, 2, 149, 76, },
+ { 9, 1, 0, 2, 149, -128, },
+ { 0, 1, 0, 2, 153, 76, },
+ { 2, 1, 0, 2, 153, -128, },
{ 1, 1, 0, 2, 153, 127, },
{ 3, 1, 0, 2, 153, 76, },
{ 4, 1, 0, 2, 153, 74, },
@@ -13022,6 +27043,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 153, 76, },
{ 7, 1, 0, 2, 153, 54, },
{ 8, 1, 0, 2, 153, 76, },
+ { 9, 1, 0, 2, 153, -128, },
+ { 0, 1, 0, 2, 157, 76, },
+ { 2, 1, 0, 2, 157, -128, },
{ 1, 1, 0, 2, 157, 127, },
{ 3, 1, 0, 2, 157, 76, },
{ 4, 1, 0, 2, 157, 74, },
@@ -13029,6 +27053,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 157, 76, },
{ 7, 1, 0, 2, 157, 54, },
{ 8, 1, 0, 2, 157, 76, },
+ { 9, 1, 0, 2, 157, -128, },
+ { 0, 1, 0, 2, 161, 76, },
+ { 2, 1, 0, 2, 161, -128, },
{ 1, 1, 0, 2, 161, 127, },
{ 3, 1, 0, 2, 161, 76, },
{ 4, 1, 0, 2, 161, 74, },
@@ -13036,6 +27063,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 161, 76, },
{ 7, 1, 0, 2, 161, 54, },
{ 8, 1, 0, 2, 161, 76, },
+ { 9, 1, 0, 2, 161, -128, },
+ { 0, 1, 0, 2, 165, 76, },
+ { 2, 1, 0, 2, 165, -128, },
{ 1, 1, 0, 2, 165, 127, },
{ 3, 1, 0, 2, 165, 76, },
{ 4, 1, 0, 2, 165, 74, },
@@ -13043,6 +27073,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 165, 76, },
{ 7, 1, 0, 2, 165, 54, },
{ 8, 1, 0, 2, 165, 76, },
+ { 9, 1, 0, 2, 165, -128, },
+ { 0, 1, 0, 3, 36, 68, },
+ { 2, 1, 0, 3, 36, 38, },
{ 1, 1, 0, 3, 36, 50, },
{ 3, 1, 0, 3, 36, 38, },
{ 4, 1, 0, 3, 36, 66, },
@@ -13050,6 +27083,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 36, 52, },
{ 7, 1, 0, 3, 36, 30, },
{ 8, 1, 0, 3, 36, 50, },
+ { 9, 1, 0, 3, 36, 38, },
+ { 0, 1, 0, 3, 40, 68, },
+ { 2, 1, 0, 3, 40, 38, },
{ 1, 1, 0, 3, 40, 50, },
{ 3, 1, 0, 3, 40, 38, },
{ 4, 1, 0, 3, 40, 66, },
@@ -13057,6 +27093,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 40, 52, },
{ 7, 1, 0, 3, 40, 30, },
{ 8, 1, 0, 3, 40, 50, },
+ { 9, 1, 0, 3, 40, 38, },
+ { 0, 1, 0, 3, 44, 68, },
+ { 2, 1, 0, 3, 44, 38, },
{ 1, 1, 0, 3, 44, 50, },
{ 3, 1, 0, 3, 44, 38, },
{ 4, 1, 0, 3, 44, 66, },
@@ -13064,13 +27103,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 44, 52, },
{ 7, 1, 0, 3, 44, 30, },
{ 8, 1, 0, 3, 44, 50, },
+ { 9, 1, 0, 3, 44, 38, },
+ { 0, 1, 0, 3, 48, 68, },
+ { 2, 1, 0, 3, 48, 38, },
{ 1, 1, 0, 3, 48, 50, },
{ 3, 1, 0, 3, 48, 38, },
- { 4, 1, 0, 3, 48, 66, },
+ { 4, 1, 0, 3, 48, 36, },
{ 5, 1, 0, 3, 48, 38, },
{ 6, 1, 0, 3, 48, 52, },
{ 7, 1, 0, 3, 48, 30, },
{ 8, 1, 0, 3, 48, 50, },
+ { 9, 1, 0, 3, 48, 38, },
+ { 0, 1, 0, 3, 52, 68, },
+ { 2, 1, 0, 3, 52, 38, },
{ 1, 1, 0, 3, 52, 50, },
{ 3, 1, 0, 3, 52, 40, },
{ 4, 1, 0, 3, 52, 66, },
@@ -13078,6 +27123,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 52, 68, },
{ 7, 1, 0, 3, 52, 30, },
{ 8, 1, 0, 3, 52, 68, },
+ { 9, 1, 0, 3, 52, 38, },
+ { 0, 1, 0, 3, 56, 68, },
+ { 2, 1, 0, 3, 56, 38, },
{ 1, 1, 0, 3, 56, 50, },
{ 3, 1, 0, 3, 56, 40, },
{ 4, 1, 0, 3, 56, 66, },
@@ -13085,6 +27133,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 56, 68, },
{ 7, 1, 0, 3, 56, 30, },
{ 8, 1, 0, 3, 56, 68, },
+ { 9, 1, 0, 3, 56, 38, },
+ { 0, 1, 0, 3, 60, 66, },
+ { 2, 1, 0, 3, 60, 38, },
{ 1, 1, 0, 3, 60, 50, },
{ 3, 1, 0, 3, 60, 40, },
{ 4, 1, 0, 3, 60, 66, },
@@ -13092,6 +27143,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 60, 66, },
{ 7, 1, 0, 3, 60, 30, },
{ 8, 1, 0, 3, 60, 66, },
+ { 9, 1, 0, 3, 60, 38, },
+ { 0, 1, 0, 3, 64, 68, },
+ { 2, 1, 0, 3, 64, 38, },
{ 1, 1, 0, 3, 64, 50, },
{ 3, 1, 0, 3, 64, 40, },
{ 4, 1, 0, 3, 64, 66, },
@@ -13099,6 +27153,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 64, 68, },
{ 7, 1, 0, 3, 64, 30, },
{ 8, 1, 0, 3, 64, 68, },
+ { 9, 1, 0, 3, 64, 38, },
+ { 0, 1, 0, 3, 100, 60, },
+ { 2, 1, 0, 3, 100, 38, },
{ 1, 1, 0, 3, 100, 70, },
{ 3, 1, 0, 3, 100, 60, },
{ 4, 1, 0, 3, 100, 64, },
@@ -13106,6 +27163,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 100, 60, },
{ 7, 1, 0, 3, 100, 30, },
{ 8, 1, 0, 3, 100, 60, },
+ { 9, 1, 0, 3, 100, 127, },
+ { 0, 1, 0, 3, 104, 68, },
+ { 2, 1, 0, 3, 104, 38, },
{ 1, 1, 0, 3, 104, 70, },
{ 3, 1, 0, 3, 104, 68, },
{ 4, 1, 0, 3, 104, 64, },
@@ -13113,6 +27173,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 104, 68, },
{ 7, 1, 0, 3, 104, 30, },
{ 8, 1, 0, 3, 104, 68, },
+ { 9, 1, 0, 3, 104, 127, },
+ { 0, 1, 0, 3, 108, 68, },
+ { 2, 1, 0, 3, 108, 38, },
{ 1, 1, 0, 3, 108, 70, },
{ 3, 1, 0, 3, 108, 68, },
{ 4, 1, 0, 3, 108, 64, },
@@ -13120,6 +27183,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 108, 68, },
{ 7, 1, 0, 3, 108, 30, },
{ 8, 1, 0, 3, 108, 68, },
+ { 9, 1, 0, 3, 108, 127, },
+ { 0, 1, 0, 3, 112, 68, },
+ { 2, 1, 0, 3, 112, 38, },
{ 1, 1, 0, 3, 112, 70, },
{ 3, 1, 0, 3, 112, 68, },
{ 4, 1, 0, 3, 112, 64, },
@@ -13127,6 +27193,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 112, 68, },
{ 7, 1, 0, 3, 112, 30, },
{ 8, 1, 0, 3, 112, 68, },
+ { 9, 1, 0, 3, 112, 127, },
+ { 0, 1, 0, 3, 116, 68, },
+ { 2, 1, 0, 3, 116, 38, },
{ 1, 1, 0, 3, 116, 70, },
{ 3, 1, 0, 3, 116, 68, },
{ 4, 1, 0, 3, 116, 64, },
@@ -13134,6 +27203,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 116, 68, },
{ 7, 1, 0, 3, 116, 30, },
{ 8, 1, 0, 3, 116, 68, },
+ { 9, 1, 0, 3, 116, 127, },
+ { 0, 1, 0, 3, 120, 68, },
+ { 2, 1, 0, 3, 120, 38, },
{ 1, 1, 0, 3, 120, 70, },
{ 3, 1, 0, 3, 120, 127, },
{ 4, 1, 0, 3, 120, 64, },
@@ -13141,6 +27213,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 120, 68, },
{ 7, 1, 0, 3, 120, 30, },
{ 8, 1, 0, 3, 120, 68, },
+ { 9, 1, 0, 3, 120, 127, },
+ { 0, 1, 0, 3, 124, 68, },
+ { 2, 1, 0, 3, 124, 38, },
{ 1, 1, 0, 3, 124, 70, },
{ 3, 1, 0, 3, 124, 127, },
{ 4, 1, 0, 3, 124, 64, },
@@ -13148,6 +27223,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 124, 68, },
{ 7, 1, 0, 3, 124, 30, },
{ 8, 1, 0, 3, 124, 68, },
+ { 9, 1, 0, 3, 124, 127, },
+ { 0, 1, 0, 3, 128, 68, },
+ { 2, 1, 0, 3, 128, 38, },
{ 1, 1, 0, 3, 128, 70, },
{ 3, 1, 0, 3, 128, 127, },
{ 4, 1, 0, 3, 128, 64, },
@@ -13155,6 +27233,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 128, 68, },
{ 7, 1, 0, 3, 128, 30, },
{ 8, 1, 0, 3, 128, 68, },
+ { 9, 1, 0, 3, 128, 127, },
+ { 0, 1, 0, 3, 132, 68, },
+ { 2, 1, 0, 3, 132, 38, },
{ 1, 1, 0, 3, 132, 70, },
{ 3, 1, 0, 3, 132, 68, },
{ 4, 1, 0, 3, 132, 64, },
@@ -13162,20 +27243,29 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 132, 68, },
{ 7, 1, 0, 3, 132, 30, },
{ 8, 1, 0, 3, 132, 68, },
+ { 9, 1, 0, 3, 132, 127, },
+ { 0, 1, 0, 3, 136, 68, },
+ { 2, 1, 0, 3, 136, 38, },
{ 1, 1, 0, 3, 136, 70, },
{ 3, 1, 0, 3, 136, 68, },
{ 4, 1, 0, 3, 136, 64, },
{ 5, 1, 0, 3, 136, 38, },
{ 6, 1, 0, 3, 136, 68, },
- { 7, 1, 0, 3, 136, 127, },
+ { 7, 1, 0, 3, 136, 30, },
{ 8, 1, 0, 3, 136, 68, },
+ { 9, 1, 0, 3, 136, 127, },
+ { 0, 1, 0, 3, 140, 60, },
+ { 2, 1, 0, 3, 140, 38, },
{ 1, 1, 0, 3, 140, 70, },
{ 3, 1, 0, 3, 140, 60, },
{ 4, 1, 0, 3, 140, 64, },
{ 5, 1, 0, 3, 140, 38, },
{ 6, 1, 0, 3, 140, 60, },
- { 7, 1, 0, 3, 140, 127, },
+ { 7, 1, 0, 3, 140, 30, },
{ 8, 1, 0, 3, 140, 60, },
+ { 9, 1, 0, 3, 140, 127, },
+ { 0, 1, 0, 3, 144, 68, },
+ { 2, 1, 0, 3, 144, 127, },
{ 1, 1, 0, 3, 144, 127, },
{ 3, 1, 0, 3, 144, 68, },
{ 4, 1, 0, 3, 144, 64, },
@@ -13183,6 +27273,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 144, 68, },
{ 7, 1, 0, 3, 144, 127, },
{ 8, 1, 0, 3, 144, 68, },
+ { 9, 1, 0, 3, 144, 127, },
+ { 0, 1, 0, 3, 149, 76, },
+ { 2, 1, 0, 3, 149, -128, },
{ 1, 1, 0, 3, 149, 127, },
{ 3, 1, 0, 3, 149, 76, },
{ 4, 1, 0, 3, 149, 60, },
@@ -13190,6 +27283,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 149, 76, },
{ 7, 1, 0, 3, 149, 30, },
{ 8, 1, 0, 3, 149, 72, },
+ { 9, 1, 0, 3, 149, -128, },
+ { 0, 1, 0, 3, 153, 76, },
+ { 2, 1, 0, 3, 153, -128, },
{ 1, 1, 0, 3, 153, 127, },
{ 3, 1, 0, 3, 153, 76, },
{ 4, 1, 0, 3, 153, 60, },
@@ -13197,6 +27293,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 153, 76, },
{ 7, 1, 0, 3, 153, 30, },
{ 8, 1, 0, 3, 153, 76, },
+ { 9, 1, 0, 3, 153, -128, },
+ { 0, 1, 0, 3, 157, 76, },
+ { 2, 1, 0, 3, 157, -128, },
{ 1, 1, 0, 3, 157, 127, },
{ 3, 1, 0, 3, 157, 76, },
{ 4, 1, 0, 3, 157, 60, },
@@ -13204,6 +27303,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 157, 76, },
{ 7, 1, 0, 3, 157, 30, },
{ 8, 1, 0, 3, 157, 76, },
+ { 9, 1, 0, 3, 157, -128, },
+ { 0, 1, 0, 3, 161, 76, },
+ { 2, 1, 0, 3, 161, -128, },
{ 1, 1, 0, 3, 161, 127, },
{ 3, 1, 0, 3, 161, 76, },
{ 4, 1, 0, 3, 161, 60, },
@@ -13211,6 +27313,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 161, 76, },
{ 7, 1, 0, 3, 161, 30, },
{ 8, 1, 0, 3, 161, 76, },
+ { 9, 1, 0, 3, 161, -128, },
+ { 0, 1, 0, 3, 165, 76, },
+ { 2, 1, 0, 3, 165, -128, },
{ 1, 1, 0, 3, 165, 127, },
{ 3, 1, 0, 3, 165, 76, },
{ 4, 1, 0, 3, 165, 60, },
@@ -13218,6 +27323,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 165, 76, },
{ 7, 1, 0, 3, 165, 30, },
{ 8, 1, 0, 3, 165, 76, },
+ { 9, 1, 0, 3, 165, -128, },
+ { 0, 1, 1, 2, 38, 66, },
+ { 2, 1, 1, 2, 38, 64, },
{ 1, 1, 1, 2, 38, 62, },
{ 3, 1, 1, 2, 38, 64, },
{ 4, 1, 1, 2, 38, 72, },
@@ -13225,13 +27333,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 38, 64, },
{ 7, 1, 1, 2, 38, 54, },
{ 8, 1, 1, 2, 38, 62, },
+ { 9, 1, 1, 2, 38, 64, },
+ { 0, 1, 1, 2, 46, 72, },
+ { 2, 1, 1, 2, 46, 64, },
{ 1, 1, 1, 2, 46, 62, },
{ 3, 1, 1, 2, 46, 64, },
- { 4, 1, 1, 2, 46, 72, },
+ { 4, 1, 1, 2, 46, 60, },
{ 5, 1, 1, 2, 46, 64, },
{ 6, 1, 1, 2, 46, 64, },
{ 7, 1, 1, 2, 46, 54, },
{ 8, 1, 1, 2, 46, 62, },
+ { 9, 1, 1, 2, 46, 64, },
+ { 0, 1, 1, 2, 54, 72, },
+ { 2, 1, 1, 2, 54, 64, },
{ 1, 1, 1, 2, 54, 62, },
{ 3, 1, 1, 2, 54, 64, },
{ 4, 1, 1, 2, 54, 72, },
@@ -13239,6 +27353,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 54, 72, },
{ 7, 1, 1, 2, 54, 54, },
{ 8, 1, 1, 2, 54, 72, },
+ { 9, 1, 1, 2, 54, 64, },
+ { 0, 1, 1, 2, 62, 64, },
+ { 2, 1, 1, 2, 62, 64, },
{ 1, 1, 1, 2, 62, 62, },
{ 3, 1, 1, 2, 62, 64, },
{ 4, 1, 1, 2, 62, 70, },
@@ -13246,6 +27363,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 62, 64, },
{ 7, 1, 1, 2, 62, 54, },
{ 8, 1, 1, 2, 62, 64, },
+ { 9, 1, 1, 2, 62, 64, },
+ { 0, 1, 1, 2, 102, 58, },
+ { 2, 1, 1, 2, 102, 64, },
{ 1, 1, 1, 2, 102, 72, },
{ 3, 1, 1, 2, 102, 58, },
{ 4, 1, 1, 2, 102, 72, },
@@ -13253,6 +27373,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 102, 58, },
{ 7, 1, 1, 2, 102, 54, },
{ 8, 1, 1, 2, 102, 58, },
+ { 9, 1, 1, 2, 102, 127, },
+ { 0, 1, 1, 2, 110, 72, },
+ { 2, 1, 1, 2, 110, 64, },
{ 1, 1, 1, 2, 110, 72, },
{ 3, 1, 1, 2, 110, 72, },
{ 4, 1, 1, 2, 110, 72, },
@@ -13260,6 +27383,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 110, 72, },
{ 7, 1, 1, 2, 110, 54, },
{ 8, 1, 1, 2, 110, 72, },
+ { 9, 1, 1, 2, 110, 127, },
+ { 0, 1, 1, 2, 118, 72, },
+ { 2, 1, 1, 2, 118, 64, },
{ 1, 1, 1, 2, 118, 72, },
{ 3, 1, 1, 2, 118, 127, },
{ 4, 1, 1, 2, 118, 72, },
@@ -13267,6 +27393,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 118, 72, },
{ 7, 1, 1, 2, 118, 54, },
{ 8, 1, 1, 2, 118, 72, },
+ { 9, 1, 1, 2, 118, 127, },
+ { 0, 1, 1, 2, 126, 72, },
+ { 2, 1, 1, 2, 126, 64, },
{ 1, 1, 1, 2, 126, 72, },
{ 3, 1, 1, 2, 126, 127, },
{ 4, 1, 1, 2, 126, 72, },
@@ -13274,13 +27403,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 126, 72, },
{ 7, 1, 1, 2, 126, 54, },
{ 8, 1, 1, 2, 126, 72, },
+ { 9, 1, 1, 2, 126, 127, },
+ { 0, 1, 1, 2, 134, 72, },
+ { 2, 1, 1, 2, 134, 64, },
{ 1, 1, 1, 2, 134, 72, },
{ 3, 1, 1, 2, 134, 72, },
{ 4, 1, 1, 2, 134, 72, },
{ 5, 1, 1, 2, 134, 64, },
{ 6, 1, 1, 2, 134, 72, },
- { 7, 1, 1, 2, 134, 127, },
+ { 7, 1, 1, 2, 134, 54, },
{ 8, 1, 1, 2, 134, 72, },
+ { 9, 1, 1, 2, 134, 127, },
+ { 0, 1, 1, 2, 142, 72, },
+ { 2, 1, 1, 2, 142, 127, },
{ 1, 1, 1, 2, 142, 127, },
{ 3, 1, 1, 2, 142, 72, },
{ 4, 1, 1, 2, 142, 72, },
@@ -13288,6 +27423,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 142, 72, },
{ 7, 1, 1, 2, 142, 127, },
{ 8, 1, 1, 2, 142, 72, },
+ { 9, 1, 1, 2, 142, 127, },
+ { 0, 1, 1, 2, 151, 72, },
+ { 2, 1, 1, 2, 151, -128, },
{ 1, 1, 1, 2, 151, 127, },
{ 3, 1, 1, 2, 151, 72, },
{ 4, 1, 1, 2, 151, 72, },
@@ -13295,6 +27433,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 151, 72, },
{ 7, 1, 1, 2, 151, 54, },
{ 8, 1, 1, 2, 151, 72, },
+ { 9, 1, 1, 2, 151, -128, },
+ { 0, 1, 1, 2, 159, 72, },
+ { 2, 1, 1, 2, 159, -128, },
{ 1, 1, 1, 2, 159, 127, },
{ 3, 1, 1, 2, 159, 72, },
{ 4, 1, 1, 2, 159, 72, },
@@ -13302,6 +27443,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 159, 72, },
{ 7, 1, 1, 2, 159, 54, },
{ 8, 1, 1, 2, 159, 72, },
+ { 9, 1, 1, 2, 159, -128, },
+ { 0, 1, 1, 3, 38, 60, },
+ { 2, 1, 1, 3, 38, 40, },
{ 1, 1, 1, 3, 38, 50, },
{ 3, 1, 1, 3, 38, 40, },
{ 4, 1, 1, 3, 38, 62, },
@@ -13309,13 +27453,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 38, 52, },
{ 7, 1, 1, 3, 38, 30, },
{ 8, 1, 1, 3, 38, 50, },
+ { 9, 1, 1, 3, 38, 40, },
+ { 0, 1, 1, 3, 46, 68, },
+ { 2, 1, 1, 3, 46, 40, },
{ 1, 1, 1, 3, 46, 50, },
{ 3, 1, 1, 3, 46, 40, },
- { 4, 1, 1, 3, 46, 62, },
+ { 4, 1, 1, 3, 46, 46, },
{ 5, 1, 1, 3, 46, 40, },
{ 6, 1, 1, 3, 46, 52, },
{ 7, 1, 1, 3, 46, 30, },
{ 8, 1, 1, 3, 46, 50, },
+ { 9, 1, 1, 3, 46, 40, },
+ { 0, 1, 1, 3, 54, 68, },
+ { 2, 1, 1, 3, 54, 40, },
{ 1, 1, 1, 3, 54, 50, },
{ 3, 1, 1, 3, 54, 40, },
{ 4, 1, 1, 3, 54, 62, },
@@ -13323,6 +27473,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 54, 68, },
{ 7, 1, 1, 3, 54, 30, },
{ 8, 1, 1, 3, 54, 68, },
+ { 9, 1, 1, 3, 54, 40, },
+ { 0, 1, 1, 3, 62, 58, },
+ { 2, 1, 1, 3, 62, 40, },
{ 1, 1, 1, 3, 62, 48, },
{ 3, 1, 1, 3, 62, 40, },
{ 4, 1, 1, 3, 62, 58, },
@@ -13330,6 +27483,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 62, 58, },
{ 7, 1, 1, 3, 62, 30, },
{ 8, 1, 1, 3, 62, 58, },
+ { 9, 1, 1, 3, 62, 40, },
+ { 0, 1, 1, 3, 102, 54, },
+ { 2, 1, 1, 3, 102, 40, },
{ 1, 1, 1, 3, 102, 70, },
{ 3, 1, 1, 3, 102, 54, },
{ 4, 1, 1, 3, 102, 64, },
@@ -13337,6 +27493,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 102, 54, },
{ 7, 1, 1, 3, 102, 30, },
{ 8, 1, 1, 3, 102, 54, },
+ { 9, 1, 1, 3, 102, 127, },
+ { 0, 1, 1, 3, 110, 68, },
+ { 2, 1, 1, 3, 110, 40, },
{ 1, 1, 1, 3, 110, 70, },
{ 3, 1, 1, 3, 110, 68, },
{ 4, 1, 1, 3, 110, 64, },
@@ -13344,6 +27503,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 110, 68, },
{ 7, 1, 1, 3, 110, 30, },
{ 8, 1, 1, 3, 110, 68, },
+ { 9, 1, 1, 3, 110, 127, },
+ { 0, 1, 1, 3, 118, 68, },
+ { 2, 1, 1, 3, 118, 40, },
{ 1, 1, 1, 3, 118, 70, },
{ 3, 1, 1, 3, 118, 127, },
{ 4, 1, 1, 3, 118, 64, },
@@ -13351,6 +27513,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 118, 68, },
{ 7, 1, 1, 3, 118, 30, },
{ 8, 1, 1, 3, 118, 68, },
+ { 9, 1, 1, 3, 118, 127, },
+ { 0, 1, 1, 3, 126, 68, },
+ { 2, 1, 1, 3, 126, 40, },
{ 1, 1, 1, 3, 126, 70, },
{ 3, 1, 1, 3, 126, 127, },
{ 4, 1, 1, 3, 126, 64, },
@@ -13358,13 +27523,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 126, 68, },
{ 7, 1, 1, 3, 126, 30, },
{ 8, 1, 1, 3, 126, 68, },
+ { 9, 1, 1, 3, 126, 127, },
+ { 0, 1, 1, 3, 134, 68, },
+ { 2, 1, 1, 3, 134, 40, },
{ 1, 1, 1, 3, 134, 70, },
{ 3, 1, 1, 3, 134, 68, },
{ 4, 1, 1, 3, 134, 64, },
{ 5, 1, 1, 3, 134, 40, },
{ 6, 1, 1, 3, 134, 68, },
- { 7, 1, 1, 3, 134, 127, },
+ { 7, 1, 1, 3, 134, 30, },
{ 8, 1, 1, 3, 134, 68, },
+ { 9, 1, 1, 3, 134, 127, },
+ { 0, 1, 1, 3, 142, 68, },
+ { 2, 1, 1, 3, 142, 127, },
{ 1, 1, 1, 3, 142, 127, },
{ 3, 1, 1, 3, 142, 68, },
{ 4, 1, 1, 3, 142, 64, },
@@ -13372,6 +27543,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 142, 68, },
{ 7, 1, 1, 3, 142, 127, },
{ 8, 1, 1, 3, 142, 68, },
+ { 9, 1, 1, 3, 142, 127, },
+ { 0, 1, 1, 3, 151, 72, },
+ { 2, 1, 1, 3, 151, -128, },
{ 1, 1, 1, 3, 151, 127, },
{ 3, 1, 1, 3, 151, 72, },
{ 4, 1, 1, 3, 151, 66, },
@@ -13379,6 +27553,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 151, 72, },
{ 7, 1, 1, 3, 151, 30, },
{ 8, 1, 1, 3, 151, 68, },
+ { 9, 1, 1, 3, 151, -128, },
+ { 0, 1, 1, 3, 159, 72, },
+ { 2, 1, 1, 3, 159, -128, },
{ 1, 1, 1, 3, 159, 127, },
{ 3, 1, 1, 3, 159, 72, },
{ 4, 1, 1, 3, 159, 66, },
@@ -13386,6 +27563,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 159, 72, },
{ 7, 1, 1, 3, 159, 30, },
{ 8, 1, 1, 3, 159, 72, },
+ { 9, 1, 1, 3, 159, -128, },
+ { 0, 1, 2, 4, 42, 64, },
+ { 2, 1, 2, 4, 42, 64, },
{ 1, 1, 2, 4, 42, 64, },
{ 3, 1, 2, 4, 42, 64, },
{ 4, 1, 2, 4, 42, 68, },
@@ -13393,6 +27573,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 42, 64, },
{ 7, 1, 2, 4, 42, 54, },
{ 8, 1, 2, 4, 42, 62, },
+ { 9, 1, 2, 4, 42, 64, },
+ { 0, 1, 2, 4, 58, 62, },
+ { 2, 1, 2, 4, 58, 64, },
{ 1, 1, 2, 4, 58, 64, },
{ 3, 1, 2, 4, 58, 62, },
{ 4, 1, 2, 4, 58, 64, },
@@ -13400,6 +27583,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 58, 62, },
{ 7, 1, 2, 4, 58, 54, },
{ 8, 1, 2, 4, 58, 62, },
+ { 9, 1, 2, 4, 58, 64, },
+ { 0, 1, 2, 4, 106, 58, },
+ { 2, 1, 2, 4, 106, 64, },
{ 1, 1, 2, 4, 106, 72, },
{ 3, 1, 2, 4, 106, 58, },
{ 4, 1, 2, 4, 106, 66, },
@@ -13407,6 +27593,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 106, 58, },
{ 7, 1, 2, 4, 106, 54, },
{ 8, 1, 2, 4, 106, 58, },
+ { 9, 1, 2, 4, 106, 127, },
+ { 0, 1, 2, 4, 122, 72, },
+ { 2, 1, 2, 4, 122, 64, },
{ 1, 1, 2, 4, 122, 72, },
{ 3, 1, 2, 4, 122, 127, },
{ 4, 1, 2, 4, 122, 68, },
@@ -13414,6 +27603,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 122, 72, },
{ 7, 1, 2, 4, 122, 54, },
{ 8, 1, 2, 4, 122, 72, },
+ { 9, 1, 2, 4, 122, 127, },
+ { 0, 1, 2, 4, 138, 72, },
+ { 2, 1, 2, 4, 138, 127, },
{ 1, 1, 2, 4, 138, 127, },
{ 3, 1, 2, 4, 138, 72, },
{ 4, 1, 2, 4, 138, 68, },
@@ -13421,6 +27613,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 138, 72, },
{ 7, 1, 2, 4, 138, 127, },
{ 8, 1, 2, 4, 138, 72, },
+ { 9, 1, 2, 4, 138, 127, },
+ { 0, 1, 2, 4, 155, 72, },
+ { 2, 1, 2, 4, 155, -128, },
{ 1, 1, 2, 4, 155, 127, },
{ 3, 1, 2, 4, 155, 72, },
{ 4, 1, 2, 4, 155, 68, },
@@ -13428,6 +27623,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 155, 72, },
{ 7, 1, 2, 4, 155, 54, },
{ 8, 1, 2, 4, 155, 68, },
+ { 9, 1, 2, 4, 155, -128, },
+ { 0, 1, 2, 5, 42, 54, },
+ { 2, 1, 2, 5, 42, 40, },
{ 1, 1, 2, 5, 42, 50, },
{ 3, 1, 2, 5, 42, 40, },
{ 4, 1, 2, 5, 42, 58, },
@@ -13435,6 +27633,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 42, 52, },
{ 7, 1, 2, 5, 42, 30, },
{ 8, 1, 2, 5, 42, 50, },
+ { 9, 1, 2, 5, 42, 40, },
+ { 0, 1, 2, 5, 58, 52, },
+ { 2, 1, 2, 5, 58, 40, },
{ 1, 1, 2, 5, 58, 50, },
{ 3, 1, 2, 5, 58, 40, },
{ 4, 1, 2, 5, 58, 56, },
@@ -13442,6 +27643,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 58, 52, },
{ 7, 1, 2, 5, 58, 30, },
{ 8, 1, 2, 5, 58, 52, },
+ { 9, 1, 2, 5, 58, 40, },
+ { 0, 1, 2, 5, 106, 50, },
+ { 2, 1, 2, 5, 106, 40, },
{ 1, 1, 2, 5, 106, 72, },
{ 3, 1, 2, 5, 106, 50, },
{ 4, 1, 2, 5, 106, 56, },
@@ -13449,6 +27653,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 106, 50, },
{ 7, 1, 2, 5, 106, 30, },
{ 8, 1, 2, 5, 106, 50, },
+ { 9, 1, 2, 5, 106, 127, },
+ { 0, 1, 2, 5, 122, 66, },
+ { 2, 1, 2, 5, 122, 40, },
{ 1, 1, 2, 5, 122, 72, },
{ 3, 1, 2, 5, 122, 127, },
{ 4, 1, 2, 5, 122, 56, },
@@ -13456,6 +27663,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 122, 66, },
{ 7, 1, 2, 5, 122, 30, },
{ 8, 1, 2, 5, 122, 66, },
+ { 9, 1, 2, 5, 122, 127, },
+ { 0, 1, 2, 5, 138, 66, },
+ { 2, 1, 2, 5, 138, 127, },
{ 1, 1, 2, 5, 138, 127, },
{ 3, 1, 2, 5, 138, 66, },
{ 4, 1, 2, 5, 138, 58, },
@@ -13463,6 +27673,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 138, 66, },
{ 7, 1, 2, 5, 138, 127, },
{ 8, 1, 2, 5, 138, 66, },
+ { 9, 1, 2, 5, 138, 127, },
+ { 0, 1, 2, 5, 155, 62, },
+ { 2, 1, 2, 5, 155, -128, },
{ 1, 1, 2, 5, 155, 127, },
{ 3, 1, 2, 5, 155, 62, },
{ 4, 1, 2, 5, 155, 58, },
@@ -13470,9 +27683,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 155, 62, },
{ 7, 1, 2, 5, 155, 30, },
{ 8, 1, 2, 5, 155, 62, },
+ { 9, 1, 2, 5, 155, -128, },
};
-RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0);
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type5);
static const u32 rtw8822c_dpk_afe_no_dpk[] = {
0x18a4, BIT(7), 0,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
index 80c06c4f8184..2ae2b0aa5699 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
@@ -12,6 +12,7 @@ extern const struct rtw_table rtw8822c_bb_pg_type0_tbl;
extern const struct rtw_table rtw8822c_rf_a_tbl;
extern const struct rtw_table rtw8822c_rf_b_tbl;
extern const struct rtw_table rtw8822c_txpwr_lmt_type0_tbl;
+extern const struct rtw_table rtw8822c_txpwr_lmt_type5_tbl;
extern const struct rtw_table rtw8822c_dpk_afe_no_dpk_tbl;
extern const struct rtw_table rtw8822c_dpk_afe_is_dpk_tbl;
extern const struct rtw_table rtw8822c_dpk_mac_bb_tbl;
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 60989987f67b..79c42118825f 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -196,7 +196,7 @@ static void rtw_tx_report_tx_status(struct rtw_dev *rtwdev,
ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
}
-void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
+void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src)
{
struct rtw_tx_report *tx_report = &rtwdev->tx_report;
struct rtw_c2h_cmd *c2h;
@@ -207,8 +207,13 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
c2h = get_c2h_from_skb(skb);
- sn = GET_CCX_REPORT_SEQNUM(c2h->payload);
- st = GET_CCX_REPORT_STATUS(c2h->payload);
+ if (src == C2H_CCX_TX_RPT) {
+ sn = GET_CCX_REPORT_SEQNUM_V0(c2h->payload);
+ st = GET_CCX_REPORT_STATUS_V0(c2h->payload);
+ } else {
+ sn = GET_CCX_REPORT_SEQNUM_V1(c2h->payload);
+ st = GET_CCX_REPORT_STATUS_V1(c2h->payload);
+ }
spin_lock_irqsave(&tx_report->q_lock, flags);
skb_queue_walk_safe(&tx_report->queue, cur, tmp) {
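A minimal sketch (not part of this patch) of how a C2H dispatcher would be expected to invoke the updated handler, assuming C2H_CCX_TX_RPT identifies the legacy v0 report layout and any other source id selects the v1 accessors, matching the branch above:

	/* hypothetical dispatcher excerpt */
	switch (c2h->id) {
	case C2H_CCX_TX_RPT:
		/* legacy firmware report: parsed with GET_CCX_REPORT_*_V0 */
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT);
		break;
	default:
		break;
	}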
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index b973de0f4dc0..72dfd4059f03 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -95,7 +95,7 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct sk_buff *skb);
void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb);
void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn);
-void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
+void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src);
void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 440088293aff..5c0adb0efc5d 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -832,7 +832,7 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
common->cqm_info.last_cqm_event_rssi = 0;
common->cqm_info.rssi_thold = bss_conf->cqm_rssi_thold;
common->cqm_info.rssi_hyst = bss_conf->cqm_rssi_hyst;
- rsi_dbg(INFO_ZONE, "RSSI throld & hysteresis are: %d %d\n",
+ rsi_dbg(INFO_ZONE, "RSSI threshold & hysteresis are: %d %d\n",
common->cqm_info.rssi_thold,
common->cqm_info.rssi_hyst);
}
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index ef01caac629c..271ed2ce2d7f 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -268,15 +268,11 @@ exit:
return ret;
}
-static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
+static void cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
{
- int ret = 0;
-
pr_debug("SW IRQ unsubscribe\n");
disable_irq_wake(self->func->irq);
free_irq(self->func->irq, self);
-
- return ret;
}
static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index f2609d5b6bf7..9acd8a41ea61 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -458,6 +458,7 @@ enum wl1271_cmd_key_type {
KEY_TKIP = 2,
KEY_AES = 3,
KEY_GEM = 4,
+ KEY_IGTK = 5,
};
struct wl1271_cmd_set_keys {
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index f140f7d7f553..4421fc656b1c 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3547,6 +3547,9 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
case WL1271_CIPHER_SUITE_GEM:
key_type = KEY_GEM;
break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key_type = KEY_IGTK;
+ break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -6214,6 +6217,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WL1271_CIPHER_SUITE_GEM,
+ WLAN_CIPHER_SUITE_AES_CMAC,
};
/* The tx descriptor buffer */
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 9f982c0627a0..a04afe79529c 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -60,39 +60,15 @@ static struct mii_timestamper *of_find_mii_timestamper(struct device_node *node)
return register_mii_timestamper(arg.np, arg.args[0]);
}
-static int of_mdiobus_register_phy(struct mii_bus *mdio,
- struct device_node *child, u32 addr)
+int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy,
+ struct device_node *child, u32 addr)
{
- struct mii_timestamper *mii_ts;
- struct phy_device *phy;
- bool is_c45;
int rc;
- u32 phy_id;
-
- mii_ts = of_find_mii_timestamper(child);
- if (IS_ERR(mii_ts))
- return PTR_ERR(mii_ts);
-
- is_c45 = of_device_is_compatible(child,
- "ethernet-phy-ieee802.3-c45");
-
- if (!is_c45 && !of_get_phy_id(child, &phy_id))
- phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
- else
- phy = get_phy_device(mdio, addr, is_c45);
- if (IS_ERR(phy)) {
- if (mii_ts)
- unregister_mii_timestamper(mii_ts);
- return PTR_ERR(phy);
- }
rc = of_irq_get(child, 0);
- if (rc == -EPROBE_DEFER) {
- if (mii_ts)
- unregister_mii_timestamper(mii_ts);
- phy_device_free(phy);
+ if (rc == -EPROBE_DEFER)
return rc;
- }
+
if (rc > 0) {
phy->irq = rc;
mdio->irq[addr] = rc;
@@ -118,10 +94,47 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
* register it */
rc = phy_device_register(phy);
if (rc) {
+ of_node_put(child);
+ return rc;
+ }
+
+ dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
+ child, addr);
+ return 0;
+}
+EXPORT_SYMBOL(of_mdiobus_phy_device_register);
+
+static int of_mdiobus_register_phy(struct mii_bus *mdio,
+ struct device_node *child, u32 addr)
+{
+ struct mii_timestamper *mii_ts;
+ struct phy_device *phy;
+ bool is_c45;
+ int rc;
+ u32 phy_id;
+
+ mii_ts = of_find_mii_timestamper(child);
+ if (IS_ERR(mii_ts))
+ return PTR_ERR(mii_ts);
+
+ is_c45 = of_device_is_compatible(child,
+ "ethernet-phy-ieee802.3-c45");
+
+ if (!is_c45 && !of_get_phy_id(child, &phy_id))
+ phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
+ else
+ phy = get_phy_device(mdio, addr, is_c45);
+ if (IS_ERR(phy)) {
+ if (mii_ts)
+ unregister_mii_timestamper(mii_ts);
+ return PTR_ERR(phy);
+ }
+
+ rc = of_mdiobus_phy_device_register(mdio, phy, child, addr);
+ if (rc) {
if (mii_ts)
unregister_mii_timestamper(mii_ts);
phy_device_free(phy);
- of_node_put(child);
return rc;
}
@@ -132,8 +145,6 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
if (mii_ts)
phy->mii_ts = mii_ts;
- dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
- child, addr);
return 0;
}
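The refactor above leaves PHY creation (phy_device_create()/get_phy_device()) with the caller and exports only the registration step. A hedged sketch of an external user of the new helper; the bus, addr and phy_np variables are illustrative assumptions, not taken from this patch:

	struct phy_device *phy;
	int err;

	phy = get_phy_device(bus, addr, /* is_c45 */ false);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	err = of_mdiobus_phy_device_register(bus, phy, phy_np, addr);
	if (err) {
		phy_device_free(phy);
		return err;
	}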
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index 48804049d697..ee7b5daabfd4 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -34,7 +34,7 @@
#define PARPORT_MAX_SPINTIME_VALUE 1000
static int do_active_device(struct ctl_table *table, int write,
- void __user *result, size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[256];
@@ -65,13 +65,13 @@ static int do_active_device(struct ctl_table *table, int write,
*lenp = len;
*ppos += len;
-
- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
#ifdef CONFIG_PARPORT_1284
static int do_autoprobe(struct ctl_table *table, int write,
- void __user *result, size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport_device_info *info = table->extra2;
const char *str;
@@ -108,13 +108,13 @@ static int do_autoprobe(struct ctl_table *table, int write,
*ppos += len;
- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
#endif /* IEEE1284.3 support. */
static int do_hardware_base_addr(struct ctl_table *table, int write,
- void __user *result,
- size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
@@ -136,13 +136,12 @@ static int do_hardware_base_addr(struct ctl_table *table, int write,
*lenp = len;
*ppos += len;
-
- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
static int do_hardware_irq(struct ctl_table *table, int write,
- void __user *result,
- size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
@@ -164,13 +163,12 @@ static int do_hardware_irq(struct ctl_table *table, int write,
*lenp = len;
*ppos += len;
-
- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
static int do_hardware_dma(struct ctl_table *table, int write,
- void __user *result,
- size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
@@ -192,13 +190,12 @@ static int do_hardware_dma(struct ctl_table *table, int write,
*lenp = len;
*ppos += len;
-
- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
static int do_hardware_modes(struct ctl_table *table, int write,
- void __user *result,
- size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[40];
@@ -231,8 +228,8 @@ static int do_hardware_modes(struct ctl_table *table, int write,
*lenp = len;
*ppos += len;
-
- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
#define PARPORT_PORT_DIR(CHILD) { .procname = NULL, .mode = 0555, .child = CHILD }
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index 65c23ef6408d..b3c05ff05783 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -16,7 +16,7 @@
#include <linux/power_supply.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/vermagic.h>
+#include <generated/utsrelease.h>
enum test_power_id {
TEST_AC,
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 93d574faf1fe..375cd6e4aade 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -136,6 +136,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
caps.pps = ptp->info->pps;
caps.n_pins = ptp->info->n_pins;
caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
+ caps.adjust_phase = ptp->info->adjphase != NULL;
if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
err = -EFAULT;
break;
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index acabbe72e55e..fc984a8828fb 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -146,6 +146,9 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
else
err = ops->adjfreq(ops, ppb);
ptp->dialed_frequency = tx->freq;
+ } else if (tx->modes & ADJ_OFFSET) {
+ if (ops->adjphase)
+ err = ops->adjphase(ops, tx->offset);
} else if (tx->modes == 0) {
tx->freq = ptp->dialed_frequency;
err = 0;
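With ADJ_OFFSET now routed to the driver's adjphase() callback, a PHC phase step can be requested from userspace through clock_adjtime(). A hypothetical sketch; the /dev/ptp0 path and the interpretation of tx.offset as nanoseconds are assumptions based on the clockmatrix driver changes below:

	#include <fcntl.h>
	#include <sys/timex.h>
	#include <time.h>
	#include <unistd.h>

	#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | 3)

	int main(void)
	{
		struct timex tx = { .modes = ADJ_OFFSET, .offset = 500 };
		int fd = open("/dev/ptp0", O_RDWR);

		if (fd < 0)
			return 1;
		/* ends up in ptp_clock_adjtime() -> ops->adjphase(ops, tx.offset) */
		clock_adjtime(FD_TO_CLOCKID(fd), &tx);
		close(fd);
		return 0;
	}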
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index 032e112c3dd9..ceb6bc58f3b4 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timekeeping.h>
@@ -24,6 +25,16 @@ MODULE_LICENSE("GPL");
#define SETTIME_CORRECTION (0)
+static long set_write_phase_ready(struct ptp_clock_info *ptp)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ channel->write_phase_ready = 1;
+
+ return 0;
+}
+
static int char_array_to_timespec(u8 *buf,
u8 count,
struct timespec64 *ts)
@@ -780,7 +791,7 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
/* Page size 128, last 4 bytes of page skipped */
if (((loaddr > 0x7b) && (loaddr <= 0x7f))
- || ((loaddr > 0xfb) && (loaddr <= 0xff)))
+ || loaddr > 0xfb)
continue;
err = idtcm_write(idtcm, regaddr, 0, &val, sizeof(val));
@@ -871,6 +882,64 @@ static int idtcm_set_pll_mode(struct idtcm_channel *channel,
/* PTP Hardware Clock interface */
+/**
+ * @brief Maximum absolute value for write phase offset in picoseconds
+ *
+ * Destination signed register is 32-bit register in resolution of 50ps
+ *
+ * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350
+ */
+static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ int err;
+ u8 i;
+ u8 buf[4] = {0};
+ s32 phase_50ps;
+ s64 offset_ps;
+
+ if (channel->pll_mode != PLL_MODE_WRITE_PHASE) {
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
+
+ if (err)
+ return err;
+
+ channel->write_phase_ready = 0;
+
+ ptp_schedule_worker(channel->ptp_clock,
+ msecs_to_jiffies(WR_PHASE_SETUP_MS));
+ }
+
+ if (!channel->write_phase_ready)
+ delta_ns = 0;
+
+ offset_ps = (s64)delta_ns * 1000;
+
+ /*
+ * Check for 32-bit signed max * 50:
+ *
+ * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350
+ */
+ if (offset_ps > MAX_ABS_WRITE_PHASE_PICOSECONDS)
+ offset_ps = MAX_ABS_WRITE_PHASE_PICOSECONDS;
+ else if (offset_ps < -MAX_ABS_WRITE_PHASE_PICOSECONDS)
+ offset_ps = -MAX_ABS_WRITE_PHASE_PICOSECONDS;
+
+ phase_50ps = DIV_ROUND_CLOSEST(div64_s64(offset_ps, 50), 1);
+
+ for (i = 0; i < 4; i++) {
+ buf[i] = phase_50ps & 0xff;
+ phase_50ps >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase, DPLL_WR_PHASE,
+ buf, sizeof(buf));
+
+ return err;
+}
+
static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct idtcm_channel *channel =
@@ -977,6 +1046,24 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
return err;
}
+static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ struct idtcm *idtcm = channel->idtcm;
+
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_adjphase(channel, delta);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
static int idtcm_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
@@ -1055,13 +1142,16 @@ static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 1,
+ .adjphase = &idtcm_adjphase,
.adjfreq = &idtcm_adjfreq,
.adjtime = &idtcm_adjtime,
.gettime64 = &idtcm_gettime,
.settime64 = &idtcm_settime,
.enable = &idtcm_enable,
+ .do_aux_work = &set_write_phase_ready,
};
+
static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
{
struct idtcm_channel *channel;
@@ -1146,6 +1236,8 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
if (!channel->ptp_clock)
return -ENOTSUPP;
+ channel->write_phase_ready = 0;
+
dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d\n",
index, channel->ptp_clock->index);
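To make the unit conversion in _idtcm_adjphase() above concrete, a worked example (numbers chosen for illustration only): a request of delta_ns = 1000 is widened to offset_ps = 1000 * 1000 = 1,000,000 ps, which is well inside the +/-107374182350 ps clamp (the 32-bit signed register range at 50 ps resolution), and then divides down to phase_50ps = 1,000,000 / 50 = 20,000 register units, written little-endian into the four DPLL_WR_PHASE bytes.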
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
index 6c1f93ab46f3..3de0eb72889c 100644
--- a/drivers/ptp/ptp_clockmatrix.h
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -15,6 +15,8 @@
#define FW_FILENAME "idtcm.bin"
#define MAX_PHC_PLL 4
+#define MAX_ABS_WRITE_PHASE_PICOSECONDS (107374182350LL)
+
#define PLL_MASK_ADDR (0xFFA5)
#define DEFAULT_PLL_MASK (0x04)
@@ -33,8 +35,9 @@
#define POST_SM_RESET_DELAY_MS (3000)
#define PHASE_PULL_IN_THRESHOLD_NS (150000)
-#define TOD_WRITE_OVERHEAD_COUNT_MAX (5)
-#define TOD_BYTE_COUNT (11)
+#define TOD_WRITE_OVERHEAD_COUNT_MAX (5)
+#define TOD_BYTE_COUNT (11)
+#define WR_PHASE_SETUP_MS (5000)
/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
enum pll_mode {
@@ -77,6 +80,7 @@ struct idtcm_channel {
u16 hw_dpll_n;
enum pll_mode pll_mode;
u16 output_mask;
+ int write_phase_ready;
};
struct idtcm {
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
index b63ac240308b..179f6c472e50 100644
--- a/drivers/ptp/ptp_idt82p33.c
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -23,12 +23,12 @@ MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
/* Module Parameters */
-u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC;
+static u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC;
module_param(sync_tod_timeout, uint, 0);
MODULE_PARM_DESC(sync_tod_timeout,
"duration in second to keep SYNC_TOD on (set to 0 to keep it always on)");
-u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
+static u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
module_param(phase_snap_threshold, uint, 0);
MODULE_PARM_DESC(phase_snap_threshold,
"threshold (150000ns by default) below which adjtime would ignore");
@@ -881,7 +881,7 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
/* Page size 128, last 4 bytes of page skipped */
if (((loaddr > 0x7b) && (loaddr <= 0x7f))
- || ((loaddr > 0xfb) && (loaddr <= 0xff)))
+ || loaddr > 0xfb)
continue;
err = idt82p33_write(idt82p33, _ADDR(page, loaddr),
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index 52d77db39829..7711651ff19e 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -783,16 +783,10 @@ static struct mii_timestamping_ctrl ines_ctrl = {
static int ines_ptp_ctrl_probe(struct platform_device *pld)
{
struct ines_clock *clock;
- struct resource *res;
void __iomem *addr;
int err = 0;
- res = platform_get_resource(pld, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pld->dev, "missing memory resource\n");
- return -EINVAL;
- }
- addr = devm_ioremap_resource(&pld->dev, res);
+ addr = devm_platform_ioremap_resource(pld, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
goto out;
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c
index fc7d0b77e118..658d33fc3195 100644
--- a/drivers/ptp/ptp_kvm.c
+++ b/drivers/ptp/ptp_kvm.c
@@ -22,7 +22,7 @@ struct kvm_ptp_clock {
struct ptp_clock_info caps;
};
-DEFINE_SPINLOCK(kvm_ptp_lock);
+static DEFINE_SPINLOCK(kvm_ptp_lock);
static struct pvclock_vsyscall_time_info *hv_clock;
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 3850a0f5f0bc..53120e68796e 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -63,12 +63,9 @@ config QETH
prompt "Gigabit Ethernet device support"
depends on CCW && NETDEVICES && IP_MULTICAST && QDIO && ETHERNET
help
- This driver supports the IBM System z OSA Express adapters
- in QDIO mode (all media types), HiperSockets interfaces and z/VM
- virtual NICs for Guest LAN and VSWITCH.
-
- For details please refer to the documentation provided by IBM at
- <http://www.ibm.com/developerworks/linux/linux390>
+ This driver supports IBM's OSA Express network adapters in QDIO mode,
+ HiperSockets interfaces and z/VM virtual NICs for Guest LAN and
+ VSWITCH.
To compile this driver as a module, choose M.
The module name is qeth.
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e0b26310ecab..51ea56b73a97 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -11,6 +11,7 @@
#define __QETH_CORE_H__
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
@@ -21,8 +22,10 @@
#include <linux/seq_file.h>
#include <linux/hashtable.h>
#include <linux/ip.h>
+#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
@@ -31,6 +34,7 @@
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>
+#include <net/route.h>
#include <net/sch_generic.h>
#include <net/tcp.h>
@@ -231,11 +235,7 @@ struct qeth_hdr_layer3 {
__u16 frame_offset;
union {
/* TX: */
- struct in6_addr ipv6_addr;
- struct ipv4 {
- u8 res[12];
- u32 addr;
- } ipv4;
+ struct in6_addr addr;
/* RX: */
struct rx {
u8 res1[2];
@@ -352,10 +352,15 @@ static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1,
struct qeth_hdr_layer3 *h2)
{
return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
- ipv6_addr_equal(&h1->next_hop.ipv6_addr,
- &h2->next_hop.ipv6_addr);
+ ipv6_addr_equal(&h1->next_hop.addr, &h2->next_hop.addr);
}
+struct qeth_local_addr {
+ struct hlist_node hnode;
+ struct rcu_head rcu;
+ struct in6_addr addr;
+};
+
enum qeth_qdio_info_states {
QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED,
@@ -688,6 +693,9 @@ struct qeth_card_info {
u8 promisc_mode:1;
u8 use_v1_blkt:1;
u8 is_vm_nic:1;
+ /* no bitfield, we take a pointer on these two: */
+ u8 has_lp2lp_cso_v6;
+ u8 has_lp2lp_cso_v4;
enum qeth_card_types type;
enum qeth_link_types link_type;
int broadcast_capable;
@@ -786,6 +794,7 @@ struct qeth_card {
struct qeth_channel data;
struct net_device *dev;
+ struct dentry *debugfs;
struct qeth_card_stats stats;
struct qeth_card_info info;
struct qeth_token token;
@@ -797,6 +806,10 @@ struct qeth_card {
wait_queue_head_t wait_q;
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
+ DECLARE_HASHTABLE(local_addrs4, 4);
+ DECLARE_HASHTABLE(local_addrs6, 4);
+ spinlock_t local_addrs4_lock;
+ spinlock_t local_addrs6_lock;
struct mutex ip_lock;
DECLARE_HASHTABLE(ip_mc_htable, 4);
struct work_struct rx_mode_work;
@@ -928,6 +941,25 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
return dst;
}
+static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb,
+ struct dst_entry *dst)
+{
+ struct rtable *rt = (struct rtable *) dst;
+
+ return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr;
+}
+
+static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
+ struct dst_entry *dst)
+{
+ struct rt6_info *rt = (struct rt6_info *) dst;
+
+ if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
+ return &rt->rt6i_gateway;
+ else
+ return &ipv6_hdr(skb)->daddr;
+}
+
static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
{
*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
@@ -1021,7 +1053,8 @@ struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
void qeth_put_cmd(struct qeth_cmd_buffer *iob);
-void qeth_schedule_recovery(struct qeth_card *);
+int qeth_schedule_recovery(struct qeth_card *card);
+void qeth_flush_local_addrs(struct qeth_card *card);
int qeth_poll(struct napi_struct *napi, int budget);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 569966bdc513..db8e069be3a0 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -26,6 +26,7 @@
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
+#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
@@ -60,6 +61,7 @@ EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
+static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;
static void qeth_issue_next_read_cb(struct qeth_card *card,
@@ -623,6 +625,257 @@ void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);
+static void qeth_flush_local_addrs4(struct qeth_card *card)
+{
+ struct qeth_local_addr *addr;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ spin_lock_irq(&card->local_addrs4_lock);
+ hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
+ hash_del_rcu(&addr->hnode);
+ kfree_rcu(addr, rcu);
+ }
+ spin_unlock_irq(&card->local_addrs4_lock);
+}
+
+static void qeth_flush_local_addrs6(struct qeth_card *card)
+{
+ struct qeth_local_addr *addr;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ spin_lock_irq(&card->local_addrs6_lock);
+ hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
+ hash_del_rcu(&addr->hnode);
+ kfree_rcu(addr, rcu);
+ }
+ spin_unlock_irq(&card->local_addrs6_lock);
+}
+
+void qeth_flush_local_addrs(struct qeth_card *card)
+{
+ qeth_flush_local_addrs4(card);
+ qeth_flush_local_addrs6(card);
+}
+EXPORT_SYMBOL_GPL(qeth_flush_local_addrs);
+
+static void qeth_add_local_addrs4(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs4 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs4_lock);
+ for (i = 0; i < cmd->count; i++) {
+ unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
+ struct qeth_local_addr *addr;
+ bool duplicate = false;
+
+ hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
+ if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ if (duplicate)
+ continue;
+
+ addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
+ if (!addr) {
+ dev_err(&card->gdev->dev,
+ "Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
+ &cmd->addrs[i].addr);
+ continue;
+ }
+
+ ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
+ hash_add_rcu(card->local_addrs4, &addr->hnode, key);
+ }
+ spin_unlock(&card->local_addrs4_lock);
+}
+
+static void qeth_add_local_addrs6(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs6 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs6_lock);
+ for (i = 0; i < cmd->count; i++) {
+ u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
+ struct qeth_local_addr *addr;
+ bool duplicate = false;
+
+ hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
+ if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ if (duplicate)
+ continue;
+
+ addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
+ if (!addr) {
+ dev_err(&card->gdev->dev,
+ "Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
+ &cmd->addrs[i].addr);
+ continue;
+ }
+
+ addr->addr = cmd->addrs[i].addr;
+ hash_add_rcu(card->local_addrs6, &addr->hnode, key);
+ }
+ spin_unlock(&card->local_addrs6_lock);
+}
+
+static void qeth_del_local_addrs4(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs4 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs4_lock);
+ for (i = 0; i < cmd->count; i++) {
+ struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
+ unsigned int key = ipv4_addr_hash(addr->addr);
+ struct qeth_local_addr *tmp;
+
+ hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
+ if (tmp->addr.s6_addr32[3] == addr->addr) {
+ hash_del_rcu(&tmp->hnode);
+ kfree_rcu(tmp, rcu);
+ break;
+ }
+ }
+ }
+ spin_unlock(&card->local_addrs4_lock);
+}
+
+static void qeth_del_local_addrs6(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs6 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs6_lock);
+ for (i = 0; i < cmd->count; i++) {
+ struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
+ u32 key = ipv6_addr_hash(&addr->addr);
+ struct qeth_local_addr *tmp;
+
+ hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
+ if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
+ hash_del_rcu(&tmp->hnode);
+ kfree_rcu(tmp, rcu);
+ break;
+ }
+ }
+ }
+ spin_unlock(&card->local_addrs6_lock);
+}
+
+static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
+ struct sk_buff *skb)
+{
+ struct qeth_local_addr *tmp;
+ bool is_local = false;
+ unsigned int key;
+ __be32 next_hop;
+
+ if (hash_empty(card->local_addrs4))
+ return false;
+
+ rcu_read_lock();
+ next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
+ key = ipv4_addr_hash(next_hop);
+
+ hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
+ if (tmp->addr.s6_addr32[3] == next_hop) {
+ is_local = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_local;
+}
+
+static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
+ struct sk_buff *skb)
+{
+ struct qeth_local_addr *tmp;
+ struct in6_addr *next_hop;
+ bool is_local = false;
+ u32 key;
+
+ if (hash_empty(card->local_addrs6))
+ return false;
+
+ rcu_read_lock();
+ next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
+ key = ipv6_addr_hash(next_hop);
+
+ hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
+ if (ipv6_addr_equal(&tmp->addr, next_hop)) {
+ is_local = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_local;
+}
+
+static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
+{
+ struct qeth_card *card = m->private;
+ struct qeth_local_addr *tmp;
+ unsigned int i;
+
+ rcu_read_lock();
+ hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
+ seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
+ hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
+ seq_printf(m, "%pI6c\n", &tmp->addr);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
+
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
{
@@ -686,9 +939,19 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
case IPA_CMD_MODCCID:
return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ qeth_add_local_addrs6(card, &cmd->data.local_addrs6);
+
QETH_CARD_TEXT(card, 3, "irla");
return NULL;
case IPA_CMD_UNREGISTER_LOCAL_ADDR:
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ qeth_del_local_addrs6(card, &cmd->data.local_addrs6);
+
QETH_CARD_TEXT(card, 3, "urla");
return NULL;
default:
@@ -868,16 +1131,18 @@ static int qeth_set_thread_start_bit(struct qeth_card *card,
unsigned long thread)
{
unsigned long flags;
+ int rc = 0;
spin_lock_irqsave(&card->thread_mask_lock, flags);
- if (!(card->thread_allowed_mask & thread) ||
- (card->thread_start_mask & thread)) {
- spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- return -EPERM;
- }
- card->thread_start_mask |= thread;
+ if (!(card->thread_allowed_mask & thread))
+ rc = -EPERM;
+ else if (card->thread_start_mask & thread)
+ rc = -EBUSY;
+ else
+ card->thread_start_mask |= thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- return 0;
+
+ return rc;
}
static void qeth_clear_thread_start_bit(struct qeth_card *card,
@@ -930,11 +1195,17 @@ static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
return rc;
}
-void qeth_schedule_recovery(struct qeth_card *card)
+int qeth_schedule_recovery(struct qeth_card *card)
{
+ int rc;
+
QETH_CARD_TEXT(card, 2, "startrec");
- if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
+
+ rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
+ if (!rc)
schedule_work(&card->kernel_thread_starter);
+
+ return rc;
}
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
@@ -1376,6 +1647,10 @@ static void qeth_setup_card(struct qeth_card *card)
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
+ hash_init(card->local_addrs4);
+ hash_init(card->local_addrs6);
+ spin_lock_init(&card->local_addrs4_lock);
+ spin_lock_init(&card->local_addrs6_lock);
}
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
@@ -1412,6 +1687,11 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
if (!card->read_cmd)
goto out_read_cmd;
+ card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
+ qeth_debugfs_root);
+ debugfs_create_file("local_addrs", 0400, card->debugfs, card,
+ &qeth_debugfs_local_addr_fops);
+
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
@@ -3345,11 +3625,11 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
int count)
{
+ struct qeth_qdio_out_buffer *buf = queue->bufs[index];
+ unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
struct qeth_card *card = queue->card;
- struct qeth_qdio_out_buffer *buf;
int rc;
int i;
- unsigned int qdio_flags;
for (i = index; i < index + count; ++i) {
unsigned int bidx = QDIO_BUFNR(i);
@@ -3366,9 +3646,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (IS_IQD(card)) {
skb_queue_walk(&buf->skb_list, skb)
skb_tx_timestamp(skb);
- continue;
}
+ }
+ if (!IS_IQD(card)) {
if (!queue->do_pack) {
if ((atomic_read(&queue->used_buffers) >=
(QETH_HIGH_WATERMARK_PACK -
@@ -3393,12 +3674,12 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
}
}
+
+ if (atomic_read(&queue->set_pci_flags_count))
+ qdio_flags |= QDIO_FLAG_PCI_OUT;
}
QETH_TXQ_STAT_INC(queue, doorbell);
- qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
- if (atomic_read(&queue->set_pci_flags_count))
- qdio_flags |= QDIO_FLAG_PCI_OUT;
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
@@ -3809,15 +4090,47 @@ static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}
-static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
- struct qeth_qdio_out_buffer *buf,
- bool is_first_elem, unsigned int offset)
+/**
+ * qeth_fill_buffer() - map skb into an output buffer
+ * @buf: buffer to transport the skb
+ * @skb: skb to map into the buffer
+ * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
+ * from qeth_core_header_cache.
+ * @offset: when mapping the skb, start at skb->data + offset
+ * @hd_len: if > 0, build a dedicated header element of this size
+ */
+static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int offset, unsigned int hd_len)
{
struct qdio_buffer *buffer = buf->buffer;
int element = buf->next_element_to_fill;
int length = skb_headlen(skb) - offset;
char *data = skb->data + offset;
unsigned int elem_length, cnt;
+ bool is_first_elem = true;
+
+ __skb_queue_tail(&buf->skb_list, skb);
+
+ /* build dedicated element for HW Header */
+ if (hd_len) {
+ is_first_elem = false;
+
+ buffer->element[element].addr = virt_to_phys(hdr);
+ buffer->element[element].length = hd_len;
+ buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
+
+ /* HW header is allocated from cache: */
+ if ((void *)hdr != skb->data)
+ buf->is_header[element] = 1;
+ /* HW header was pushed and is contiguous with linear part: */
+ else if (length > 0 && !PAGE_ALIGNED(data) &&
+ (data == (char *)hdr + hd_len))
+ buffer->element[element].eflags |=
+ SBAL_EFLAGS_CONTIGUOUS;
+
+ element++;
+ }
/* map linear part into buffer element(s) */
while (length > 0) {
@@ -3871,40 +4184,6 @@ static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
return element;
}
-/**
- * qeth_fill_buffer() - map skb into an output buffer
- * @buf: buffer to transport the skb
- * @skb: skb to map into the buffer
- * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
- * from qeth_core_header_cache.
- * @offset: when mapping the skb, start at skb->data + offset
- * @hd_len: if > 0, build a dedicated header element of this size
- */
-static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len)
-{
- struct qdio_buffer *buffer = buf->buffer;
- bool is_first_elem = true;
-
- __skb_queue_tail(&buf->skb_list, skb);
-
- /* build dedicated header element */
- if (hd_len) {
- int element = buf->next_element_to_fill;
- is_first_elem = false;
-
- buffer->element[element].addr = virt_to_phys(hdr);
- buffer->element[element].length = hd_len;
- buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
- /* remember to free cache-allocated qeth_hdr: */
- buf->is_header[element] = ((void *)hdr != skb->data);
- buf->next_element_to_fill++;
- }
-
- return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
-}
-
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, unsigned int elements,
struct qeth_hdr *hdr, unsigned int offset,
@@ -4889,9 +5168,11 @@ out_free_nothing:
static void qeth_core_free_card(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "freecrd");
+
+ unregister_service_level(&card->qeth_service_level);
+ debugfs_remove_recursive(card->debugfs);
qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
- unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);
kfree(card);
}
@@ -6300,7 +6581,7 @@ static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
}
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
- enum qeth_prot_versions prot)
+ enum qeth_prot_versions prot, u8 *lp2lp)
{
u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
struct qeth_cmd_buffer *iob;
@@ -6352,18 +6633,17 @@ static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
- if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
- cstype == IPA_OUTBOUND_CHECKSUM)
- dev_warn(&card->gdev->dev,
- "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
- QETH_CARD_IFNAME(card));
+
+ if (lp2lp)
+ *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
+
return 0;
}
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
- enum qeth_prot_versions prot)
+ enum qeth_prot_versions prot, u8 *lp2lp)
{
- return on ? qeth_set_csum_on(card, cstype, prot) :
+ return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
qeth_set_csum_off(card, cstype, prot);
}
@@ -6451,13 +6731,13 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
- QETH_PROT_IPV4);
+ QETH_PROT_IPV4, NULL);
if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
/* no/one Offload Assist available, so the rc is trivial */
return rc_ipv4;
rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
- QETH_PROT_IPV6);
+ QETH_PROT_IPV6, NULL);
if (on)
/* enable: success if any Assist is active */
@@ -6493,6 +6773,24 @@ void qeth_enable_hw_features(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
+static void qeth_check_restricted_features(struct qeth_card *card,
+ netdev_features_t changed,
+ netdev_features_t actual)
+{
+ netdev_features_t ipv6_features = NETIF_F_TSO6;
+ netdev_features_t ipv4_features = NETIF_F_TSO;
+
+ if (!card->info.has_lp2lp_cso_v6)
+ ipv6_features |= NETIF_F_IPV6_CSUM;
+ if (!card->info.has_lp2lp_cso_v4)
+ ipv4_features |= NETIF_F_IP_CSUM;
+
+ if ((changed & ipv6_features) && !(actual & ipv6_features))
+ qeth_flush_local_addrs6(card);
+ if ((changed & ipv4_features) && !(actual & ipv4_features))
+ qeth_flush_local_addrs4(card);
+}
+
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
@@ -6504,13 +6802,15 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
if ((changed & NETIF_F_IP_CSUM)) {
rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
- IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
+ &card->info.has_lp2lp_cso_v4);
if (rc)
changed ^= NETIF_F_IP_CSUM;
}
if (changed & NETIF_F_IPV6_CSUM) {
rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
- IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
+ &card->info.has_lp2lp_cso_v6);
if (rc)
changed ^= NETIF_F_IPV6_CSUM;
}
@@ -6532,6 +6832,9 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
changed ^= NETIF_F_TSO6;
}
+ qeth_check_restricted_features(card, dev->features ^ features,
+ dev->features ^ changed);
+
/* everything changed successfully? */
if ((dev->features ^ features) == changed)
return 0;
@@ -6568,6 +6871,34 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
+ /* Traffic with local next-hop is not eligible for some offloads: */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ struct qeth_card *card = dev->ml_priv;
+ netdev_features_t restricted = 0;
+
+ if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
+ restricted |= NETIF_F_ALL_TSO;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ if (!card->info.has_lp2lp_cso_v4)
+ restricted |= NETIF_F_IP_CSUM;
+
+ if (restricted && qeth_next_hop_is_local_v4(card, skb))
+ features &= ~restricted;
+ break;
+ case htons(ETH_P_IPV6):
+ if (!card->info.has_lp2lp_cso_v6)
+ restricted |= NETIF_F_IPV6_CSUM;
+
+ if (restricted && qeth_next_hop_is_local_v6(card, skb))
+ features &= ~restricted;
+ break;
+ default:
+ break;
+ }
+ }
+
/* GSO segmentation builds skbs with
* a (small) linear part for the headers, and
* page frags for the data.
@@ -6745,6 +7076,8 @@ static int __init qeth_core_init(void)
pr_info("loading core functions\n");
+ qeth_debugfs_root = debugfs_create_dir("qeth", NULL);
+
rc = qeth_register_dbf_views();
if (rc)
goto dbf_err;
@@ -6786,6 +7119,7 @@ slab_err:
register_err:
qeth_unregister_dbf_views();
dbf_err:
+ debugfs_remove_recursive(qeth_debugfs_root);
pr_err("Initializing the qeth device driver failed\n");
return rc;
}
@@ -6799,6 +7133,7 @@ static void __exit qeth_core_exit(void)
kmem_cache_destroy(qeth_core_header_cache);
root_device_unregister(qeth_core_root_dev);
qeth_unregister_dbf_views();
+ debugfs_remove_recursive(qeth_debugfs_root);
pr_info("core functions removed\n");
}
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index d89a04bfd8b0..9d6f39d8f9ab 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -772,6 +772,29 @@ struct qeth_ipacmd_addr_change {
struct qeth_ipacmd_addr_change_entry entry[];
} __packed;
+/* [UN]REGISTER_LOCAL_ADDRESS notifications */
+struct qeth_ipacmd_local_addr4 {
+ __be32 addr;
+ u32 flags;
+};
+
+struct qeth_ipacmd_local_addrs4 {
+ u32 count;
+ u32 addr_length;
+ struct qeth_ipacmd_local_addr4 addrs[];
+};
+
+struct qeth_ipacmd_local_addr6 {
+ struct in6_addr addr;
+ u32 flags;
+};
+
+struct qeth_ipacmd_local_addrs6 {
+ u32 count;
+ u32 addr_length;
+ struct qeth_ipacmd_local_addr6 addrs[];
+};
+
/* Header for each IPA command */
struct qeth_ipacmd_hdr {
__u8 command;
@@ -803,6 +826,8 @@ struct qeth_ipa_cmd {
struct qeth_ipacmd_setbridgeport sbp;
struct qeth_ipacmd_addr_change addrchange;
struct qeth_ipacmd_vnicc vnicc;
+ struct qeth_ipacmd_local_addrs4 local_addrs4;
+ struct qeth_ipacmd_local_addrs6 local_addrs6;
} data;
} __attribute__ ((packed));
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index d7e429f6631e..c901c942fed7 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -275,17 +275,20 @@ static ssize_t qeth_dev_recover_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
- int i;
+ bool reset;
+ int rc;
+
+ rc = kstrtobool(buf, &reset);
+ if (rc)
+ return rc;
if (!qeth_card_hw_is_reachable(card))
return -EPERM;
- i = simple_strtoul(buf, &tmp, 16);
- if (i == 1)
- qeth_schedule_recovery(card);
+ if (reset)
+ rc = qeth_schedule_recovery(card);
- return count;
+ return rc ? rc : count;
}
static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0bd5b09e7a22..da47e423e1b1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -291,6 +291,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
qeth_qdio_clear_card(card, 0);
qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
+ qeth_flush_local_addrs(card);
card->info.promisc_mode = 0;
}
@@ -709,6 +710,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
+ netif_keep_dst(card->dev);
netif_set_gso_max_size(card->dev,
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0742a749d26e..1e50aa0297a3 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1176,6 +1176,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
qeth_qdio_clear_card(card, 0);
qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
+ qeth_flush_local_addrs(card);
card->info.promisc_mode = 0;
}
@@ -1693,8 +1694,8 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
if (skb->protocol == htons(ETH_P_AF_IUCV)) {
l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
- l3_hdr->next_hop.ipv6_addr.s6_addr16[0] = htons(0xfe80);
- memcpy(&l3_hdr->next_hop.ipv6_addr.s6_addr32[2],
+ l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
+ memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
iucv_trans_hdr(skb)->destUserID, 8);
return;
}
@@ -1728,18 +1729,10 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);
if (ipv == 4) {
- struct rtable *rt = (struct rtable *) dst;
-
- *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
- rt_nexthop(rt, ip_hdr(skb)->daddr) :
- ip_hdr(skb)->daddr;
+ l3_hdr->next_hop.addr.s6_addr32[3] =
+ qeth_next_hop_v4_rcu(skb, dst);
} else if (ipv == 6) {
- struct rt6_info *rt = (struct rt6_info *) dst;
-
- if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
- l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
- else
- l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;
+ l3_hdr->next_hop.addr = *qeth_next_hop_v6_rcu(skb, dst);
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
if (!IS_IQD(card))
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 804b8ba9bf5c..23a1377971f4 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -669,6 +669,7 @@ int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
eqcr_ci = s->eqcr.ci;
p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 6ceee98ed6ff..b97a5c32d44a 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -400,7 +400,8 @@ int ssb_bus_scan(struct ssb_bus *bus,
#ifdef CONFIG_SSB_DRIVER_PCICORE
if (bus->bustype == SSB_BUSTYPE_PCI) {
/* Ignore PCI cores on PCI-E cards.
- * Ignore PCI-E cores on PCI cards. */
+ * Ignore PCI-E cores on PCI cards.
+ */
if (dev->id.coreid == SSB_DEV_PCI) {
if (pci_is_pcie(bus->host_pci))
continue;
@@ -421,7 +422,8 @@ int ssb_bus_scan(struct ssb_bus *bus,
if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM &&
(bus->host_pci->device & 0xFF00) == 0x4300) {
/* This is a dangling ethernet core on a
- * wireless device. Ignore it. */
+ * wireless device. Ignore it.
+ */
continue;
}
}
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 52d2e0f33be7..42d620cee8a9 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -78,7 +78,8 @@ ssize_t ssb_attr_sprom_show(struct ssb_bus *bus, char *buf,
/* Use interruptible locking, as the SPROM write might
* be holding the lock for several seconds. So allow userspace
- * to cancel operation. */
+ * to cancel operation.
+ */
err = -ERESTARTSYS;
if (mutex_lock_interruptible(&bus->sprom_mutex))
goto out_kfree;
@@ -121,7 +122,8 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
/* Use interruptible locking, as the SPROM write might
* be holding the lock for several seconds. So allow userspace
- * to cancel operation. */
+ * to cancel operation.
+ */
err = -ERESTARTSYS;
if (mutex_lock_interruptible(&bus->sprom_mutex))
goto out_kfree;
@@ -188,9 +190,11 @@ int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out)
bool ssb_is_sprom_available(struct ssb_bus *bus)
{
/* status register only exists on chipcomon rev >= 11 and we need check
- for >= 31 only */
+ * for >= 31 only
+ */
/* this routine differs from specs as we do not access SPROM directly
- on PCMCIA */
+ * on PCMCIA
+ */
if (bus->bustype == SSB_BUSTYPE_PCI &&
bus->chipco.dev && /* can be unavailable! */
bus->chipco.dev->id.revision >= 31)
diff --git a/drivers/staging/fsl-dpaa2/ethsw/README b/drivers/staging/fsl-dpaa2/ethsw/README
index f6fc07f780d1..b48dcbf7c5fb 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/README
+++ b/drivers/staging/fsl-dpaa2/ethsw/README
@@ -79,7 +79,7 @@ The DPSW can have ports connected to DPNIs or to PHYs via DPMACs.
For a more detailed description of the Ethernet switch device driver model
see:
- Documentation/networking/switchdev.txt
+ Documentation/networking/switchdev.rst
Creating an Ethernet Switch
===========================
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2927f02cc7e1..516519dcc8ff 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -747,6 +747,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
xdp->data = buf + pad;
xdp->data_end = xdp->data + len;
hdr->buflen = buflen;
+ xdp->frame_sz = buflen;
--net->refcnt_bias;
alloc_frag->offset += buflen;