Diffstat (limited to 'drivers')
-rw-r--r--drivers/bluetooth/btmrvl_drv.h4
-rw-r--r--drivers/bluetooth/btmrvl_main.c19
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c103
-rw-r--r--drivers/bluetooth/btmrvl_sdio.h3
-rw-r--r--drivers/bluetooth/hci_h4.c7
-rw-r--r--drivers/clk/ti/clk-43xx.c16
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c111
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/bonding/bond_alb.c116
-rw-r--r--drivers/net/bonding/bond_alb.h1
-rw-r--r--drivers/net/bonding/bond_main.c24
-rw-r--r--drivers/net/bonding/bond_options.c56
-rw-r--r--drivers/net/bonding/bond_options.h2
-rw-r--r--drivers/net/bonding/bond_sysfs.c553
-rw-r--r--drivers/net/bonding/bonding.h26
-rw-r--r--drivers/net/can/Kconfig8
-rw-r--r--drivers/net/can/Makefile2
-rw-r--r--drivers/net/can/c_can/c_can_pci.c51
-rw-r--r--drivers/net/can/softing/softing_main.c20
-rw-r--r--drivers/net/can/spi/Kconfig10
-rw-r--r--drivers/net/can/spi/Makefile8
-rw-r--r--drivers/net/can/spi/mcp251x.c (renamed from drivers/net/can/mcp251x.c)93
-rw-r--r--drivers/net/can/usb/Kconfig4
-rw-r--r--drivers/net/can/usb/kvaser_usb.c53
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c2
-rw-r--r--drivers/net/dsa/mv88e6131.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx.c12
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.c15
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c33
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig11
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c1614
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h677
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h14
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c370
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c193
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c251
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c19
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h41
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c50
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c278
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c111
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c22
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h41
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c316
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c15
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c93
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h72
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h96
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h96
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c48
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c56
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h49
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c52
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h54
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h49
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c112
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c47
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c161
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c319
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c49
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c246
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/reset.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c9
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h33
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c44
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c32
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c31
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c56
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c37
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c188
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c119
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c68
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c7
-rw-r--r--drivers/net/ethernet/ti/cpmac.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw.c56
-rw-r--r--drivers/net/ethernet/ti/cpts.c11
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c48
-rw-r--r--drivers/net/ethernet/via/Kconfig2
-rw-r--r--drivers/net/ethernet/via/via-rhine.c506
-rw-r--r--drivers/net/hyperv/hyperv_net.h159
-rw-r--r--drivers/net/hyperv/netvsc.c529
-rw-r--r--drivers/net/hyperv/netvsc_drv.c111
-rw-r--r--drivers/net/hyperv/rndis_filter.c193
-rw-r--r--drivers/net/ieee802154/at86rf230.c133
-rw-r--r--drivers/net/irda/w83977af_ir.c33
-rw-r--r--drivers/net/macvlan.c119
-rw-r--r--drivers/net/phy/at803x.c39
-rw-r--r--drivers/net/phy/mdio_bus.c67
-rw-r--r--drivers/net/phy/micrel.c106
-rw-r--r--drivers/net/phy/phy_device.c3
-rw-r--r--drivers/net/phy/smsc.c3
-rw-r--r--drivers/net/phy/vitesse.c3
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/vxlan.c63
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c356
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c85
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h18
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c109
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h21
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c513
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c766
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c219
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c183
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h98
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig30
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c9
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c23
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c31
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c19
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c109
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c11
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h50
-rw-r--r--drivers/net/wireless/b43/b43.h4
-rw-r--r--drivers/net/wireless/b43/main.c97
-rw-r--r--drivers/net/wireless/b43/phy_common.c6
-rw-r--r--drivers/net/wireless/b43/phy_g.c6
-rw-r--r--drivers/net/wireless/b43/phy_n.c304
-rw-r--r--drivers/net/wireless/b43/radio_2056.c418
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c72
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h3
-rw-r--r--drivers/net/wireless/b43/wa.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c3
-rw-r--r--drivers/net/wireless/cw1200/sta.c3
-rw-r--r--drivers/net/wireless/cw1200/sta.h3
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/common.c3
-rw-r--r--drivers/net/wireless/iwlegacy/common.h3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c34
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c94
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c15
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h38
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h26
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h46
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h54
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c49
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h26
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c56
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c272
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c330
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h24
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c45
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sf.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c177
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c71
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c46
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h9
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c84
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c47
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c62
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c21
-rw-r--r--drivers/net/wireless/mwifiex/README7
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c2
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c25
-rw-r--r--drivers/net/wireless/mwifiex/decl.h8
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h2
-rw-r--r--drivers/net/wireless/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.h2
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c3
-rw-r--r--drivers/net/wireless/mwifiex/scan.c37
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c9
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h18
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c8
-rw-r--r--drivers/net/wireless/mwifiex/usb.c55
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c9
-rw-r--r--drivers/net/wireless/p54/main.c3
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/Makefile4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h6
-rw-r--r--drivers/net/wireless/rtlwifi/core.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c28
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c23
-rw-r--r--drivers/ptp/ptp_clock.c5
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/qeth_core.h6
-rw-r--r--drivers/s390/net/qeth_core_main.c77
-rw-r--r--drivers/s390/net/qeth_core_sys.c22
-rw-r--r--drivers/s390/net/qeth_l2_main.c11
-rw-r--r--drivers/s390/net/qeth_l3_main.c7
-rw-r--r--drivers/staging/rtl8821ae/core.c14
334 files changed, 11023 insertions, 6192 deletions
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 7399303d7d99..dc79f88f8717 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -59,6 +59,8 @@ struct btmrvl_device {
};
struct btmrvl_adapter {
+ void *hw_regs_buf;
+ u8 *hw_regs;
u32 int_count;
struct sk_buff_head tx_queue;
u8 psmode;
@@ -140,7 +142,7 @@ void btmrvl_interrupt(struct btmrvl_private *priv);
bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd);
int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
int btmrvl_enable_ps(struct btmrvl_private *priv);
int btmrvl_prepare_command(struct btmrvl_private *priv);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 2c4997ce2484..e9dbddb0b8f1 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -24,6 +24,7 @@
#include <net/bluetooth/hci_core.h>
#include "btmrvl_drv.h"
+#include "btmrvl_sdio.h"
#define VERSION "1.0"
@@ -201,7 +202,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
return 0;
}
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd)
{
int ret;
@@ -337,10 +338,25 @@ static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
static void btmrvl_init_adapter(struct btmrvl_private *priv)
{
+ int buf_size;
+
skb_queue_head_init(&priv->adapter->tx_queue);
priv->adapter->ps_state = PS_AWAKE;
+ buf_size = ALIGN_SZ(SDIO_BLOCK_SIZE, BTSDIO_DMA_ALIGN);
+ priv->adapter->hw_regs_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!priv->adapter->hw_regs_buf) {
+ priv->adapter->hw_regs = NULL;
+ BT_ERR("Unable to allocate buffer for hw_regs.");
+ } else {
+ priv->adapter->hw_regs =
+ (u8 *)ALIGN_ADDR(priv->adapter->hw_regs_buf,
+ BTSDIO_DMA_ALIGN);
+ BT_DBG("hw_regs_buf=%p hw_regs=%p",
+ priv->adapter->hw_regs_buf, priv->adapter->hw_regs);
+ }
+
init_waitqueue_head(&priv->adapter->cmd_wait_q);
}
@@ -348,6 +364,7 @@ static void btmrvl_free_adapter(struct btmrvl_private *priv)
{
skb_queue_purge(&priv->adapter->tx_queue);
+ kfree(priv->adapter->hw_regs_buf);
kfree(priv->adapter);
priv->adapter = NULL;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 1b52c9f5230d..9dedca516ff5 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -64,6 +64,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
.io_port_0 = 0x00,
.io_port_1 = 0x01,
.io_port_2 = 0x02,
+ .int_read_to_clear = false,
};
static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
@@ -80,6 +81,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.io_port_0 = 0x78,
.io_port_1 = 0x79,
.io_port_2 = 0x7a,
+ .int_read_to_clear = false,
};
static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
@@ -97,6 +99,9 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
.io_port_0 = 0xd8,
.io_port_1 = 0xd9,
.io_port_2 = 0xda,
+ .int_read_to_clear = true,
+ .host_int_rsr = 0x01,
+ .card_misc_cfg = 0xcc,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
@@ -667,46 +672,78 @@ static int btmrvl_sdio_process_int_status(struct btmrvl_private *priv)
return 0;
}
-static void btmrvl_sdio_interrupt(struct sdio_func *func)
+static int btmrvl_sdio_read_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
{
- struct btmrvl_private *priv;
- struct btmrvl_sdio_card *card;
- ulong flags;
- u8 ireg = 0;
+ struct btmrvl_adapter *adapter = card->priv->adapter;
int ret;
- card = sdio_get_drvdata(func);
- if (!card || !card->priv) {
- BT_ERR("sbi_interrupt(%p) card or priv is "
- "NULL, card=%p\n", func, card);
- return;
+ ret = sdio_readsb(card->func, adapter->hw_regs, 0, SDIO_BLOCK_SIZE);
+ if (ret) {
+ BT_ERR("sdio_readsb: read int hw_regs failed: %d", ret);
+ return ret;
}
- priv = card->priv;
+ *ireg = adapter->hw_regs[card->reg->host_intstatus];
+ BT_DBG("hw_regs[%#x]=%#x", card->reg->host_intstatus, *ireg);
+
+ return 0;
+}
- ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
+static int btmrvl_sdio_write_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
+{
+ int ret;
+
+ *ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
if (ret) {
- BT_ERR("sdio_readb: read int status register failed");
- return;
+ BT_ERR("sdio_readb: read int status failed: %d", ret);
+ return ret;
}
- if (ireg != 0) {
+ if (*ireg) {
/*
* DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
* Clear the interrupt status register and re-enable the
* interrupt.
*/
- BT_DBG("ireg = 0x%x", ireg);
+ BT_DBG("int_status = 0x%x", *ireg);
- sdio_writeb(card->func, ~(ireg) & (DN_LD_HOST_INT_STATUS |
- UP_LD_HOST_INT_STATUS),
- card->reg->host_intstatus, &ret);
+ sdio_writeb(card->func, ~(*ireg) & (DN_LD_HOST_INT_STATUS |
+ UP_LD_HOST_INT_STATUS),
+ card->reg->host_intstatus, &ret);
if (ret) {
- BT_ERR("sdio_writeb: clear int status register failed");
- return;
+ BT_ERR("sdio_writeb: clear int status failed: %d", ret);
+ return ret;
}
}
+ return 0;
+}
+
+static void btmrvl_sdio_interrupt(struct sdio_func *func)
+{
+ struct btmrvl_private *priv;
+ struct btmrvl_sdio_card *card;
+ ulong flags;
+ u8 ireg = 0;
+ int ret;
+
+ card = sdio_get_drvdata(func);
+ if (!card || !card->priv) {
+ BT_ERR("sbi_interrupt(%p) card or priv is "
+ "NULL, card=%p\n", func, card);
+ return;
+ }
+
+ priv = card->priv;
+
+ if (card->reg->int_read_to_clear)
+ ret = btmrvl_sdio_read_to_clear(card, &ireg);
+ else
+ ret = btmrvl_sdio_write_to_clear(card, &ireg);
+
+ if (ret)
+ return;
+
spin_lock_irqsave(&priv->driver_lock, flags);
sdio_ireg |= ireg;
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -777,6 +814,30 @@ static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card)
BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport);
+ if (card->reg->int_read_to_clear) {
+ reg = sdio_readb(func, card->reg->host_int_rsr, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+ sdio_writeb(func, reg | 0x3f, card->reg->host_int_rsr, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+
+ reg = sdio_readb(func, card->reg->card_misc_cfg, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+ sdio_writeb(func, reg | 0x10, card->reg->card_misc_cfg, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+ }
+
sdio_set_drvdata(func, card);
sdio_release_host(func);
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 43d35a609ca9..d4dd3b0fa53d 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -78,6 +78,9 @@ struct btmrvl_sdio_card_reg {
u8 io_port_0;
u8 io_port_1;
u8 io_port_2;
+ bool int_read_to_clear;
+ u8 host_int_rsr;
+ u8 card_misc_cfg;
};
struct btmrvl_sdio_card {
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 7048a583fe51..66db9a803373 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -55,13 +55,6 @@ struct h4_struct {
struct sk_buff_head txq;
};
-/* H4 receiver States */
-#define H4_W4_PACKET_TYPE 0
-#define H4_W4_EVENT_HDR 1
-#define H4_W4_ACL_HDR 2
-#define H4_W4_SCO_HDR 3
-#define H4_W4_DATA 4
-
/* Initialize protocol */
static int h4_open(struct hci_uart *hu)
{
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 67c8de572c50..b4877e0ee910 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -110,9 +110,25 @@ static struct ti_dt_clk am43xx_clks[] = {
int __init am43xx_dt_clk_init(void)
{
+ struct clk *clk1, *clk2;
+
ti_dt_clocks_register(am43xx_clks);
omap2_clk_disable_autoidle_all();
+ /*
+ * cpsw_cpts_rft_clk can be fed by one of three clock sources:
+ * dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck.
+ * By default dpll_core_m4_ck is selected; with this as the clock
+ * source the CPTS does not work properly. It gives clockcheck errors
+ * while running PTP:
+ * clockcheck: clock jumped backward or running slower than expected!
+ * Selecting dpll_core_m5_ck as the clock source fixes this issue.
+ * On AM335x dpll_core_m5_ck is the default clock source.
+ */
+ clk1 = clk_get_sys(NULL, "cpsw_cpts_rft_clk");
+ clk2 = clk_get_sys(NULL, "dpll_core_m5_ck");
+ clk_set_parent(clk1, clk2);
+
return 0;
}
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 414dbf6da89a..fc9f9d03fa13 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -197,25 +197,6 @@ typedef struct _hfc4s8s_hw {
-/***************************/
-/* inline function defines */
-/***************************/
-#ifdef HISAX_HFC4S8S_PCIMEM /* inline functions memory mapped */
-
-/* memory write and dummy IO read to avoid PCI byte merge problems */
-#define Write_hfc8(a, b, c) {(*((volatile u_char *)(a->membase + b)) = c); inb(a->iobase + 4);}
-/* memory write without dummy IO access for fifo data access */
-#define fWrite_hfc8(a, b, c) (*((volatile u_char *)(a->membase + b)) = c)
-#define Read_hfc8(a, b) (*((volatile u_char *)(a->membase + b)))
-#define Write_hfc16(a, b, c) (*((volatile unsigned short *)(a->membase + b)) = c)
-#define Read_hfc16(a, b) (*((volatile unsigned short *)(a->membase + b)))
-#define Write_hfc32(a, b, c) (*((volatile unsigned long *)(a->membase + b)) = c)
-#define Read_hfc32(a, b) (*((volatile unsigned long *)(a->membase + b)))
-#define wait_busy(a) {while ((Read_hfc8(a, R_STATUS) & M_BUSY));}
-#define PCI_ENA_MEMIO 0x03
-
-#else
-
/* inline functions io mapped */
static inline void
SetRegAddr(hfc4s8s_hw *a, u_char b)
@@ -306,8 +287,6 @@ wait_busy(hfc4s8s_hw *a)
#define PCI_ENA_REGIO 0x01
-#endif /* HISAX_HFC4S8S_PCIMEM */
-
/******************************************************/
/* function to read critical counter registers that */
/* may be updated by the chip during read */
@@ -724,26 +703,15 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
return;
} else {
/* read erroneous D frame */
-
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
fRead_hfc32(l1p->hw);
-#endif
z1 -= 4;
}
while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
- fRead_hfc8(l1p->hw);
-#endif
+ fRead_hfc8(l1p->hw);
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);
wait_busy(l1p->hw);
@@ -753,27 +721,16 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
cp = skb->data;
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- *((unsigned long *) cp) =
- Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
*((unsigned long *) cp) = fRead_hfc32(l1p->hw);
-#endif
cp += 4;
z1 -= 4;
}
while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- *cp++ = Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
- *cp++ = fRead_hfc8(l1p->hw);
-#endif
+ *cp++ = fRead_hfc8(l1p->hw);
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
wait_busy(l1p->hw);
@@ -859,28 +816,17 @@ rx_b_frame(struct hfc4s8s_btype *bch)
wait_busy(l1->hw);
return;
}
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- *((unsigned long *) bch->rx_ptr) =
- Read_hfc32(l1->hw, A_FIFO_DATA0);
-#else
*((unsigned long *) bch->rx_ptr) =
fRead_hfc32(l1->hw);
-#endif
bch->rx_ptr += 4;
z1 -= 4;
}
while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- *(bch->rx_ptr++) = Read_hfc8(l1->hw, A_FIFO_DATA0);
-#else
- *(bch->rx_ptr++) = fRead_hfc8(l1->hw);
-#endif
+ *(bch->rx_ptr++) = fRead_hfc8(l1->hw);
if (hdlc_complete) {
/* increment f counter */
@@ -940,29 +886,17 @@ tx_d_frame(struct hfc4s8s_l1 *l1p)
if ((skb = skb_dequeue(&l1p->d_tx_queue))) {
cp = skb->data;
cnt = skb->len;
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- fWrite_hfc32(l1p->hw, A_FIFO_DATA0,
- *(unsigned long *) cp);
-#else
SetRegAddr(l1p->hw, A_FIFO_DATA0);
fWrite_hfc32(l1p->hw, *(unsigned long *) cp);
-#endif
cp += 4;
cnt -= 4;
}
-#ifdef HISAX_HFC4S8S_PCIMEM
- while (cnt--)
- fWrite_hfc8(l1p->hw, A_FIFO_DATA0, *cp++);
-#else
while (cnt--)
fWrite_hfc8(l1p->hw, *cp++);
-#endif
l1p->tx_cnt = skb->truesize;
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
@@ -1037,26 +971,15 @@ tx_b_frame(struct hfc4s8s_btype *bch)
cp = skb->data + bch->tx_cnt;
bch->tx_cnt += cnt;
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- fWrite_hfc32(l1->hw, A_FIFO_DATA0,
- *(unsigned long *) cp);
-#else
fWrite_hfc32(l1->hw, *(unsigned long *) cp);
-#endif
cp += 4;
cnt -= 4;
}
while (cnt--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- fWrite_hfc8(l1->hw, A_FIFO_DATA0, *cp++);
-#else
- fWrite_hfc8(l1->hw, *cp++);
-#endif
+ fWrite_hfc8(l1->hw, *cp++);
if (bch->tx_cnt >= skb->len) {
if (bch->mode == L1_MODE_HDLC) {
@@ -1281,10 +1204,8 @@ hfc4s8s_interrupt(int intno, void *dev_id)
if (!hw || !(hw->mr.r_irq_ctrl & M_GLOB_IRQ_EN))
return IRQ_NONE;
-#ifndef HISAX_HFC4S8S_PCIMEM
/* read currently selected register */
old_ioreg = GetRegAddr(hw);
-#endif
/* Layer 1 State change */
hw->mr.r_irq_statech |=
@@ -1292,9 +1213,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
if (!
(b = (Read_hfc8(hw, R_STATUS) & (M_MISC_IRQSTA | M_FR_IRQSTA)))
&& !hw->mr.r_irq_statech) {
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(hw, old_ioreg);
-#endif
return IRQ_NONE;
}
@@ -1322,9 +1241,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
/* queue the request to allow other cards to interrupt */
schedule_work(&hw->tqueue);
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(hw, old_ioreg);
-#endif
return IRQ_HANDLED;
} /* hfc4s8s_interrupt */
@@ -1471,13 +1388,8 @@ static void
release_pci_ports(hfc4s8s_hw *hw)
{
pci_write_config_word(hw->pdev, PCI_COMMAND, 0);
-#ifdef HISAX_HFC4S8S_PCIMEM
- if (hw->membase)
- iounmap((void *) hw->membase);
-#else
if (hw->iobase)
release_region(hw->iobase, 8);
-#endif
}
/*****************************************/
@@ -1486,11 +1398,7 @@ release_pci_ports(hfc4s8s_hw *hw)
static void
enable_pci_ports(hfc4s8s_hw *hw)
{
-#ifdef HISAX_HFC4S8S_PCIMEM
- pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
-#else
pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_REGIO);
-#endif
}
/*************************************/
@@ -1561,15 +1469,9 @@ setup_instance(hfc4s8s_hw *hw)
hw->irq);
goto out;
}
-#ifdef HISAX_HFC4S8S_PCIMEM
- printk(KERN_INFO
- "HFC-4S/8S: found PCI card at membase 0x%p, irq %d\n",
- hw->hw_membase, hw->irq);
-#else
printk(KERN_INFO
"HFC-4S/8S: found PCI card at iobase 0x%x, irq %d\n",
hw->iobase, hw->irq);
-#endif
hfc_hardware_enable(hw, 1, 0);
@@ -1614,17 +1516,12 @@ hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->irq = pdev->irq;
hw->iobase = pci_resource_start(pdev, 0);
-#ifdef HISAX_HFC4S8S_PCIMEM
- hw->hw_membase = (u_char *) pci_resource_start(pdev, 1);
- hw->membase = ioremap((ulong) hw->hw_membase, 256);
-#else
if (!request_region(hw->iobase, 8, hw->card_name)) {
printk(KERN_INFO
"HFC-4S/8S: failed to request address space at 0x%04x\n",
hw->iobase);
goto out;
}
-#endif
pci_set_drvdata(pdev, hw);
err = setup_instance(hw);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b667a51ed215..9a0d61e0c188 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2440,7 +2440,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
goto err_free;
}
- slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+ slave_agg_no = bond_xmit_hash(bond, skb) % slaves_in_agg;
first_ok_slave = NULL;
bond_for_each_slave_rcu(bond, slave, iter) {
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9f69e818b000..70de039dad2e 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1347,6 +1347,77 @@ void bond_alb_deinitialize(struct bonding *bond)
rlb_deinitialize(bond);
}
+static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
+ struct slave *tx_slave)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct ethhdr *eth_data = eth_hdr(skb);
+
+ if (!tx_slave) {
+ /* unbalanced or unassigned, send through primary */
+ tx_slave = rcu_dereference(bond->curr_active_slave);
+ if (bond->params.tlb_dynamic_lb)
+ bond_info->unbalanced_load += skb->len;
+ }
+
+ if (tx_slave && SLAVE_IS_OK(tx_slave)) {
+ if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
+ ether_addr_copy(eth_data->h_source,
+ tx_slave->dev->dev_addr);
+ }
+
+ bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ goto out;
+ }
+
+ if (tx_slave && bond->params.tlb_dynamic_lb) {
+ _lock_tx_hashtbl(bond);
+ __tlb_clear_slave(bond, tx_slave, 0);
+ _unlock_tx_hashtbl(bond);
+ }
+
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb_any(skb);
+out:
+ return NETDEV_TX_OK;
+}
+
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct ethhdr *eth_data;
+ struct slave *tx_slave = NULL;
+ u32 hash_index;
+
+ skb_reset_mac_header(skb);
+ eth_data = eth_hdr(skb);
+
+ /* Do not TX balance any multicast or broadcast */
+ if (!is_multicast_ether_addr(eth_data->h_dest)) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPX):
+ /* In case of IPX, it will fall back to the L2 hash */
+ case htons(ETH_P_IPV6):
+ hash_index = bond_xmit_hash(bond, skb);
+ if (bond->params.tlb_dynamic_lb) {
+ tx_slave = tlb_choose_channel(bond,
+ hash_index & 0xFF,
+ skb->len);
+ } else {
+ struct list_head *iter;
+ int idx = hash_index % bond->slave_cnt;
+
+ bond_for_each_slave_rcu(bond, tx_slave, iter)
+ if (--idx < 0)
+ break;
+ }
+ break;
+ }
+ }
+ return bond_do_alb_xmit(skb, bond, tx_slave);
+}
+
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -1355,7 +1426,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct slave *tx_slave = NULL;
static const __be32 ip_bcast = htonl(0xffffffff);
int hash_size = 0;
- int do_tx_balance = 1;
+ bool do_tx_balance = true;
u32 hash_index = 0;
const u8 *hash_start = NULL;
struct ipv6hdr *ip6hdr;
@@ -1370,7 +1441,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
(iph->daddr == ip_bcast) ||
(iph->protocol == IPPROTO_IGMP)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
hash_start = (char *)&(iph->daddr);
@@ -1382,7 +1453,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* that here just in case.
*/
if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1390,7 +1461,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* broadcasts in IPv4.
*/
if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1400,7 +1471,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
*/
ip6hdr = ipv6_hdr(skb);
if (ipv6_addr_any(&ip6hdr->saddr)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1410,7 +1481,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
case ETH_P_IPX:
if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
/* something is wrong with this packet */
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1419,7 +1490,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* this family since it has an "ARP" like
* mechanism
*/
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1427,12 +1498,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
hash_size = ETH_ALEN;
break;
case ETH_P_ARP:
- do_tx_balance = 0;
+ do_tx_balance = false;
if (bond_info->rlb_enabled)
tx_slave = rlb_arp_xmit(skb, bond);
break;
default:
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1441,32 +1512,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
}
- if (!tx_slave) {
- /* unbalanced or unassigned, send through primary */
- tx_slave = rcu_dereference(bond->curr_active_slave);
- bond_info->unbalanced_load += skb->len;
- }
-
- if (tx_slave && SLAVE_IS_OK(tx_slave)) {
- if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
- ether_addr_copy(eth_data->h_source,
- tx_slave->dev->dev_addr);
- }
-
- bond_dev_queue_xmit(bond, skb, tx_slave->dev);
- goto out;
- }
-
- if (tx_slave) {
- _lock_tx_hashtbl(bond);
- __tlb_clear_slave(bond, tx_slave, 0);
- _unlock_tx_hashtbl(bond);
- }
-
- /* no suitable interface, frame not sent */
- dev_kfree_skb_any(skb);
-out:
- return NETDEV_TX_OK;
+ return bond_do_alb_xmit(skb, bond, tx_slave);
}
void bond_alb_monitor(struct work_struct *work)
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index e09dd4bfafff..5fc76c01636c 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -175,6 +175,7 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
void bond_alb_monitor(struct work_struct *);
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69aff72c8957..a1741cb23100 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2291,8 +2291,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
if (!slave_do_arp_validate(bond, slave)) {
- if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
- !slave_do_arp_validate_only(bond, slave))
+ if ((slave_do_arp_validate_only(bond) && is_arp) ||
+ !slave_do_arp_validate_only(bond))
slave->last_rx = jiffies;
return RX_HANDLER_ANOTHER;
} else if (!is_arp) {
@@ -3015,20 +3015,18 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
* bond_xmit_hash - generate a hash value based on the xmit policy
* @bond: bonding device
* @skb: buffer to use for headers
- * @count: modulo value
*
* This function will extract the necessary headers from the skb buffer and use
* them to generate a hash based on the xmit_policy set in the bonding device
- * which will be reduced modulo count before returning.
*/
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
{
struct flow_keys flow;
u32 hash;
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
!bond_flow_dissect(bond, skb, &flow))
- return bond_eth_hash(skb) % count;
+ return bond_eth_hash(skb);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
@@ -3039,7 +3037,7 @@ int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
hash ^= (hash >> 16);
hash ^= (hash >> 8);
- return hash % count;
+ return hash;
}
/*-------------------------- Device entry points ----------------------------*/
@@ -3098,7 +3096,8 @@ static int bond_open(struct net_device *bond_dev)
*/
if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
return -ENOMEM;
- queue_delayed_work(bond->wq, &bond->alb_work, 0);
+ if (bond->params.tlb_dynamic_lb)
+ queue_delayed_work(bond->wq, &bond->alb_work, 0);
}
if (bond->params.miimon) /* link check interval, in milliseconds. */
@@ -3666,7 +3665,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
+ bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
return NETDEV_TX_OK;
}
@@ -3776,8 +3775,9 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
case BOND_MODE_8023AD:
return bond_3ad_xmit_xor(skb, dev);
case BOND_MODE_ALB:
- case BOND_MODE_TLB:
return bond_alb_xmit(skb, dev);
+ case BOND_MODE_TLB:
+ return bond_tlb_xmit(skb, dev);
default:
/* Should never happen, mode already checked */
pr_err("%s: Error: Unknown bonding mode %d\n",
@@ -3998,7 +3998,8 @@ static int bond_check_params(struct bond_params *params)
if (xmit_hash_policy) {
if ((bond_mode != BOND_MODE_XOR) &&
- (bond_mode != BOND_MODE_8023AD)) {
+ (bond_mode != BOND_MODE_8023AD) &&
+ (bond_mode != BOND_MODE_TLB)) {
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
@@ -4304,6 +4305,7 @@ static int bond_check_params(struct bond_params *params)
params->min_links = min_links;
params->lp_interval = lp_interval;
params->packets_per_slave = packets_per_slave;
+ params->tlb_dynamic_lb = 1; /* Default value */
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 724e30fa20b9..6dc49da106d6 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -70,6 +70,8 @@ static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_slaves_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
@@ -179,6 +181,12 @@ static const struct bond_opt_value bond_lp_interval_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
+ { "off", 0, 0},
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0}
+};
+
static const struct bond_option bond_opts[] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -199,7 +207,7 @@ static const struct bond_option bond_opts[] = {
[BOND_OPT_XMIT_HASH] = {
.id = BOND_OPT_XMIT_HASH,
.name = "xmit_hash_policy",
- .desc = "balance-xor and 802.3ad hashing method",
+ .desc = "balance-xor, 802.3ad, and tlb hashing method",
.values = bond_xmit_hashtype_tbl,
.set = bond_option_xmit_hash_policy_set
},
@@ -364,9 +372,33 @@ static const struct bond_option bond_opts[] = {
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_slaves_set
},
+ [BOND_OPT_TLB_DYNAMIC_LB] = {
+ .id = BOND_OPT_TLB_DYNAMIC_LB,
+ .name = "tlb_dynamic_lb",
+ .desc = "Enable dynamic flow shuffling",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)),
+ .values = bond_tlb_dynamic_lb_tbl,
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .set = bond_option_tlb_dynamic_lb_set,
+ },
{ }
};
+/* Searches for an option by name */
+const struct bond_option *bond_opt_get_by_name(const char *name)
+{
+ const struct bond_option *opt;
+ int option;
+
+ for (option = 0; option < BOND_OPT_LAST; option++) {
+ opt = bond_opt_get(option);
+ if (opt && !strcmp(opt->name, name))
+ return opt;
+ }
+
+ return NULL;
+}
+
/* Searches for a value in opt's values[] table */
const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
{
@@ -745,6 +777,10 @@ static int bond_option_active_slave_set(struct bonding *bond,
return ret;
}
+/* There are two tricky bits here. First, if MII monitoring is activated, then
+ * we must disable ARP monitoring. Second, if the timer isn't running, we must
+ * start it.
+ */
static int bond_option_miimon_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -783,6 +819,10 @@ static int bond_option_miimon_set(struct bonding *bond,
return 0;
}
+/* Set up and down delays. These must be multiples of the
+ * MII monitoring value, and are stored internally as the multiplier.
+ * Thus, we must translate to MS for the real world.
+ */
static int bond_option_updelay_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -841,6 +881,10 @@ static int bond_option_use_carrier_set(struct bonding *bond,
return 0;
}
+/* There are two tricky bits here. First, if ARP monitoring is activated, then
+ * we must disable MII monitoring. Second, if the ARP timer isn't running,
+ * we must start it.
+ */
static int bond_option_arp_interval_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -1337,3 +1381,13 @@ err_no_cmd:
ret = -EPERM;
goto out;
}
+
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.tlb_dynamic_lb = newval->value;
+
+ return 0;
+}
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
index 12be9e1bfb0c..17ded5b29176 100644
--- a/drivers/net/bonding/bond_options.h
+++ b/drivers/net/bonding/bond_options.h
@@ -62,6 +62,7 @@ enum {
BOND_OPT_RESEND_IGMP,
BOND_OPT_LP_INTERVAL,
BOND_OPT_SLAVES,
+ BOND_OPT_TLB_DYNAMIC_LB,
BOND_OPT_LAST
};
@@ -104,6 +105,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
struct bond_opt_value *val);
const struct bond_option *bond_opt_get(unsigned int option);
+const struct bond_option *bond_opt_get_by_name(const char *name);
const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
/* This helper is used to initialize a bond_opt_value structure for parameter
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5f6babcfc26e..39c4d8d61074 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -45,8 +45,7 @@
#define to_dev(obj) container_of(obj, struct device, kobj)
#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
-/*
- * "show" function for the bond_masters attribute.
+/* "show" function for the bond_masters attribute.
* The class parameter is ignored.
*/
static ssize_t bonding_show_bonds(struct class *cls,
@@ -88,14 +87,12 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna
return NULL;
}
-/*
- * "store" function for the bond_masters attribute. This is what
+/* "store" function for the bond_masters attribute. This is what
* creates and deletes entire bonds.
*
* The class parameter is ignored.
*
*/
-
static ssize_t bonding_store_bonds(struct class *cls,
struct class_attribute *attr,
const char *buffer, size_t count)
@@ -158,9 +155,26 @@ static const struct class_attribute class_attr_bonding_masters = {
.store = bonding_store_bonds,
};
-/*
- * Show the slaves in the current bond.
- */
+/* Generic "store" method for bonding sysfs option setting */
+static ssize_t bonding_sysfs_store_option(struct device *d,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct bonding *bond = to_bond(d);
+ const struct bond_option *opt;
+ int ret;
+
+ opt = bond_opt_get_by_name(attr->attr.name);
+ if (WARN_ON(!opt))
+ return -ENOENT;
+ ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer);
+ if (!ret)
+ ret = count;
+
+ return ret;
+}
+
+/* Show the slaves in the current bond. */
static ssize_t bonding_show_slaves(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -190,32 +204,10 @@ static ssize_t bonding_show_slaves(struct device *d,
return res;
}
-
-/*
- * Set the slaves in the current bond.
- * This is supposed to be only thin wrapper for bond_enslave and bond_release.
- * All hard work should be done there.
- */
-static ssize_t bonding_store_slaves(struct device *d,
- struct device_attribute *attr,
- const char *buffer, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_SLAVES, (char *)buffer);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
- bonding_store_slaves);
+ bonding_sysfs_store_option);
-/*
- * Show and set the bonding mode. The bond interface must be down to
- * change the mode.
- */
+/* Show the bonding mode. */
static ssize_t bonding_show_mode(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -226,26 +218,10 @@ static ssize_t bonding_show_mode(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.mode);
}
-
-static ssize_t bonding_store_mode(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
- bonding_show_mode, bonding_store_mode);
+ bonding_show_mode, bonding_sysfs_store_option);
-/*
- * Show and set the bonding transmit hash method.
- */
+/* Show the bonding transmit hash method. */
static ssize_t bonding_show_xmit_hash(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -257,26 +233,10 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
-
-static ssize_t bonding_store_xmit_hash(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_XMIT_HASH, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
- bonding_show_xmit_hash, bonding_store_xmit_hash);
+ bonding_show_xmit_hash, bonding_sysfs_store_option);
-/*
- * Show and set arp_validate.
- */
+/* Show arp_validate. */
static ssize_t bonding_show_arp_validate(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -289,26 +249,10 @@ static ssize_t bonding_show_arp_validate(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
-
-static ssize_t bonding_store_arp_validate(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_VALIDATE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
- bonding_store_arp_validate);
-/*
- * Show and set arp_all_targets.
- */
+ bonding_sysfs_store_option);
+
+/* Show arp_all_targets. */
static ssize_t bonding_show_arp_all_targets(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -321,28 +265,10 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
return sprintf(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
-
-static ssize_t bonding_store_arp_all_targets(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_ALL_TARGETS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
- bonding_show_arp_all_targets, bonding_store_arp_all_targets);
+ bonding_show_arp_all_targets, bonding_sysfs_store_option);
-/*
- * Show and store fail_over_mac. User only allowed to change the
- * value when there are no slaves.
- */
+/* Show fail_over_mac. */
static ssize_t bonding_show_fail_over_mac(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -355,30 +281,10 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
-
-static ssize_t bonding_store_fail_over_mac(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_FAIL_OVER_MAC, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
- bonding_show_fail_over_mac, bonding_store_fail_over_mac);
+ bonding_show_fail_over_mac, bonding_sysfs_store_option);
-/*
- * Show and set the arp timer interval. There are two tricky bits
- * here. First, if ARP monitoring is activated, then we must disable
- * MII monitoring. Second, if the ARP timer isn't running, we must
- * start it.
- */
+/* Show the arp timer interval. */
static ssize_t bonding_show_arp_interval(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -387,26 +293,10 @@ static ssize_t bonding_show_arp_interval(struct device *d,
return sprintf(buf, "%d\n", bond->params.arp_interval);
}
-
-static ssize_t bonding_store_arp_interval(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_INTERVAL, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
- bonding_show_arp_interval, bonding_store_arp_interval);
+ bonding_show_arp_interval, bonding_sysfs_store_option);
-/*
- * Show and set the arp targets.
- */
+/* Show the arp targets. */
static ssize_t bonding_show_arp_targets(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -424,27 +314,10 @@ static ssize_t bonding_show_arp_targets(struct device *d,
return res;
}
+static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR,
+ bonding_show_arp_targets, bonding_sysfs_store_option);
-static ssize_t bonding_store_arp_targets(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_TARGETS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
-
-/*
- * Show and set the up and down delays. These must be multiples of the
- * MII monitoring value, and are stored internally as the multiplier.
- * Thus, we must translate to MS for the real world.
- */
+/* Show the up and down delays. */
static ssize_t bonding_show_downdelay(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -453,22 +326,8 @@ static ssize_t bonding_show_downdelay(struct device *d,
return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
-
-static ssize_t bonding_store_downdelay(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_DOWNDELAY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
- bonding_show_downdelay, bonding_store_downdelay);
+ bonding_show_downdelay, bonding_sysfs_store_option);
static ssize_t bonding_show_updelay(struct device *d,
struct device_attribute *attr,
@@ -479,27 +338,10 @@ static ssize_t bonding_show_updelay(struct device *d,
return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
-
-static ssize_t bonding_store_updelay(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_UPDELAY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
- bonding_show_updelay, bonding_store_updelay);
+ bonding_show_updelay, bonding_sysfs_store_option);
-/*
- * Show and set the LACP interval. Interface must be down, and the mode
- * must be set to 802.3ad mode.
- */
+/* Show the LACP interval. */
static ssize_t bonding_show_lacp(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -511,22 +353,8 @@ static ssize_t bonding_show_lacp(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
-
-static ssize_t bonding_store_lacp(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LACP_RATE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
- bonding_show_lacp, bonding_store_lacp);
+ bonding_show_lacp, bonding_sysfs_store_option);
static ssize_t bonding_show_min_links(struct device *d,
struct device_attribute *attr,
@@ -536,22 +364,8 @@ static ssize_t bonding_show_min_links(struct device *d,
return sprintf(buf, "%u\n", bond->params.min_links);
}
-
-static ssize_t bonding_store_min_links(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MINLINKS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
- bonding_show_min_links, bonding_store_min_links);
+ bonding_show_min_links, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_select(struct device *d,
struct device_attribute *attr,
@@ -564,27 +378,10 @@ static ssize_t bonding_show_ad_select(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
}
-
-
-static ssize_t bonding_store_ad_select(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_AD_SELECT, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
- bonding_show_ad_select, bonding_store_ad_select);
+ bonding_show_ad_select, bonding_sysfs_store_option);
-/*
- * Show and set the number of peer notifications to send after a failover event.
- */
+/* Show and set the number of peer notifications to send after a failover event. */
static ssize_t bonding_show_num_peer_notif(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -611,12 +408,7 @@ static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
bonding_show_num_peer_notif, bonding_store_num_peer_notif);
-/*
- * Show and set the MII monitor interval. There are two tricky bits
- * here. First, if MII monitoring is activated, then we must disable
- * ARP monitoring. Second, if the timer isn't running, we must
- * start it.
- */
+/* Show the MII monitor interval. */
static ssize_t bonding_show_miimon(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -625,30 +417,10 @@ static ssize_t bonding_show_miimon(struct device *d,
return sprintf(buf, "%d\n", bond->params.miimon);
}
-
-static ssize_t bonding_store_miimon(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
- bonding_show_miimon, bonding_store_miimon);
+ bonding_show_miimon, bonding_sysfs_store_option);
-/*
- * Show and set the primary slave. The store function is much
- * simpler than bonding_store_slaves function because it only needs to
- * handle one interface name.
- * The bond must be a mode that supports a primary for this be
- * set.
- */
+/* Show the primary slave. */
static ssize_t bonding_show_primary(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -661,26 +433,10 @@ static ssize_t bonding_show_primary(struct device *d,
return count;
}
-
-static ssize_t bonding_store_primary(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
- bonding_show_primary, bonding_store_primary);
+ bonding_show_primary, bonding_sysfs_store_option);
-/*
- * Show and set the primary_reselect flag.
- */
+/* Show the primary_reselect flag. */
static ssize_t bonding_show_primary_reselect(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -694,28 +450,10 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
return sprintf(buf, "%s %d\n",
val->string, bond->params.primary_reselect);
}
-
-static ssize_t bonding_store_primary_reselect(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY_RESELECT,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
- bonding_show_primary_reselect,
- bonding_store_primary_reselect);
+ bonding_show_primary_reselect, bonding_sysfs_store_option);
-/*
- * Show and set the use_carrier flag.
- */
+/* Show the use_carrier flag. */
static ssize_t bonding_show_carrier(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -724,27 +462,11 @@ static ssize_t bonding_show_carrier(struct device *d,
return sprintf(buf, "%d\n", bond->params.use_carrier);
}
-
-static ssize_t bonding_store_carrier(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_USE_CARRIER, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
- bonding_show_carrier, bonding_store_carrier);
+ bonding_show_carrier, bonding_sysfs_store_option);
-/*
- * Show and set currently active_slave.
- */
+/* Show currently active_slave. */
static ssize_t bonding_show_active_slave(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -761,27 +483,10 @@ static ssize_t bonding_show_active_slave(struct device *d,
return count;
}
-
-static ssize_t bonding_store_active_slave(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ACTIVE_SLAVE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
- bonding_show_active_slave, bonding_store_active_slave);
-
+ bonding_show_active_slave, bonding_sysfs_store_option);
-/*
- * Show link status of the bond interface.
- */
+/* Show link status of the bond interface. */
static ssize_t bonding_show_mii_status(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -792,9 +497,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
}
static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
-/*
- * Show current 802.3ad aggregator ID.
- */
+/* Show current 802.3ad aggregator ID. */
static ssize_t bonding_show_ad_aggregator(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -814,9 +517,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
-/*
- * Show number of active 802.3ad ports.
- */
+/* Show number of active 802.3ad ports. */
static ssize_t bonding_show_ad_num_ports(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -836,9 +537,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
-/*
- * Show current 802.3ad actor key.
- */
+/* Show current 802.3ad actor key. */
static ssize_t bonding_show_ad_actor_key(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -858,9 +557,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
-/*
- * Show current 802.3ad partner key.
- */
+/* Show current 802.3ad partner key. */
static ssize_t bonding_show_ad_partner_key(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -880,9 +577,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
-/*
- * Show current 802.3ad partner mac.
- */
+/* Show current 802.3ad partner mac. */
static ssize_t bonding_show_ad_partner_mac(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -900,9 +595,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
}
static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
-/*
- * Show the queue_ids of the slaves in the current bond.
- */
+/* Show the queue_ids of the slaves in the current bond. */
static ssize_t bonding_show_queue_id(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -933,31 +626,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
return res;
}
-
-/*
- * Set the queue_ids of the slaves in the current bond. The bond
- * interface must be enslaved for this to work.
- */
-static ssize_t bonding_store_queue_id(struct device *d,
- struct device_attribute *attr,
- const char *buffer, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_QUEUE_ID, (char *)buffer);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
- bonding_store_queue_id);
+ bonding_sysfs_store_option);
-/*
- * Show and set the all_slaves_active flag.
- */
+/* Show the all_slaves_active flag. */
static ssize_t bonding_show_slaves_active(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -966,27 +639,10 @@ static ssize_t bonding_show_slaves_active(struct device *d,
return sprintf(buf, "%d\n", bond->params.all_slaves_active);
}
-
-static ssize_t bonding_store_slaves_active(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ALL_SLAVES_ACTIVE,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
- bonding_show_slaves_active, bonding_store_slaves_active);
+ bonding_show_slaves_active, bonding_sysfs_store_option);
-/*
- * Show and set the number of IGMP membership reports to send on link failure
- */
+/* Show the number of IGMP membership reports to send on link failure */
static ssize_t bonding_show_resend_igmp(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -995,23 +651,8 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
return sprintf(buf, "%d\n", bond->params.resend_igmp);
}
-
-static ssize_t bonding_store_resend_igmp(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_RESEND_IGMP, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
- bonding_show_resend_igmp, bonding_store_resend_igmp);
+ bonding_show_resend_igmp, bonding_sysfs_store_option);
static ssize_t bonding_show_lp_interval(struct device *d,
@@ -1019,25 +660,21 @@ static ssize_t bonding_show_lp_interval(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+
return sprintf(buf, "%d\n", bond->params.lp_interval);
}
+static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
+ bonding_show_lp_interval, bonding_sysfs_store_option);
-static ssize_t bonding_store_lp_interval(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LP_INTERVAL, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
+ return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
-
-static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
- bonding_show_lp_interval, bonding_store_lp_interval);
+static DEVICE_ATTR(tlb_dynamic_lb, S_IRUGO | S_IWUSR,
+ bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
static ssize_t bonding_show_packets_per_slave(struct device *d,
struct device_attribute *attr,
@@ -1045,27 +682,11 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
{
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
- return sprintf(buf, "%u\n", packets_per_slave);
-}
-
-static ssize_t bonding_store_packets_per_slave(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PACKETS_PER_SLAVE,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
+ return sprintf(buf, "%u\n", packets_per_slave);
}
-
static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
- bonding_show_packets_per_slave,
- bonding_store_packets_per_slave);
+ bonding_show_packets_per_slave, bonding_sysfs_store_option);
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
@@ -1099,6 +720,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_min_links.attr,
&dev_attr_lp_interval.attr,
&dev_attr_packets_per_slave.attr,
+ &dev_attr_tlb_dynamic_lb.attr,
NULL,
};
@@ -1107,8 +729,7 @@ static struct attribute_group bonding_group = {
.attrs = per_bond_attrs,
};
-/*
- * Initialize sysfs. This sets up the bonding_masters file in
+/* Initialize sysfs. This sets up the bonding_masters file in
* /sys/class/net.
*/
int bond_create_sysfs(struct bond_net *bn)
@@ -1120,8 +741,7 @@ int bond_create_sysfs(struct bond_net *bn)
ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
bn->net);
- /*
- * Permit multiple loads of the module by ignoring failures to
+ /* Permit multiple loads of the module by ignoring failures to
* create the bonding_masters sysfs file. Bonding devices
* created by second or subsequent loads of the module will
* not be listed in, or controllable by, bonding_masters, but
@@ -1144,16 +764,13 @@ int bond_create_sysfs(struct bond_net *bn)
}
-/*
- * Remove /sys/class/net/bonding_masters.
- */
+/* Remove /sys/class/net/bonding_masters. */
void bond_destroy_sysfs(struct bond_net *bn)
{
netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
}
-/*
- * Initialize sysfs for each bond. This sets up and registers
+/* Initialize sysfs for each bond. This sets up and registers
 * the 'bonding' directory for each individual bond in /sys/class/net/.
*/
void bond_prepare_sysfs_group(struct bonding *bond)
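
Editor's note on the sysfs conversion above: every removed per-option store handler is replaced by the single bonding_sysfs_store_option() callback now named in the DEVICE_ATTR() lines. Its body lies outside the hunks shown here, so the following is only a plausible sketch; in particular, the bond_opt_get_by_name() lookup helper is an assumption based on the sysfs attribute names matching the bonding option names.

static ssize_t bonding_sysfs_store_option(struct device *d,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct bonding *bond = to_bond(d);
	const struct bond_option *opt;
	int ret;

	/* Map the attribute name to its option descriptor; the helper name
	 * here is assumed, any name-to-option lookup would do.
	 */
	opt = bond_opt_get_by_name(attr->attr.name);
	if (WARN_ON(!opt))
		return -ENOENT;

	ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buf);
	if (!ret)
		ret = count;

	return ret;
}

Whatever the exact lookup, the final bond_opt_tryset_rtnl() call is the same rtnl-protected path every removed handler used, so per-attribute behaviour is unchanged.
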
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b8bdd0acc8f3..1621226b8297 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -174,6 +174,7 @@ struct bond_params {
int resend_igmp;
int lp_interval;
int packets_per_slave;
+ int tlb_dynamic_lb;
struct reciprocal_value reciprocal_packets_per_slave;
};
@@ -182,8 +183,6 @@ struct bond_parm_tbl {
int mode;
};
-#define BOND_MAX_MODENAME_LEN 20
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@@ -395,8 +394,7 @@ static inline int slave_do_arp_validate(struct bonding *bond,
return bond->params.arp_validate & (1 << bond_slave_state(slave));
}
-static inline int slave_do_arp_validate_only(struct bonding *bond,
- struct slave *slave)
+static inline int slave_do_arp_validate_only(struct bonding *bond)
{
return bond->params.arp_validate & BOND_ARP_FILTER;
}
@@ -487,7 +485,14 @@ static inline bool slave_can_tx(struct slave *slave)
return false;
}
-struct bond_net;
+struct bond_net {
+ struct net *net; /* Associated network namespace */
+ struct list_head dev_list;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_dir;
+#endif
+ struct class_attribute class_attr_bonding_masters;
+};
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
@@ -499,7 +504,7 @@ int bond_sysfs_slave_add(struct slave *slave);
void bond_sysfs_slave_del(struct slave *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
@@ -516,15 +521,6 @@ struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
struct net_device *bond_option_active_slave_get(struct bonding *bond);
const char *bond_slave_link_status(s8 link);
-struct bond_net {
- struct net * net; /* Associated network namespace */
- struct list_head dev_list;
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry * proc_dir;
-#endif
- struct class_attribute class_attr_bonding_masters;
-};
-
#ifdef CONFIG_PROC_FS
void bond_create_proc_entry(struct bonding *bond);
void bond_remove_proc_entry(struct bonding *bond);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9e7d95dae2c7..4aacaa9b478a 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -77,12 +77,6 @@ config CAN_TI_HECC
Driver for TI HECC (High End CAN Controller) module found on many
TI devices. The device specifications are available from www.ti.com
-config CAN_MCP251X
- tristate "Microchip MCP251x SPI CAN controllers"
- depends on SPI && HAS_DMA
- ---help---
- Driver for the Microchip MCP251x SPI CAN controllers.
-
config CAN_BFIN
depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
tristate "Analog Devices Blackfin on-chip CAN"
@@ -133,6 +127,8 @@ source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
+source "drivers/net/can/spi/Kconfig"
+
source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index c7440392adbb..c42058868b0f 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ can-dev-y := dev.o
can-dev-$(CONFIG_CAN_LEDS) += led.o
+obj-y += spi/
obj-y += usb/
obj-y += softing/
@@ -19,7 +20,6 @@ obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
-obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index fe5f6303b584..58f71e1fcc4e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -19,9 +19,13 @@
#include "c_can.h"
+#define PCI_DEVICE_ID_PCH_CAN 0x8818
+#define PCH_PCI_SOFT_RESET 0x01fc
+
enum c_can_pci_reg_align {
C_CAN_REG_ALIGN_16,
C_CAN_REG_ALIGN_32,
+ C_CAN_REG_32,
};
struct c_can_pci_data {
@@ -31,6 +35,10 @@ struct c_can_pci_data {
enum c_can_pci_reg_align reg_align;
/* Set the frequency */
unsigned int freq;
+ /* PCI bar number */
+ int bar;
+ /* Callback for reset */
+ void (*init)(const struct c_can_priv *priv, bool enable);
};
/*
@@ -63,6 +71,29 @@ static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
writew(val, priv->base + 2 * priv->regs[index]);
}
+static u16 c_can_pci_read_reg_32bit(struct c_can_priv *priv,
+ enum reg index)
+{
+ return (u16)ioread32(priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_32bit(struct c_can_priv *priv,
+ enum reg index, u16 val)
+{
+ iowrite32((u32)val, priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_reset_pch(const struct c_can_priv *priv, bool enable)
+{
+ if (enable) {
+ u32 __iomem *addr = priv->base + PCH_PCI_SOFT_RESET;
+
+ /* write to sw reset register */
+ iowrite32(1, addr);
+ iowrite32(0, addr);
+ }
+}
+
static int c_can_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -90,7 +121,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
}
- addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ addr = pci_iomap(pdev, c_can_pci_data->bar,
+ pci_resource_len(pdev, c_can_pci_data->bar));
if (!addr) {
dev_err(&pdev->dev,
"device has no PCI memory resources, "
@@ -147,11 +179,17 @@ static int c_can_pci_probe(struct pci_dev *pdev,
priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
break;
+ case C_CAN_REG_32:
+ priv->read_reg = c_can_pci_read_reg_32bit;
+ priv->write_reg = c_can_pci_write_reg_32bit;
+ break;
default:
ret = -EINVAL;
goto out_free_c_can;
}
+ priv->raminit = c_can_pci_data->init;
+
ret = register_c_can_dev(dev);
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
@@ -198,6 +236,15 @@ static struct c_can_pci_data c_can_sta2x11= {
.type = BOSCH_C_CAN,
.reg_align = C_CAN_REG_ALIGN_32,
.freq = 52000000, /* 52 Mhz */
+ .bar = 0,
+};
+
+static struct c_can_pci_data c_can_pch = {
+ .type = BOSCH_C_CAN,
+ .reg_align = C_CAN_REG_32,
+ .freq = 50000000, /* 50 MHz */
+ .init = c_can_pci_reset_pch,
+ .bar = 1,
};
#define C_CAN_ID(_vend, _dev, _driverdata) { \
@@ -207,6 +254,8 @@ static struct c_can_pci_data c_can_sta2x11= {
static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
c_can_sta2x11),
+ C_CAN_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_CAN,
+ c_can_pch),
{},
};
static struct pci_driver c_can_pci_driver = {
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 7d8c8f3672dd..bacd236ce306 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -556,15 +556,6 @@ failed:
/*
* netdev sysfs
*/
-static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct net_device *ndev = to_net_dev(dev);
- struct softing_priv *priv = netdev2softing(ndev);
-
- return sprintf(buf, "%i\n", priv->index);
-}
-
static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -609,12 +600,10 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
return count;
}
-static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
static const struct attribute *const netdev_sysfs_attrs[] = {
- &dev_attr_channel.attr,
&dev_attr_chip.attr,
&dev_attr_output.attr,
NULL,
@@ -679,17 +668,20 @@ static int softing_netdev_register(struct net_device *netdev)
{
int ret;
- netdev->sysfs_groups[0] = &netdev_sysfs_group;
ret = register_candev(netdev);
if (ret) {
dev_alert(&netdev->dev, "register failed\n");
return ret;
}
+ if (sysfs_create_group(&netdev->dev.kobj, &netdev_sysfs_group) < 0)
+ netdev_alert(netdev, "sysfs group failed\n");
+
return 0;
}
static void softing_netdev_cleanup(struct net_device *netdev)
{
+ sysfs_remove_group(&netdev->dev.kobj, &netdev_sysfs_group);
unregister_candev(netdev);
free_candev(netdev);
}
@@ -721,8 +713,6 @@ DEV_ATTR_RO(firmware_version, id.fw_version);
DEV_ATTR_RO_STR(hardware, pdat->name);
DEV_ATTR_RO(hardware_version, id.hw_version);
DEV_ATTR_RO(license, id.license);
-DEV_ATTR_RO(frequency, id.freq);
-DEV_ATTR_RO(txpending, tx.pending);
static struct attribute *softing_pdev_attrs[] = {
&dev_attr_serial.attr,
@@ -731,8 +721,6 @@ static struct attribute *softing_pdev_attrs[] = {
&dev_attr_hardware.attr,
&dev_attr_hardware_version.attr,
&dev_attr_license.attr,
- &dev_attr_frequency.attr,
- &dev_attr_txpending.attr,
NULL,
};
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
new file mode 100644
index 000000000000..148cae5871a6
--- /dev/null
+++ b/drivers/net/can/spi/Kconfig
@@ -0,0 +1,10 @@
+menu "CAN SPI interfaces"
+ depends on SPI
+
+config CAN_MCP251X
+ tristate "Microchip MCP251x SPI CAN controllers"
+ depends on HAS_DMA
+ ---help---
+ Driver for the Microchip MCP251x SPI CAN controllers.
+
+endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
new file mode 100644
index 000000000000..90bcacffbc65
--- /dev/null
+++ b/drivers/net/can/spi/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux Controller Area Network SPI drivers.
+#
+
+
+obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 28c11f815245..bc235f9dc754 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -214,6 +214,8 @@
#define TX_ECHO_SKB_MAX 1
+#define MCP251X_OST_DELAY_MS (5)
+
#define DEVICE_NAME "mcp251x"
static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
@@ -624,50 +626,45 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
+ u8 reg;
int ret;
- unsigned long timeout;
+
+ /* Wait for oscillator startup timer after power up */
+ mdelay(MCP251X_OST_DELAY_MS);
priv->spi_tx_buf[0] = INSTRUCTION_RESET;
- ret = spi_write(spi, priv->spi_tx_buf, 1);
- if (ret) {
- dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
- return -EIO;
- }
+ ret = mcp251x_spi_trans(spi, 1);
+ if (ret)
+ return ret;
+
+ /* Wait for oscillator startup timer after reset */
+ mdelay(MCP251X_OST_DELAY_MS);
+
+ reg = mcp251x_read_reg(spi, CANSTAT);
+ if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
+ return -ENODEV;
- /* Wait for reset to finish */
- timeout = jiffies + HZ;
- mdelay(10);
- while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
- != CANCTRL_REQOP_CONF) {
- schedule();
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't"
- " enter in conf mode after reset\n");
- return -EBUSY;
- }
- }
return 0;
}
static int mcp251x_hw_probe(struct spi_device *spi)
{
- int st1, st2;
+ u8 ctrl;
+ int ret;
- mcp251x_hw_reset(spi);
+ ret = mcp251x_hw_reset(spi);
+ if (ret)
+ return ret;
- /*
- * Please note that these are "magic values" based on after
- * reset defaults taken from data sheet which allows us to see
- * if we really have a chip on the bus (we avoid common all
- * zeroes or all ones situations)
- */
- st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
- st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
+ ctrl = mcp251x_read_reg(spi, CANCTRL);
+
+ dev_dbg(&spi->dev, "CANCTRL 0x%02x\n", ctrl);
- dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2);
+ /* Check for power up default value */
+ if ((ctrl & 0x17) != 0x07)
+ return -ENODEV;
- /* Check for power up default values */
- return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
+ return 0;
}
static int mcp251x_power_enable(struct regulator *reg, int enable)
@@ -776,7 +773,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
- mdelay(10);
mcp251x_hw_reset(spi);
mcp251x_setup(net, priv, spi);
if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
@@ -1032,8 +1028,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
- int freq, ret = -ENODEV;
struct clk *clk;
+ int freq, ret;
clk = devm_clk_get(&spi->dev, NULL);
if (IS_ERR(clk)) {
@@ -1076,6 +1072,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->net = net;
priv->clk = clk;
+ spi_set_drvdata(spi, priv);
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 8;
+ if (mcp251x_is_2510(spi))
+ spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
+ else
+ spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
+ ret = spi_setup(spi);
+ if (ret)
+ goto out_clk;
+
priv->power = devm_regulator_get(&spi->dev, "vdd");
priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
@@ -1088,8 +1096,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
- spi_set_drvdata(spi, priv);
-
priv->spi = spi;
mutex_init(&priv->mcp_lock);
@@ -1134,20 +1140,11 @@ static int mcp251x_can_probe(struct spi_device *spi)
SET_NETDEV_DEV(net, &spi->dev);
- /* Configure the SPI bus */
- spi->mode = spi->mode ? : SPI_MODE_0;
- if (mcp251x_is_2510(spi))
- spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
- else
- spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
- spi->bits_per_word = 8;
- spi_setup(spi);
-
/* Here is OK to not lock the MCP, no one knows about it yet */
- if (!mcp251x_hw_probe(spi)) {
- ret = -ENODEV;
+ ret = mcp251x_hw_probe(spi);
+ if (ret)
goto error_probe;
- }
+
mcp251x_hw_sleep(spi);
ret = register_candev(net);
@@ -1156,7 +1153,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
devm_can_led_init(net);
- return ret;
+ return 0;
error_probe:
if (mcp251x_enable_dma)
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index fc96a3d83ebe..0b918ebad76b 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -19,7 +19,7 @@ config CAN_KVASER_USB
This driver adds support for Kvaser CAN/USB devices like Kvaser
Leaf Light.
- The driver gives support for the following devices:
+ The driver provides support for the following devices:
- Kvaser Leaf Light
- Kvaser Leaf Professional HS
- Kvaser Leaf SemiPro HS
@@ -36,6 +36,8 @@ config CAN_KVASER_USB
- Kvaser Leaf Light "China"
- Kvaser BlackBird SemiPro
- Kvaser USBcan R
+ - Kvaser Leaf Light v2
+ - Kvaser Mini PCI Express HS
If unsure, say N.
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4ca46edc061d..541fb7a05625 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -53,6 +53,8 @@
#define USB_OEM_MERCURY_PRODUCT_ID 34
#define USB_OEM_LEAF_PRODUCT_ID 35
#define USB_CAN_R_PRODUCT_ID 39
+#define USB_LEAF_LITE_V2_PRODUCT_ID 288
+#define USB_MINI_PCIE_HS_PRODUCT_ID 289
/* USB devices features */
#define KVASER_HAS_SILENT_MODE BIT(0)
@@ -356,6 +358,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = KVASER_HAS_TXRX_ERRORS },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
.driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -379,38 +383,43 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
void *buf;
int actual_len;
int err;
- int pos = 0;
+ int pos;
+ unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);
buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- err = usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- buf, RX_BUFFER_SIZE, &actual_len,
- USB_RECV_TIMEOUT);
- if (err < 0)
- goto end;
+ do {
+ err = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ buf, RX_BUFFER_SIZE, &actual_len,
+ USB_RECV_TIMEOUT);
+ if (err < 0)
+ goto end;
- while (pos <= actual_len - MSG_HEADER_LEN) {
- tmp = buf + pos;
+ pos = 0;
+ while (pos <= actual_len - MSG_HEADER_LEN) {
+ tmp = buf + pos;
- if (!tmp->len)
- break;
+ if (!tmp->len)
+ break;
- if (pos + tmp->len > actual_len) {
- dev_err(dev->udev->dev.parent, "Format error\n");
- break;
- }
+ if (pos + tmp->len > actual_len) {
+ dev_err(dev->udev->dev.parent,
+ "Format error\n");
+ break;
+ }
- if (tmp->id == id) {
- memcpy(msg, tmp, tmp->len);
- goto end;
- }
+ if (tmp->id == id) {
+ memcpy(msg, tmp, tmp->len);
+ goto end;
+ }
- pos += tmp->len;
- }
+ pos += tmp->len;
+ }
+ } while (time_before(jiffies, to));
err = -EINVAL;
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 41ee5b6ae917..69c42513dd72 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -289,7 +289,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
static int mv88e6123_61_65_setup(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int i;
int ret;
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index dadfafba64e9..953bc6a49e59 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -155,7 +155,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = REG_PORT(p);
u16 val;
@@ -274,7 +274,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
static int mv88e6131_setup(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int i;
int ret;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 17314ed9456d..9ce2146346b6 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -74,7 +74,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
@@ -118,7 +118,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
@@ -256,7 +256,7 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->ppu_mutex);
@@ -283,7 +283,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
/* Schedule a timer to re-enable the PHY polling unit. */
mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
@@ -292,7 +292,7 @@ static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
mutex_init(&ps->ppu_mutex);
INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
@@ -463,7 +463,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
int nr_stats, struct mv88e6xxx_hw_stat *stats,
int port, uint64_t *data)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
int i;
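
Editor's note: the mv88e6123_61_65, mv88e6131 and mv88e6xxx hunks above replace the open-coded (void *)(ds + 1) arithmetic with ds_to_priv(). The helper itself is not part of this diff; assuming it is the usual inline accessor from include/net/dsa.h, it wraps the very same arithmetic, so the change is purely a readability cleanup:

/* Presumed definition (include/net/dsa.h): the driver-private state is
 * allocated directly after struct dsa_switch, so the helper just steps
 * past the switch structure.
 */
static inline void *ds_to_priv(struct dsa_switch *ds)
{
	return (void *)(ds + 1);
}
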
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 4d1f2fdd5c32..38c500f95b9e 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -37,10 +37,8 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
void msgdma_reset(struct altera_tse_private *priv)
{
int counter;
- struct msgdma_csr *txcsr =
- (struct msgdma_csr *)priv->tx_dma_csr;
- struct msgdma_csr *rxcsr =
- (struct msgdma_csr *)priv->rx_dma_csr;
+ struct msgdma_csr *txcsr = priv->tx_dma_csr;
+ struct msgdma_csr *rxcsr = priv->rx_dma_csr;
/* Reset Rx mSGDMA */
iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
@@ -138,8 +136,7 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
u32 ready = 0;
u32 inuse;
u32 status;
- struct msgdma_csr *txcsr =
- (struct msgdma_csr *)priv->tx_dma_csr;
+ struct msgdma_csr *txcsr = priv->tx_dma_csr;
/* Get number of sent descriptors */
inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
@@ -190,10 +187,8 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
u32 rxstatus = 0;
u32 pktlength;
u32 pktstatus;
- struct msgdma_csr *rxcsr =
- (struct msgdma_csr *)priv->rx_dma_csr;
- struct msgdma_response *rxresp =
- (struct msgdma_response *)priv->rx_dma_resp;
+ struct msgdma_csr *rxcsr = priv->rx_dma_csr;
+ struct msgdma_response *rxresp = priv->rx_dma_resp;
if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
pktlength = ioread32(&rxresp->bytes_transferred);
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 9ce8630692b6..dbd40e15b5cc 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -126,12 +126,12 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
*/
void sgdma_reset(struct altera_tse_private *priv)
{
- u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
+ u32 *ptxdescripmem = priv->tx_dma_desc;
u32 txdescriplen = priv->txdescmem;
- u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
+ u32 *prxdescripmem = priv->rx_dma_desc;
u32 rxdescriplen = priv->rxdescmem;
- struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
- struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
+ struct sgdma_csr *ptxsgdma = priv->tx_dma_csr;
+ struct sgdma_csr *prxsgdma = priv->rx_dma_csr;
/* Initialize descriptor memory to 0 */
memset(ptxdescripmem, 0, txdescriplen);
@@ -167,13 +167,13 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ struct sgdma_csr *csr = priv->rx_dma_csr;
tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
}
void sgdma_clear_txirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+ struct sgdma_csr *csr = priv->tx_dma_csr;
tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
}
@@ -185,8 +185,7 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
int pktstx = 0;
- struct sgdma_descrip *descbase =
- (struct sgdma_descrip *)priv->tx_dma_desc;
+ struct sgdma_descrip *descbase = priv->tx_dma_desc;
struct sgdma_descrip *cdesc = &descbase[0];
struct sgdma_descrip *ndesc = &descbase[1];
@@ -219,7 +218,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
u32 ready = 0;
- struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
+ struct sgdma_descrip *desc = priv->tx_dma_desc;
if (!sgdma_txbusy(priv) &&
((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
@@ -246,8 +245,8 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
*/
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
+ struct sgdma_csr *csr = priv->rx_dma_csr;
+ struct sgdma_descrip *base = priv->rx_dma_desc;
struct sgdma_descrip *desc = NULL;
int pktsrx;
unsigned int rxstatus = 0;
@@ -351,10 +350,8 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
*/
static int sgdma_async_read(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- struct sgdma_descrip *descbase =
- (struct sgdma_descrip *)priv->rx_dma_desc;
-
+ struct sgdma_csr *csr = priv->rx_dma_csr;
+ struct sgdma_descrip *descbase = priv->rx_dma_desc;
struct sgdma_descrip *cdesc = &descbase[0];
struct sgdma_descrip *ndesc = &descbase[1];
@@ -397,7 +394,7 @@ static int sgdma_async_read(struct altera_tse_private *priv)
static int sgdma_async_write(struct altera_tse_private *priv,
struct sgdma_descrip *desc)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+ struct sgdma_csr *csr = priv->tx_dma_csr;
if (sgdma_txbusy(priv))
return 0;
@@ -518,7 +515,7 @@ queue_rx_peekhead(struct altera_tse_private *priv)
*/
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ struct sgdma_csr *csr = priv->rx_dma_csr;
return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
}
@@ -528,7 +525,7 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
static int sgdma_txbusy(struct altera_tse_private *priv)
{
int delay = 0;
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+ struct sgdma_csr *csr = priv->tx_dma_csr;
/* if DMA is busy, wait for current transaction to finish */
while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
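
Editor's note: the Altera msgdma/sgdma hunks above only drop casts on assignments from the void * members of the driver's private structure. As a standalone C illustration (not part of the patch; the type below is a stub), a void * converts implicitly to any object pointer type, so the removed casts were redundant:

#include <stdio.h>

struct sgdma_csr { unsigned int control; };	/* stub of the real register block */

int main(void)
{
	struct sgdma_csr regs = { .control = 0 };
	void *rx_dma_csr = &regs;		/* stand-in for the void * priv member */
	struct sgdma_csr *csr = rx_dma_csr;	/* implicit conversion, no cast needed */

	csr->control = 1;
	printf("control=%u\n", csr->control);
	return 0;
}
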
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 85dbddd03722..3e488094b073 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -150,4 +150,15 @@ config BGMAC
When using this driver on BCM4706, BCMA_DRIVER_GMAC_CMN must also
be enabled to make it work.
+config SYSTEMPORT
+ tristate "Broadcom SYSTEMPORT internal MAC support"
+ depends on OF
+ select MII
+ select PHYLIB
+ select FIXED_PHY if SYSTEMPORT=y
+ help
+ This driver supports the built-in Ethernet MACs found in the
+ Broadcom BCM7xxx Set Top Box family chipset using an internal
+ Ethernet switch.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index fd639a0d4c7d..e2a958a657e0 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
+obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a7d11f5565d6..8db34d389675 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1315,8 +1315,7 @@ static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
};
-#define BCM_ENET_STATS_LEN \
- (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
+#define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)
static const u32 unused_mib_regs[] = {
ETH_MIB_TX_ALL_OCTETS,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
new file mode 100644
index 000000000000..4dc8d1e9829b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -0,0 +1,1614 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "bcmsysport.h"
+
+/* I/O accessors register helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
+{ \
+ u32 reg = __raw_readl(priv->base + offset + off); \
+ return reg; \
+} \
+static inline void name##_writel(struct bcm_sysport_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ __raw_writel(val, priv->base + offset + off); \
+} \
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+
+/* L2-interrupt masking/unmasking helpers, does automatic saving of the applied
+ * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
+ */
+#define BCM_SYSPORT_INTR_L2(which) \
+static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
+ u32 mask) \
+{ \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
+ priv->irq##which##_mask &= ~(mask); \
+} \
+static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
+ u32 mask) \
+{ \
+ intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
+ priv->irq##which##_mask |= (mask); \
+} \
+
+BCM_SYSPORT_INTR_L2(0)
+BCM_SYSPORT_INTR_L2(1)
+
+/* Register accesses to GISB/RBUS registers are expensive (few hundred
+ * nanoseconds), so keep the check for 64-bits explicit here to save
+ * one register write per-packet on 32-bits platforms.
+ */
+static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
+ void __iomem *d,
+ dma_addr_t addr)
+{
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
+ d + DESC_ADDR_HI_STATUS_LEN);
+#endif
+ __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
+}
+
+static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
+ struct dma_desc *desc,
+ unsigned int port)
+{
+ /* Ports are latched, so write upper address first */
+ tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
+ tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
+}
+
+/* Ethtool operations */
+static int bcm_sysport_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+ reg = rxchk_readl(priv, RXCHK_CONTROL);
+ if (priv->rx_csum_en)
+ reg |= RXCHK_EN;
+ else
+ reg &= ~RXCHK_EN;
+
+ /* If UniMAC forwards CRC, we need to skip over it to get
+ * a valid CHK bit to be set in the per-packet status word
+ */
+ if (priv->rx_csum_en && priv->crc_fwd)
+ reg |= RXCHK_SKIP_FCS;
+ else
+ reg &= ~RXCHK_SKIP_FCS;
+
+ rxchk_writel(priv, reg, RXCHK_CONTROL);
+
+ return 0;
+}
+
+static int bcm_sysport_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ /* Hardware transmit checksum requires us to enable the Transmit status
+ * block prepended to the packet contents
+ */
+ priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ if (priv->tsb_en)
+ reg |= TSB_EN;
+ else
+ reg &= ~TSB_EN;
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
+ return 0;
+}
+
+static int bcm_sysport_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ dev->features;
+ netdev_features_t wanted = dev->wanted_features;
+ int ret = 0;
+
+ if (changed & NETIF_F_RXCSUM)
+ ret = bcm_sysport_set_rx_csum(dev, wanted);
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+ ret = bcm_sysport_set_tx_csum(dev, wanted);
+
+ return ret;
+}
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
+ /* general stats */
+ STAT_NETDEV(rx_packets),
+ STAT_NETDEV(tx_packets),
+ STAT_NETDEV(rx_bytes),
+ STAT_NETDEV(tx_bytes),
+ STAT_NETDEV(rx_errors),
+ STAT_NETDEV(tx_errors),
+ STAT_NETDEV(rx_dropped),
+ STAT_NETDEV(tx_dropped),
+ STAT_NETDEV(multicast),
+ /* UniMAC RSV counters */
+ STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+ STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+ STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+ STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+ STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+ STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+ STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+ STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+ STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+ STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+ STAT_MIB_RX("rx_pkts", mib.rx.pkt),
+ STAT_MIB_RX("rx_bytes", mib.rx.bytes),
+ STAT_MIB_RX("rx_multicast", mib.rx.mca),
+ STAT_MIB_RX("rx_broadcast", mib.rx.bca),
+ STAT_MIB_RX("rx_fcs", mib.rx.fcs),
+ STAT_MIB_RX("rx_control", mib.rx.cf),
+ STAT_MIB_RX("rx_pause", mib.rx.pf),
+ STAT_MIB_RX("rx_unknown", mib.rx.uo),
+ STAT_MIB_RX("rx_align", mib.rx.aln),
+ STAT_MIB_RX("rx_outrange", mib.rx.flr),
+ STAT_MIB_RX("rx_code", mib.rx.cde),
+ STAT_MIB_RX("rx_carrier", mib.rx.fcr),
+ STAT_MIB_RX("rx_oversize", mib.rx.ovr),
+ STAT_MIB_RX("rx_jabber", mib.rx.jbr),
+ STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
+ STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
+ STAT_MIB_RX("rx_unicast", mib.rx.uc),
+ STAT_MIB_RX("rx_ppp", mib.rx.ppp),
+ STAT_MIB_RX("rx_crc", mib.rx.rcrc),
+ /* UniMAC TSV counters */
+ STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+ STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+ STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+ STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+ STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+ STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+ STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+ STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+ STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+ STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+ STAT_MIB_TX("tx_pkts", mib.tx.pkts),
+ STAT_MIB_TX("tx_multicast", mib.tx.mca),
+ STAT_MIB_TX("tx_broadcast", mib.tx.bca),
+ STAT_MIB_TX("tx_pause", mib.tx.pf),
+ STAT_MIB_TX("tx_control", mib.tx.cf),
+ STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
+ STAT_MIB_TX("tx_oversize", mib.tx.ovr),
+ STAT_MIB_TX("tx_defer", mib.tx.drf),
+ STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
+ STAT_MIB_TX("tx_single_col", mib.tx.scl),
+ STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
+ STAT_MIB_TX("tx_late_col", mib.tx.lcl),
+ STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
+ STAT_MIB_TX("tx_frags", mib.tx.frg),
+ STAT_MIB_TX("tx_total_col", mib.tx.ncl),
+ STAT_MIB_TX("tx_jabber", mib.tx.jbr),
+ STAT_MIB_TX("tx_bytes", mib.tx.bytes),
+ STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
+ STAT_MIB_TX("tx_unicast", mib.tx.uc),
+ /* UniMAC RUNT counters */
+ STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+ STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+ STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+ STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+ /* RXCHK misc statistics */
+ STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
+ STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
+ RXCHK_OTHER_DISC_CNTR),
+ /* RBUF misc statistics */
+ STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
+ STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+};
+
+#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
+
+static void bcm_sysport_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, "0.1", sizeof(info->version));
+ strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ info->n_stats = BCM_SYSPORT_STATS_LEN;
+}
+
+static u32 bcm_sysport_get_msglvl(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = enable;
+}
+
+static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCM_SYSPORT_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcm_sysport_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcm_sysport_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
+{
+ int i, j = 0;
+
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ const struct bcm_sysport_stats *s;
+ u8 offset = 0;
+ u32 val = 0;
+ char *p;
+
+ s = &bcm_sysport_gstrings_stats[i];
+ switch (s->type) {
+ case BCM_SYSPORT_STAT_NETDEV:
+ continue;
+ case BCM_SYSPORT_STAT_MIB_RX:
+ case BCM_SYSPORT_STAT_MIB_TX:
+ case BCM_SYSPORT_STAT_RUNT:
+ if (s->type != BCM_SYSPORT_STAT_MIB_RX)
+ offset = UMAC_MIB_STAT_OFFSET;
+ val = umac_readl(priv, UMAC_MIB_START + j + offset);
+ break;
+ case BCM_SYSPORT_STAT_RXCHK:
+ val = rxchk_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rxchk_writel(priv, 0, s->reg_offset);
+ break;
+ case BCM_SYSPORT_STAT_RBUF:
+ val = rbuf_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rbuf_writel(priv, 0, s->reg_offset);
+ break;
+ }
+
+ j += s->stat_sizeof;
+ p = (char *)priv + s->stat_offset;
+ *(u32 *)p = val;
+ }
+
+ netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
+}
+
+static void bcm_sysport_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_running(dev))
+ bcm_sysport_update_mib_counters(priv);
+
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ const struct bcm_sysport_stats *s;
+ char *p;
+
+ s = &bcm_sysport_gstrings_stats[i];
+ if (s->type == BCM_SYSPORT_STAT_NETDEV)
+ p = (char *)&dev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
+ data[i] = *(u32 *)p;
+ }
+}
+
+static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
+{
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+ dma_addr_t mapping;
+ int ret;
+
+ cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+ if (!cb->skb) {
+ netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
+ return -ENOMEM;
+ }
+
+ mapping = dma_map_single(kdev, cb->skb->data,
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ bcm_sysport_free_cb(cb);
+ netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
+ return ret;
+ }
+
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
+ priv->rx_bd_assign_index++;
+ priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+ priv->rx_bd_assign_ptr = priv->rx_bds +
+ (priv->rx_bd_assign_index * DESC_SIZE);
+
+ netif_dbg(priv, rx_status, ndev, "RX refill\n");
+
+ return 0;
+}
+
+static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
+{
+ struct bcm_sysport_cb *cb;
+ int ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+ if (cb->skb)
+ continue;
+
+ ret = bcm_sysport_rx_refill(priv, cb);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* Poll the hardware for up to budget packets to process */
+static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
+ unsigned int budget)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+ unsigned int processed = 0, to_process;
+ struct bcm_sysport_cb *cb;
+ struct sk_buff *skb;
+ unsigned int p_index;
+ u16 len, status;
+ struct rsb *rsb;
+
+ /* Determine how much we should process since last call */
+ p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+ p_index &= RDMA_PROD_INDEX_MASK;
+
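+ /* The producer index is a free-running 16-bit counter; handle wrap-around */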
+ if (p_index < priv->rx_c_index)
+ to_process = (RDMA_CONS_INDEX_MASK + 1) -
+ priv->rx_c_index + p_index;
+ else
+ to_process = p_index - priv->rx_c_index;
+
+ netif_dbg(priv, rx_status, ndev,
+ "p_index=%d rx_c_index=%d to_process=%d\n",
+ p_index, priv->rx_c_index, to_process);
+
+ while ((processed < to_process) &&
+ (processed < budget)) {
+
+ cb = &priv->rx_cbs[priv->rx_read_ptr];
+ skb = cb->skb;
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+
+ /* Extract the Receive Status Block prepended */
+ rsb = (struct rsb *)skb->data;
+ len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
+ status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
+ DESC_STATUS_MASK;
+
+ processed++;
+ priv->rx_read_ptr++;
+ if (priv->rx_read_ptr == priv->num_rx_bds)
+ priv->rx_read_ptr = 0;
+
+ netif_dbg(priv, rx_status, ndev,
+ "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
+ p_index, priv->rx_c_index, priv->rx_read_ptr,
+ len, status);
+
+ if (unlikely(!skb)) {
+ netif_err(priv, rx_err, ndev, "out of memory!\n");
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ goto refill;
+ }
+
+ if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
+ netif_err(priv, rx_status, ndev, "fragmented packet!\n");
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ bcm_sysport_free_cb(cb);
+ goto refill;
+ }
+
+ if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
+ netif_err(priv, rx_err, ndev, "error packet\n");
+ if (status & RX_STATUS_OVFLOW)
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ bcm_sysport_free_cb(cb);
+ goto refill;
+ }
+
+ skb_put(skb, len);
+
+ /* Hardware validated our checksum */
+ if (likely(status & DESC_L4_CSUM))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* The hardware prepends 2 bytes of padding between the Ethernet
+ * and IP headers, plus the Receive Status Block; strip all of
+ * this off the SKB.
+ */
+ skb_pull(skb, sizeof(*rsb) + 2);
+ len -= (sizeof(*rsb) + 2);
+
+ /* UniMAC may forward CRC */
+ if (priv->crc_fwd) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+
+ napi_gro_receive(&priv->napi, skb);
+refill:
+ bcm_sysport_rx_refill(priv, cb);
+ }
+
+ return processed;
+}
+
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb,
+ unsigned int *bytes_compl,
+ unsigned int *pkts_compl)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+
+ if (cb->skb) {
+ ndev->stats.tx_bytes += cb->skb->len;
+ *bytes_compl += cb->skb->len;
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ ndev->stats.tx_packets++;
+ (*pkts_compl)++;
+ bcm_sysport_free_cb(cb);
+ /* SKB fragment */
+ } else if (dma_unmap_addr(cb, dma_addr)) {
+ ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+ dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+}
+
+/* Reclaim queued SKBs for transmission completion, lockless version */
+static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ struct net_device *ndev = priv->netdev;
+ unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct bcm_sysport_cb *cb;
+ struct netdev_queue *txq;
+ u32 hw_ind;
+
+ txq = netdev_get_tx_queue(ndev, ring->index);
+
+ /* Compute how many descriptors have been processed since last call */
+ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+ c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+ ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
+
+ last_c_index = ring->c_index;
+ num_tx_cbs = ring->size;
+
+ c_index &= (num_tx_cbs - 1);
+
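+ /* Number of newly completed descriptors, accounting for wrap-around */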
+ if (c_index >= last_c_index)
+ last_tx_cn = c_index - last_c_index;
+ else
+ last_tx_cn = num_tx_cbs - last_c_index + c_index;
+
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+ ring->index, c_index, last_tx_cn, last_c_index);
+
+ while (last_tx_cn-- > 0) {
+ cb = ring->cbs + last_c_index;
+ bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+
+ ring->desc_count++;
+ last_c_index++;
+ last_c_index &= (num_tx_cbs - 1);
+ }
+
+ ring->c_index = c_index;
+
+ if (netif_tx_queue_stopped(txq) && pkts_compl)
+ netif_tx_wake_queue(txq);
+
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+ ring->index, ring->c_index, pkts_compl, bytes_compl);
+
+ return pkts_compl;
+}
+
+/* Locked version of the per-ring TX reclaim routine */
+static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ unsigned int released;
+
+ spin_lock(&ring->lock);
+ released = __bcm_sysport_tx_reclaim(priv, ring);
+ spin_unlock(&ring->lock);
+
+ return released;
+}
+
+static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcm_sysport_tx_ring *ring =
+ container_of(napi, struct bcm_sysport_tx_ring, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ /* re-enable TX interrupt */
+ intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+ }
+
+ return work_done;
+}
+
+static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
+{
+ unsigned int q;
+
+ for (q = 0; q < priv->netdev->num_tx_queues; q++)
+ bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
+}
+
+static int bcm_sysport_poll(struct napi_struct *napi, int budget)
+{
+ struct bcm_sysport_priv *priv =
+ container_of(napi, struct bcm_sysport_priv, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcm_sysport_desc_rx(priv, budget);
+
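+ /* Advance the RX consumer index and report it back to the hardware */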
+ priv->rx_c_index += work_done;
+ priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
+ rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ /* re-enable RX interrupts */
+ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
+ }
+
+ return work_done;
+}
+
+
+/* RX and misc interrupt routine */
+static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
+ ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+ if (unlikely(priv->irq0_stat == 0)) {
+ netdev_warn(priv->netdev, "spurious RX interrupt\n");
+ return IRQ_NONE;
+ }
+
+ if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ /* disable RX interrupts */
+ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ /* TX ring is full, perform a full reclaim since we do not know
+ * which ring triggered this interrupt
+ */
+ if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
+ bcm_sysport_tx_reclaim_all(priv);
+
+ return IRQ_HANDLED;
+}
+
+/* TX interrupt service routine */
+static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *txr;
+ unsigned int ring;
+
+ priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
+ ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+ if (unlikely(priv->irq1_stat == 0)) {
+ netdev_warn(priv->netdev, "spurious TX interrupt\n");
+ return IRQ_NONE;
+ }
+
+ for (ring = 0; ring < dev->num_tx_queues; ring++) {
+ if (!(priv->irq1_stat & BIT(ring)))
+ continue;
+
+ txr = &priv->tx_rings[ring];
+
+ if (likely(napi_schedule_prep(&txr->napi))) {
+ intrl2_1_mask_set(priv, BIT(ring));
+ __napi_schedule(&txr->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
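+/* Prepend a Transmit Status Block (TSB) carrying L4 checksum offload hints */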
+static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sk_buff *nskb;
+ struct tsb *tsb;
+ u32 csum_info;
+ u8 ip_proto;
+ u16 csum_start;
+ u16 ip_ver;
+
+ /* Re-allocate SKB if needed */
+ if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
+ nskb = skb_realloc_headroom(skb, sizeof(*tsb));
+ dev_kfree_skb(skb);
+ if (!nskb) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+ skb = nskb;
+ }
+
+ tsb = (struct tsb *)skb_push(skb, sizeof(*tsb));
+ /* Zero-out TSB by default */
+ memset(tsb, 0, sizeof(*tsb));
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ip_ver = ntohs(skb->protocol);
+ switch (ip_ver) {
+ case ETH_P_IP:
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return 0;
+ }
+
+ /* Get the checksum offset and the L4 (transport) offset */
+ csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
+ csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
+ csum_info |= (csum_start << L4_PTR_SHIFT);
+
+ if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+ csum_info |= L4_LENGTH_VALID;
+ if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+ csum_info |= L4_UDP;
+ } else
+ csum_info = 0;
+
+ tsb->l4_ptr_dest_map = csum_info;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct bcm_sysport_tx_ring *ring;
+ struct bcm_sysport_cb *cb;
+ struct netdev_queue *txq;
+ struct dma_desc *desc;
+ dma_addr_t mapping;
+ u32 len_status;
+ u16 queue;
+ int ret;
+
+ queue = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, queue);
+ ring = &priv->tx_rings[queue];
+
+ /* lock against tx reclaim in BH context */
+ spin_lock(&ring->lock);
+ if (unlikely(ring->desc_count == 0)) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev, "queue %d awake and ring full!\n", queue);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* Insert TSB and checksum infos */
+ if (priv->tsb_en) {
+ ret = bcm_sysport_insert_tsb(skb, dev);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ mapping = dma_map_single(kdev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(kdev, mapping)) {
+ netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
+ skb->data, skb->len);
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ /* Remember the SKB for future freeing */
+ cb = &ring->cbs[ring->curr_desc];
+ cb->skb = skb;
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_unmap_len_set(cb, dma_len, skb->len);
+
+ /* Fetch a descriptor entry from our pool */
+ desc = ring->desc_cpu;
+
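+ /* The single coherent descriptor is reused; the port write below
+ * pushes a copy into the hardware internal linked-list
+ */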
+ desc->addr_lo = lower_32_bits(mapping);
+ len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
+ len_status |= (skb->len << DESC_LEN_SHIFT);
+ len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
+ DESC_STATUS_SHIFT;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
+
+ ring->curr_desc++;
+ if (ring->curr_desc == ring->size)
+ ring->curr_desc = 0;
+ ring->desc_count--;
+
+ /* Ensure write completion of the descriptor status/length
+ * in DRAM before the System Port WRITE_PORT register latches
+ * the value
+ */
+ wmb();
+ desc->addr_status_len = len_status;
+ wmb();
+
+ /* Write this descriptor address to the RING write port */
+ tdma_port_write_desc_addr(priv, desc, ring->index);
+
+ /* Check ring space and update SW control flow */
+ if (ring->desc_count == 0)
+ netif_tx_stop_queue(txq);
+
+ netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
+ ring->index, ring->desc_count, ring->curr_desc);
+
+ ret = NETDEV_TX_OK;
+out:
+ spin_unlock(&ring->lock);
+ return ret;
+}
+
+static void bcm_sysport_tx_timeout(struct net_device *dev)
+{
+ netdev_warn(dev, "transmit timeout!\n");
+
+ dev->trans_start = jiffies;
+ dev->stats.tx_errors++;
+
+ netif_tx_wake_all_queues(dev);
+}
+
+/* phylib adjust link callback */
+static void bcm_sysport_adj_link(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned int changed = 0;
+ u32 cmd_bits = 0, reg;
+
+ if (priv->old_link != phydev->link) {
+ changed = 1;
+ priv->old_link = phydev->link;
+ }
+
+ if (priv->old_duplex != phydev->duplex) {
+ changed = 1;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ cmd_bits = CMD_SPEED_2500;
+ break;
+ case SPEED_1000:
+ cmd_bits = CMD_SPEED_1000;
+ break;
+ case SPEED_100:
+ cmd_bits = CMD_SPEED_100;
+ break;
+ case SPEED_10:
+ cmd_bits = CMD_SPEED_10;
+ break;
+ default:
+ break;
+ }
+ cmd_bits <<= CMD_SPEED_SHIFT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ cmd_bits |= CMD_HD_EN;
+
+ if (priv->old_pause != phydev->pause) {
+ changed = 1;
+ priv->old_pause = phydev->pause;
+ }
+
+ if (!phydev->pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
+ CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ if (changed)
+ phy_print_status(priv->phydev);
+}
+
+static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
+ unsigned int index)
+{
+ struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+ struct device *kdev = &priv->pdev->dev;
+ size_t size;
+ void *p;
+ u32 reg;
+
+ /* Simple descriptors partitioning for now */
+ size = 256;
+
+ /* We just need one DMA descriptor which is DMA-able, since writing to
+ * the port will allocate a new descriptor in its internal linked-list
+ */
+ p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc),
+ &ring->desc_dma, GFP_KERNEL);
+ if (!p) {
+ netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
+ if (!ring->cbs) {
+ netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize SW view of the ring */
+ spin_lock_init(&ring->lock);
+ ring->priv = priv;
+ netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+ ring->index = index;
+ ring->size = size;
+ ring->alloc_size = ring->size;
+ ring->desc_cpu = p;
+ ring->desc_count = ring->size;
+ ring->curr_desc = 0;
+
+ /* Initialize HW ring */
+ tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
+ tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
+ tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+
+ /* Program the number of descriptors as MAX_THRESHOLD and a
+ * hysteresis trigger of one descriptor
+ */
+ tdma_writel(priv, ring->size |
+ 1 << RING_HYST_THRESH_SHIFT,
+ TDMA_DESC_RING_MAX_HYST(index));
+
+ /* Enable the ring queue in the arbiter */
+ reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
+ reg |= (1 << index);
+ tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
+
+ napi_enable(&ring->napi);
+
+ netif_dbg(priv, hw, priv->netdev,
+ "TDMA cfg, size=%d, desc_cpu=%p\n",
+ ring->size, ring->desc_cpu);
+
+ return 0;
+}
+
+static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
+ unsigned int index)
+{
+ struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+ struct device *kdev = &priv->pdev->dev;
+ u32 reg;
+
+ /* Caller should stop the TDMA engine */
+ reg = tdma_readl(priv, TDMA_STATUS);
+ if (!(reg & TDMA_DISABLED))
+ netdev_warn(priv->netdev, "TDMA not stopped!\n");
+
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
+
+ bcm_sysport_tx_reclaim(priv, ring);
+
+ kfree(ring->cbs);
+ ring->cbs = NULL;
+
+ if (ring->desc_dma) {
+ dma_free_coherent(kdev, sizeof(struct dma_desc), ring->desc_cpu,
+ ring->desc_dma);
+ ring->desc_dma = 0;
+ }
+ ring->size = 0;
+ ring->alloc_size = 0;
+
+ netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
+}
+
+/* RDMA helper */
+static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = rdma_readl(priv, RDMA_CONTROL);
+ if (enable)
+ reg |= RDMA_EN;
+ else
+ reg &= ~RDMA_EN;
+ rdma_writel(priv, reg, RDMA_CONTROL);
+
+ /* Poll for RDMA enable/disable completion */
+ do {
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!!(reg & RDMA_DISABLED) == !enable)
+ return 0;
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
+
+ return -ETIMEDOUT;
+}
+
+/* TDMA helper */
+static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ if (enable)
+ reg |= TDMA_EN;
+ else
+ reg &= ~TDMA_EN;
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
+ /* Poll for TDMA enable/disable completion */
+ do {
+ reg = tdma_readl(priv, TDMA_STATUS);
+ if (!!(reg & TDMA_DISABLED) == !enable)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
+
+ return -ETIMEDOUT;
+}
+
+static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
+{
+ u32 reg;
+ int ret;
+
+ /* Initialize SW view of the RX ring */
+ priv->num_rx_bds = NUM_RX_DESC;
+ priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
+ priv->rx_bd_assign_ptr = priv->rx_bds;
+ priv->rx_bd_assign_index = 0;
+ priv->rx_c_index = 0;
+ priv->rx_read_ptr = 0;
+ priv->rx_cbs = kzalloc(priv->num_rx_bds *
+ sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+ if (!priv->rx_cbs) {
+ netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = bcm_sysport_alloc_rx_bufs(priv);
+ if (ret) {
+ netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
+ return ret;
+ }
+
+ /* Initialize HW, ensure RDMA is disabled */
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!(reg & RDMA_DISABLED))
+ rdma_enable_set(priv, 0);
+
+ rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
+ rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
+ rdma_writel(priv, 0, RDMA_PROD_INDEX);
+ rdma_writel(priv, 0, RDMA_CONS_INDEX);
+ rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
+ RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
+ /* Operate the queue in ring mode */
+ rdma_writel(priv, 0, RDMA_START_ADDR_HI);
+ rdma_writel(priv, 0, RDMA_START_ADDR_LO);
+ rdma_writel(priv, 0, RDMA_END_ADDR_HI);
+ rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+
+ rdma_writel(priv, 1, RDMA_MBDONE_INTR);
+
+ netif_dbg(priv, hw, priv->netdev,
+ "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
+ priv->num_rx_bds, priv->rx_bds);
+
+ return 0;
+}
+
+static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
+{
+ struct bcm_sysport_cb *cb;
+ unsigned int i;
+ u32 reg;
+
+ /* Caller should ensure RDMA is disabled */
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!(reg & RDMA_DISABLED))
+ netdev_warn(priv->netdev, "RDMA not stopped!\n");
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[i];
+ if (dma_unmap_addr(cb, dma_addr))
+ dma_unmap_single(&priv->pdev->dev,
+ dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+ bcm_sysport_free_cb(cb);
+ }
+
+ kfree(priv->rx_cbs);
+ priv->rx_cbs = NULL;
+
+ netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
+}
+
+static void bcm_sysport_set_rx_mode(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ if (dev->flags & IFF_PROMISC)
+ reg |= CMD_PROMISC;
+ else
+ reg &= ~CMD_PROMISC;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* No support for ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI)
+ return;
+}
+
+static inline void umac_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ u32 reg;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ if (enable)
+ reg |= CMD_RX_EN | CMD_TX_EN;
+ else
+ reg &= ~(CMD_RX_EN | CMD_TX_EN);
+ umac_writel(priv, reg, UMAC_CMD);
+}
+
+static inline int umac_reset(struct bcm_sysport_priv *priv)
+{
+ unsigned int timeout = 0;
+ u32 reg;
+ int ret = 0;
+
+ umac_writel(priv, 0, UMAC_CMD);
+ while (timeout++ < 1000) {
+ reg = umac_readl(priv, UMAC_CMD);
+ if (!(reg & CMD_SW_RESET))
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == 1000) {
+ dev_err(&priv->pdev->dev,
+ "timeout waiting for MAC to come out of reset\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
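+/* Program the MAC address: first four bytes in UMAC_MAC0, last two in UMAC_MAC1 */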
+static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
+ unsigned char *addr)
+{
+ umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | addr[3], UMAC_MAC0);
+ umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
+
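+/* Assert the RX and TX flush controls at TOPCTRL level, then release them */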
+static void topctrl_flush(struct bcm_sysport_priv *priv)
+{
+ topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+ topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+ mdelay(1);
+ topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+ topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+}
+
+static int bcm_sysport_open(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+ int ret;
+
+ /* Reset UniMAC */
+ ret = umac_reset(priv);
+ if (ret) {
+ netdev_err(dev, "UniMAC reset failed\n");
+ return ret;
+ }
+
+ /* Flush TX and RX FIFOs at TOPCTRL level */
+ topctrl_flush(priv);
+
+ /* Disable the UniMAC RX/TX */
+ umac_enable_set(priv, 0);
+
+ /* Enable RBUF 2bytes alignment and Receive Status Block */
+ reg = rbuf_readl(priv, RBUF_CONTROL);
+ reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+ rbuf_writel(priv, reg, RBUF_CONTROL);
+
+ /* Set maximum frame length */
+ umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+ /* Set MAC address */
+ umac_set_hw_addr(priv, dev->dev_addr);
+
+ /* Read CRC forward */
+ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+
+ priv->phydev = of_phy_connect_fixed_link(dev, bcm_sysport_adj_link,
+ priv->phy_interface);
+ if (!priv->phydev) {
+ netdev_err(dev, "could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ /* Reset housekeeping link status */
+ priv->old_duplex = -1;
+ priv->old_link = -1;
+ priv->old_pause = -1;
+
+ /* mask all interrupts before requesting them */
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+ ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request RX interrupt\n");
+ goto out_phy_disconnect;
+ }
+
+ ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request TX interrupt\n");
+ goto out_free_irq0;
+ }
+
+ /* Initialize both hardware and software ring */
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ ret = bcm_sysport_init_tx_ring(priv, i);
+ if (ret) {
+ netdev_err(dev, "failed to initialize TX ring %d\n",
+ i);
+ goto out_free_tx_ring;
+ }
+ }
+
+ /* Initialize linked-list */
+ tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+ /* Initialize RX ring */
+ ret = bcm_sysport_init_rx_ring(priv);
+ if (ret) {
+ netdev_err(dev, "failed to initialize RX ring\n");
+ goto out_free_rx_ring;
+ }
+
+ /* Turn on RDMA */
+ ret = rdma_enable_set(priv, 1);
+ if (ret)
+ goto out_free_rx_ring;
+
+ /* Enable RX interrupt and TX ring full interrupt */
+ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
+ /* Turn on TDMA */
+ ret = tdma_enable_set(priv, 1);
+ if (ret)
+ goto out_clear_rx_int;
+
+ /* Enable NAPI */
+ napi_enable(&priv->napi);
+
+ /* Turn on UniMAC TX/RX */
+ umac_enable_set(priv, 1);
+
+ phy_start(priv->phydev);
+
+ /* Enable TX interrupts for the 32 TXQs */
+ intrl2_1_mask_clear(priv, 0xffffffff);
+
+ /* Last call before we start the real business */
+ netif_tx_start_all_queues(dev);
+
+ return 0;
+
+out_clear_rx_int:
+ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+out_free_rx_ring:
+ bcm_sysport_fini_rx_ring(priv);
+out_free_tx_ring:
+ for (i = 0; i < dev->num_tx_queues; i++)
+ bcm_sysport_fini_tx_ring(priv, i);
+ free_irq(priv->irq1, dev);
+out_free_irq0:
+ free_irq(priv->irq0, dev);
+out_phy_disconnect:
+ phy_disconnect(priv->phydev);
+ return ret;
+}
+
+static int bcm_sysport_stop(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+ int ret;
+
+ /* stop all software from updating hardware */
+ netif_tx_stop_all_queues(dev);
+ napi_disable(&priv->napi);
+ phy_stop(priv->phydev);
+
+ /* mask all interrupts */
+ intrl2_0_mask_set(priv, 0xffffffff);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_mask_set(priv, 0xffffffff);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+ /* Disable UniMAC RX */
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_RX_EN;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ ret = tdma_enable_set(priv, 0);
+ if (ret) {
+ netdev_err(dev, "timeout disabling TDMA\n");
+ return ret;
+ }
+
+ /* Wait for a maximum packet size to be drained */
+ usleep_range(2000, 3000);
+
+ ret = rdma_enable_set(priv, 0);
+ if (ret) {
+ netdev_err(dev, "timeout disabling RDMA\n");
+ return ret;
+ }
+
+ /* Disable UniMAC TX */
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_TX_EN;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* Free RX/TX rings SW structures */
+ for (i = 0; i < dev->num_tx_queues; i++)
+ bcm_sysport_fini_tx_ring(priv, i);
+ bcm_sysport_fini_rx_ring(priv);
+
+ free_irq(priv->irq0, dev);
+ free_irq(priv->irq1, dev);
+
+ /* Disconnect from PHY */
+ phy_disconnect(priv->phydev);
+
+ return 0;
+}
+
+static struct ethtool_ops bcm_sysport_ethtool_ops = {
+ .get_settings = bcm_sysport_get_settings,
+ .set_settings = bcm_sysport_set_settings,
+ .get_drvinfo = bcm_sysport_get_drvinfo,
+ .get_msglevel = bcm_sysport_get_msglvl,
+ .set_msglevel = bcm_sysport_set_msglvl,
+ .get_link = ethtool_op_get_link,
+ .get_strings = bcm_sysport_get_strings,
+ .get_ethtool_stats = bcm_sysport_get_stats,
+ .get_sset_count = bcm_sysport_get_sset_count,
+};
+
+static const struct net_device_ops bcm_sysport_netdev_ops = {
+ .ndo_start_xmit = bcm_sysport_xmit,
+ .ndo_tx_timeout = bcm_sysport_tx_timeout,
+ .ndo_open = bcm_sysport_open,
+ .ndo_stop = bcm_sysport_stop,
+ .ndo_set_features = bcm_sysport_set_features,
+ .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
+};
+
+#define REV_FMT "v%2x.%02x"
+
+static int bcm_sysport_probe(struct platform_device *pdev)
+{
+ struct bcm_sysport_priv *priv;
+ struct device_node *dn;
+ struct net_device *dev;
+ const void *macaddr;
+ struct resource *r;
+ u32 txq, rxq;
+ int ret;
+
+ dn = pdev->dev.of_node;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* Read the Transmit/Receive Queue properties */
+ if (of_property_read_u32(dn, "systemport,num-txq", &txq))
+ txq = TDMA_NUM_RINGS;
+ if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
+ rxq = 1;
+
+ dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
+ if (!dev)
+ return -ENOMEM;
+
+ /* Initialize private members */
+ priv = netdev_priv(dev);
+
+ priv->irq0 = platform_get_irq(pdev, 0);
+ priv->irq1 = platform_get_irq(pdev, 1);
+ if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+ dev_err(&pdev->dev, "invalid interrupts\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ priv->base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!priv->base) {
+ dev_err(&pdev->dev, "register remap failed\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ priv->netdev = dev;
+ priv->pdev = pdev;
+
+ priv->phy_interface = of_get_phy_mode(dn);
+ /* Default to GMII interface mode */
+ if (priv->phy_interface < 0)
+ priv->phy_interface = PHY_INTERFACE_MODE_GMII;
+
+ /* Initialize netdevice members */
+ macaddr = of_get_mac_address(dn);
+ if (!macaddr || !is_valid_ether_addr(macaddr)) {
+ dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+ random_ether_addr(dev->dev_addr);
+ } else {
+ ether_addr_copy(dev->dev_addr, macaddr);
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_ETHTOOL_OPS(dev, &bcm_sysport_ethtool_ops);
+ dev->netdev_ops = &bcm_sysport_netdev_ops;
+ netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+
+ /* HW supported features, none enabled by default */
+ dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ /* Set the needed headroom once and for all */
+ BUILD_BUG_ON(sizeof(struct tsb) != 8);
+ dev->needed_headroom += sizeof(struct tsb);
+
+ /* We are interfaced to a switch which handles the multicast
+ * filtering for us, so we do not support programming any
+ * multicast hash table in this Ethernet MAC.
+ */
+ dev->flags &= ~IFF_MULTICAST;
+
+ ret = register_netdev(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register net_device\n");
+ goto err;
+ }
+
+ priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
+ dev_info(&pdev->dev,
+ "Broadcom SYSTEMPORT" REV_FMT
+ " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+ (priv->rev >> 8) & 0xff, priv->rev & 0xff,
+ priv->base, priv->irq0, priv->irq1, txq, rxq);
+
+ return 0;
+err:
+ free_netdev(dev);
+ return ret;
+}
+
+static int bcm_sysport_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+
+ /* Not much to do, ndo_close has been called
+ * and we use managed allocations
+ */
+ unregister_netdev(dev);
+ free_netdev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id bcm_sysport_of_match[] = {
+ { .compatible = "brcm,systemport-v1.00" },
+ { .compatible = "brcm,systemport" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm_sysport_driver = {
+ .probe = bcm_sysport_probe,
+ .remove = bcm_sysport_remove,
+ .driver = {
+ .name = "brcm-systemport",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm_sysport_of_match,
+ },
+};
+module_platform_driver(bcm_sysport_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
+MODULE_ALIAS("platform:brcm-systemport");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
new file mode 100644
index 000000000000..a0441e7c83cd
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -0,0 +1,677 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __BCM_SYSPORT_H
+#define __BCM_SYSPORT_H
+
+#include <linux/if_vlan.h>
+
+/* Receive/transmit descriptor format */
+#define DESC_ADDR_HI_STATUS_LEN 0x00
+#define DESC_ADDR_HI_SHIFT 0
+#define DESC_ADDR_HI_MASK 0xff
+#define DESC_STATUS_SHIFT 8
+#define DESC_STATUS_MASK 0x3ff
+#define DESC_LEN_SHIFT 18
+#define DESC_LEN_MASK 0x7fff
+#define DESC_ADDR_LO 0x04
+
+/* HW supports 40-bit addressing, hence the two 32-bit words per descriptor */
+#define DESC_SIZE (WORDS_PER_DESC * sizeof(u32))
+
+/* Default RX buffer allocation size */
+#define RX_BUF_LENGTH 2048
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528;
+ * adding ENET_PAD(8) rounds this up to 1536, a multiple of 256 bytes
+ */
+#define ENET_BRCM_TAG_LEN 6
+#define ENET_PAD 8
+#define UMAC_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+ ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+
+/* Transmit status block */
+struct tsb {
+ u32 pcp_dei_vid;
+#define PCP_DEI_MASK 0xf
+#define VID_SHIFT 4
+#define VID_MASK 0xfff
+ u32 l4_ptr_dest_map;
+#define L4_CSUM_PTR_MASK 0x1ff
+#define L4_PTR_SHIFT 9
+#define L4_PTR_MASK 0x1ff
+#define L4_UDP (1 << 18)
+#define L4_LENGTH_VALID (1 << 19)
+#define DEST_MAP_SHIFT 20
+#define DEST_MAP_MASK 0x1ff
+};
+
+/* Receive status block uses the same
+ * definitions as the DMA descriptor
+ */
+struct rsb {
+ u32 rx_status_len;
+ u32 brcm_egress_tag;
+};
+
+/* Common Receive/Transmit status bits */
+#define DESC_L4_CSUM (1 << 7)
+#define DESC_SOP (1 << 8)
+#define DESC_EOP (1 << 9)
+
+/* Receive Status bits */
+#define RX_STATUS_UCAST 0
+#define RX_STATUS_BCAST 0x04
+#define RX_STATUS_MCAST 0x08
+#define RX_STATUS_L2_MCAST 0x0c
+#define RX_STATUS_ERR (1 << 4)
+#define RX_STATUS_OVFLOW (1 << 5)
+#define RX_STATUS_PARSE_FAIL (1 << 6)
+
+/* Transmit Status bits */
+#define TX_STATUS_VLAN_NO_ACT 0x00
+#define TX_STATUS_VLAN_PCP_TSB 0x01
+#define TX_STATUS_VLAN_QUEUE 0x02
+#define TX_STATUS_VLAN_VID_TSB 0x03
+#define TX_STATUS_OWR_CRC (1 << 2)
+#define TX_STATUS_APP_CRC (1 << 3)
+#define TX_STATUS_BRCM_TAG_NO_ACT 0
+#define TX_STATUS_BRCM_TAG_ZERO 0x10
+#define TX_STATUS_BRCM_TAG_ONE_QUEUE 0x20
+#define TX_STATUS_BRCM_TAG_ONE_TSB 0x30
+#define TX_STATUS_SKIP_BYTES (1 << 6)
+
+/* Specific register definitions */
+#define SYS_PORT_TOPCTRL_OFFSET 0
+#define REV_CNTL 0x00
+#define REV_MASK 0xffff
+
+#define RX_FLUSH_CNTL 0x04
+#define RX_FLUSH (1 << 0)
+
+#define TX_FLUSH_CNTL 0x08
+#define TX_FLUSH (1 << 0)
+
+#define MISC_CNTL 0x0c
+#define SYS_CLK_SEL (1 << 0)
+#define TDMA_EOP_SEL (1 << 1)
+
+/* Level-2 Interrupt controller offsets and defines */
+#define SYS_PORT_INTRL2_0_OFFSET 0x200
+#define SYS_PORT_INTRL2_1_OFFSET 0x240
+#define INTRL2_CPU_STATUS 0x00
+#define INTRL2_CPU_SET 0x04
+#define INTRL2_CPU_CLEAR 0x08
+#define INTRL2_CPU_MASK_STATUS 0x0c
+#define INTRL2_CPU_MASK_SET 0x10
+#define INTRL2_CPU_MASK_CLEAR 0x14
+
+/* Level-2 instance 0 interrupt bits */
+#define INTRL2_0_GISB_ERR (1 << 0)
+#define INTRL2_0_RBUF_OVFLOW (1 << 1)
+#define INTRL2_0_TBUF_UNDFLOW (1 << 2)
+#define INTRL2_0_MPD (1 << 3)
+#define INTRL2_0_BRCM_MATCH_TAG (1 << 4)
+#define INTRL2_0_RDMA_MBDONE (1 << 5)
+#define INTRL2_0_OVER_MAX_THRESH (1 << 6)
+#define INTRL2_0_BELOW_HYST_THRESH (1 << 7)
+#define INTRL2_0_FREE_LIST_EMPTY (1 << 8)
+#define INTRL2_0_TX_RING_FULL (1 << 9)
+#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
+#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
+
+/* RXCHK offset and defines */
+#define SYS_PORT_RXCHK_OFFSET 0x300
+
+#define RXCHK_CONTROL 0x00
+#define RXCHK_EN (1 << 0)
+#define RXCHK_SKIP_FCS (1 << 1)
+#define RXCHK_BAD_CSUM_DIS (1 << 2)
+#define RXCHK_BRCM_TAG_EN (1 << 3)
+#define RXCHK_BRCM_TAG_MATCH_SHIFT 4
+#define RXCHK_BRCM_TAG_MATCH_MASK 0xff
+#define RXCHK_PARSE_TNL (1 << 12)
+#define RXCHK_VIOL_EN (1 << 13)
+#define RXCHK_VIOL_DIS (1 << 14)
+#define RXCHK_INCOM_PKT (1 << 15)
+#define RXCHK_V6_DUPEXT_EN (1 << 16)
+#define RXCHK_V6_DUPEXT_DIS (1 << 17)
+#define RXCHK_ETHERTYPE_DIS (1 << 18)
+#define RXCHK_L2_HDR_DIS (1 << 19)
+#define RXCHK_L3_HDR_DIS (1 << 20)
+#define RXCHK_MAC_RX_ERR_DIS (1 << 21)
+#define RXCHK_PARSE_AUTH (1 << 22)
+
+#define RXCHK_BRCM_TAG0 0x04
+#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG0_MASK 0x24
+#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
+#define RXCHK_ETHERTYPE 0x48
+#define RXCHK_BAD_CSUM_CNTR 0x4C
+#define RXCHK_OTHER_DISC_CNTR 0x50
+
+/* TXCHCK offsets and defines */
+#define SYS_PORT_TXCHK_OFFSET 0x380
+#define TXCHK_PKT_RDY_THRESH 0x00
+
+/* Receive buffer offset and defines */
+#define SYS_PORT_RBUF_OFFSET 0x400
+
+#define RBUF_CONTROL 0x00
+#define RBUF_RSB_EN (1 << 0)
+#define RBUF_4B_ALGN (1 << 1)
+#define RBUF_BRCM_TAG_STRIP (1 << 2)
+#define RBUF_BAD_PKT_DISC (1 << 3)
+#define RBUF_RESUME_THRESH_SHIFT 4
+#define RBUF_RESUME_THRESH_MASK 0xff
+#define RBUF_OK_TO_SEND_SHIFT 12
+#define RBUF_OK_TO_SEND_MASK 0xff
+#define RBUF_CRC_REPLACE (1 << 20)
+#define RBUF_OK_TO_SEND_MODE (1 << 21)
+#define RBUF_RSB_SWAP (1 << 22)
+#define RBUF_ACPI_EN (1 << 23)
+
+#define RBUF_PKT_RDY_THRESH 0x04
+
+#define RBUF_STATUS 0x08
+#define RBUF_WOL_MODE (1 << 0)
+#define RBUF_MPD (1 << 1)
+#define RBUF_ACPI (1 << 2)
+
+#define RBUF_OVFL_DISC_CNTR 0x0c
+#define RBUF_ERR_PKT_CNTR 0x10
+
+/* Transmit buffer offset and defines */
+#define SYS_PORT_TBUF_OFFSET 0x600
+
+#define TBUF_CONTROL 0x00
+#define TBUF_BP_EN (1 << 0)
+#define TBUF_MAX_PKT_THRESH_SHIFT 1
+#define TBUF_MAX_PKT_THRESH_MASK 0x1f
+#define TBUF_FULL_THRESH_SHIFT 8
+#define TBUF_FULL_THRESH_MASK 0x1f
+
+/* UniMAC offset and defines */
+#define SYS_PORT_UMAC_OFFSET 0x800
+
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_10 0
+#define CMD_SPEED_100 1
+#define CMD_SPEED_1000 2
+#define CMD_SPEED_2500 3
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+
+#define UMAC_MAC0 0x00c
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+
+#define UMAC_TX_FLUSH 0x334
+
+#define UMAC_MIB_START 0x400
+
+/* There is a 0xC gap between the end of RX and beginning of TX stats and then
+ * between the end of TX stats and the beginning of the RX RUNT
+ */
+#define UMAC_MIB_STAT_OFFSET 0xc
+
+#define UMAC_MIB_CTRL 0x580
+#define MIB_RX_CNT_RST (1 << 0)
+#define MIB_RUNT_CNT_RST (1 << 1)
+#define MIB_TX_CNT_RST (1 << 2)
+#define UMAC_MDF_CTRL 0x650
+#define UMAC_MDF_ADDR 0x654
+
+/* Receive DMA offset and defines */
+#define SYS_PORT_RDMA_OFFSET 0x2000
+
+#define RDMA_CONTROL 0x1000
+#define RDMA_EN (1 << 0)
+#define RDMA_RING_CFG (1 << 1)
+#define RDMA_DISC_EN (1 << 2)
+#define RDMA_BUF_DATA_OFFSET_SHIFT 4
+#define RDMA_BUF_DATA_OFFSET_MASK 0x3ff
+
+#define RDMA_STATUS 0x1004
+#define RDMA_DISABLED (1 << 0)
+#define RDMA_DESC_RAM_INIT_BUSY (1 << 1)
+#define RDMA_BP_STATUS (1 << 2)
+
+#define RDMA_SCB_BURST_SIZE 0x1008
+
+#define RDMA_RING_BUF_SIZE 0x100c
+#define RDMA_RING_SIZE_SHIFT 16
+
+#define RDMA_WRITE_PTR_HI 0x1010
+#define RDMA_WRITE_PTR_LO 0x1014
+#define RDMA_PROD_INDEX 0x1018
+#define RDMA_PROD_INDEX_MASK 0xffff
+
+#define RDMA_CONS_INDEX 0x101c
+#define RDMA_CONS_INDEX_MASK 0xffff
+
+#define RDMA_START_ADDR_HI 0x1020
+#define RDMA_START_ADDR_LO 0x1024
+#define RDMA_END_ADDR_HI 0x1028
+#define RDMA_END_ADDR_LO 0x102c
+
+#define RDMA_MBDONE_INTR 0x1030
+#define RDMA_INTR_THRESH_MASK 0xff
+#define RDMA_TIMEOUT_SHIFT 16
+#define RDMA_TIMEOUT_MASK 0xffff
+
+#define RDMA_XON_XOFF_THRESH 0x1034
+#define RDMA_XON_XOFF_THRESH_MASK 0xffff
+#define RDMA_XOFF_THRESH_SHIFT 16
+
+#define RDMA_READ_PTR_HI 0x1038
+#define RDMA_READ_PTR_LO 0x103c
+
+#define RDMA_OVERRIDE 0x1040
+#define RDMA_LE_MODE (1 << 0)
+#define RDMA_REG_MODE (1 << 1)
+
+#define RDMA_TEST 0x1044
+#define RDMA_TP_OUT_SEL (1 << 0)
+#define RDMA_MEM_SEL (1 << 1)
+
+#define RDMA_DEBUG 0x1048
+
+/* Transmit DMA offset and defines */
+#define TDMA_NUM_RINGS 32 /* rings = queues */
+#define TDMA_PORT_SIZE DESC_SIZE /* two 32-bit words */
+
+#define SYS_PORT_TDMA_OFFSET 0x4000
+#define TDMA_WRITE_PORT_OFFSET 0x0000
+#define TDMA_WRITE_PORT_HI(i) (TDMA_WRITE_PORT_OFFSET + \
+ (i) * TDMA_PORT_SIZE)
+#define TDMA_WRITE_PORT_LO(i) (TDMA_WRITE_PORT_OFFSET + \
+ sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_OFFSET (TDMA_WRITE_PORT_OFFSET + \
+ (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_HI(i) (TDMA_READ_PORT_OFFSET + \
+ (i) * TDMA_PORT_SIZE)
+#define TDMA_READ_PORT_LO(i) (TDMA_READ_PORT_OFFSET + \
+ sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_CMD_OFFSET (TDMA_READ_PORT_OFFSET + \
+ (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_CMD(i) (TDMA_READ_PORT_CMD_OFFSET + \
+ (i) * sizeof(u32))
+
+#define TDMA_DESC_RING_00_BASE (TDMA_READ_PORT_CMD_OFFSET + \
+ (TDMA_NUM_RINGS * sizeof(u32)))
+
+/* Register offsets and defines relatives to a specific ring number */
+#define RING_HEAD_TAIL_PTR 0x00
+#define RING_HEAD_MASK 0x7ff
+#define RING_TAIL_SHIFT 11
+#define RING_TAIL_MASK 0x7ff
+#define RING_FLUSH (1 << 24)
+#define RING_EN (1 << 25)
+
+#define RING_COUNT 0x04
+#define RING_COUNT_MASK 0x7ff
+#define RING_BUFF_DONE_SHIFT 11
+#define RING_BUFF_DONE_MASK 0x7ff
+
+#define RING_MAX_HYST 0x08
+#define RING_MAX_THRESH_MASK 0x7ff
+#define RING_HYST_THRESH_SHIFT 11
+#define RING_HYST_THRESH_MASK 0x7ff
+
+#define RING_INTR_CONTROL 0x0c
+#define RING_INTR_THRESH_MASK 0x7ff
+#define RING_EMPTY_INTR_EN (1 << 15)
+#define RING_TIMEOUT_SHIFT 16
+#define RING_TIMEOUT_MASK 0xffff
+
+#define RING_PROD_CONS_INDEX 0x10
+#define RING_PROD_INDEX_MASK 0xffff
+#define RING_CONS_INDEX_SHIFT 16
+#define RING_CONS_INDEX_MASK 0xffff
+
+#define RING_MAPPING 0x14
+#define RING_QID_MASK 0x3
+#define RING_PORT_ID_SHIFT 3
+#define RING_PORT_ID_MASK 0x7
+#define RING_IGNORE_STATUS (1 << 6)
+#define RING_FAILOVER_EN (1 << 7)
+#define RING_CREDIT_SHIFT 8
+#define RING_CREDIT_MASK 0xffff
+
+#define RING_PCP_DEI_VID 0x18
+#define RING_VID_MASK 0x7ff
+#define RING_DEI (1 << 12)
+#define RING_PCP_SHIFT 13
+#define RING_PCP_MASK 0x7
+#define RING_PKT_SIZE_ADJ_SHIFT 16
+#define RING_PKT_SIZE_ADJ_MASK 0xf
+
+#define TDMA_DESC_RING_SIZE 28
+
+/* Definition for a given TX ring base address */
+#define TDMA_DESC_RING_BASE(i) (TDMA_DESC_RING_00_BASE + \
+ ((i) * TDMA_DESC_RING_SIZE))
+
+/* Ring indexed register addresses */
+#define TDMA_DESC_RING_HEAD_TAIL_PTR(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_HEAD_TAIL_PTR)
+#define TDMA_DESC_RING_COUNT(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_COUNT)
+#define TDMA_DESC_RING_MAX_HYST(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_MAX_HYST)
+#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_INTR_CONTROL)
+#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
+ (TDMA_DESC_RING_BASE(i) + \
+ RING_PROD_CONS_INDEX)
+#define TDMA_DESC_RING_MAPPING(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_MAPPING)
+#define TDMA_DESC_RING_PCP_DEI_VID(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_PCP_DEI_VID)
+
+#define TDMA_CONTROL 0x600
+#define TDMA_EN (1 << 0)
+#define TSB_EN (1 << 1)
+#define TSB_SWAP (1 << 2)
+#define ACB_ALGO (1 << 3)
+#define BUF_DATA_OFFSET_SHIFT 4
+#define BUF_DATA_OFFSET_MASK 0x3ff
+#define VLAN_EN (1 << 14)
+#define SW_BRCM_TAG (1 << 15)
+#define WNC_KPT_SIZE_UPDATE (1 << 16)
+#define SYNC_PKT_SIZE (1 << 17)
+#define ACH_TXDONE_DELAY_SHIFT 18
+#define ACH_TXDONE_DELAY_MASK 0xff
+
+#define TDMA_STATUS 0x604
+#define TDMA_DISABLED (1 << 0)
+#define TDMA_LL_RAM_INIT_BUSY (1 << 1)
+
+#define TDMA_SCB_BURST_SIZE 0x608
+#define TDMA_OVER_MAX_THRESH_STATUS 0x60c
+#define TDMA_OVER_HYST_THRESH_STATUS 0x610
+#define TDMA_TPID 0x614
+
+#define TDMA_FREE_LIST_HEAD_TAIL_PTR 0x618
+#define TDMA_FREE_HEAD_MASK 0x7ff
+#define TDMA_FREE_TAIL_SHIFT 11
+#define TDMA_FREE_TAIL_MASK 0x7ff
+
+#define TDMA_FREE_LIST_COUNT 0x61c
+#define TDMA_FREE_LIST_COUNT_MASK 0x7ff
+
+#define TDMA_TIER2_ARB_CTRL 0x620
+#define TDMA_ARB_MODE_RR 0
+#define TDMA_ARB_MODE_WEIGHT_RR 0x1
+#define TDMA_ARB_MODE_STRICT 0x2
+#define TDMA_ARB_MODE_DEFICIT_RR 0x3
+#define TDMA_CREDIT_SHIFT 4
+#define TDMA_CREDIT_MASK 0xffff
+
+#define TDMA_TIER1_ARB_0_CTRL 0x624
+#define TDMA_ARB_EN (1 << 0)
+
+#define TDMA_TIER1_ARB_0_QUEUE_EN 0x628
+#define TDMA_TIER1_ARB_1_CTRL 0x62c
+#define TDMA_TIER1_ARB_1_QUEUE_EN 0x630
+#define TDMA_TIER1_ARB_2_CTRL 0x634
+#define TDMA_TIER1_ARB_2_QUEUE_EN 0x638
+#define TDMA_TIER1_ARB_3_CTRL 0x63c
+#define TDMA_TIER1_ARB_3_QUEUE_EN 0x640
+
+#define TDMA_SCB_ENDIAN_OVERRIDE 0x644
+#define TDMA_LE_MODE (1 << 0)
+#define TDMA_REG_MODE (1 << 1)
+
+#define TDMA_TEST 0x648
+#define TDMA_TP_OUT_SEL (1 << 0)
+#define TDMA_MEM_TM (1 << 1)
+
+#define TDMA_DEBUG 0x64c
+
+/* Transmit/Receive descriptor */
+struct dma_desc {
+ u32 addr_status_len;
+ u32 addr_lo;
+};
+
+/* Number of Receive hardware descriptor words */
+#define NUM_HW_RX_DESC_WORDS 1024
+/* Real number of usable descriptors */
+#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
+
+/* Internal linked-list RAM has up to 1536 entries */
+#define NUM_TX_DESC 1536
+
+#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
+
+/* Rx/Tx common counter group.*/
+struct bcm_sysport_pkt_counters {
+ u32 cnt_64; /* RO Received/Transmitted 64 bytes packet */
+ u32 cnt_127; /* RO Rx/Tx 65-127 bytes packet */
+ u32 cnt_255; /* RO Rx/Tx 128-255 bytes packet */
+ u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */
+ u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */
+ u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */
+ u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */
+ u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/
+ u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/
+ u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/
+};
+
+/* RSV, Receive Status Vector */
+struct bcm_sysport_rx_counters {
+ struct bcm_sysport_pkt_counters pkt_cnt;
+ u32 pkt; /* RO (0x428) Received pkt count*/
+ u32 bytes; /* RO Received byte count */
+ u32 mca; /* RO # of Received multicast pkt */
+ u32 bca; /* RO # of Receive broadcast pkt */
+ u32 fcs; /* RO # of Received FCS error */
+ u32 cf; /* RO # of Received control frame pkt*/
+ u32 pf; /* RO # of Received pause frame pkt */
+ u32 uo; /* RO # of unknown op code pkt */
+ u32 aln; /* RO # of alignment error count */
+ u32 flr; /* RO # of frame length out of range count */
+ u32 cde; /* RO # of code error pkt */
+ u32 fcr; /* RO # of carrier sense error pkt */
+ u32 ovr; /* RO # of oversize pkt*/
+ u32 jbr; /* RO # of jabber count */
+ u32 mtue; /* RO # of MTU error pkt*/
+ u32 pok; /* RO # of Received good pkt */
+ u32 uc; /* RO # of unicast pkt */
+ u32 ppp; /* RO # of PPP pkt */
+ u32 rcrc; /* RO (0x470),# of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcm_sysport_tx_counters {
+ struct bcm_sysport_pkt_counters pkt_cnt;
+ u32 pkts; /* RO (0x4a8) Transmitted pkt */
+ u32 mca; /* RO # of xmited multicast pkt */
+ u32 bca; /* RO # of xmited broadcast pkt */
+ u32 pf; /* RO # of xmited pause frame count */
+ u32 cf; /* RO # of xmited control frame count */
+ u32 fcs; /* RO # of xmited FCS error count */
+ u32 ovr; /* RO # of xmited oversize pkt */
+ u32 drf; /* RO # of xmited deferral pkt */
+ u32 edf; /* RO # of xmited Excessive deferral pkt*/
+ u32 scl; /* RO # of xmited single collision pkt */
+ u32 mcl; /* RO # of xmited multiple collision pkt*/
+ u32 lcl; /* RO # of xmited late collision pkt */
+ u32 ecl; /* RO # of xmited excessive collision pkt*/
+ u32 frg; /* RO # of xmited fragments pkt*/
+ u32 ncl; /* RO # of xmited total collision count */
+ u32 jbr; /* RO # of xmited jabber count*/
+ u32 bytes; /* RO # of xmited byte count */
+ u32 pok; /* RO # of xmited good pkt */
+ u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcm_sysport_mib {
+ struct bcm_sysport_rx_counters rx;
+ struct bcm_sysport_tx_counters tx;
+ u32 rx_runt_cnt;
+ u32 rx_runt_fcs;
+ u32 rx_runt_fcs_align;
+ u32 rx_runt_bytes;
+ u32 rxchk_bad_csum;
+ u32 rxchk_other_pkt_disc;
+ u32 rbuf_ovflow_cnt;
+ u32 rbuf_err_cnt;
+};
+
+/* HW maintains a large list of counters */
+enum bcm_sysport_stat_type {
+ BCM_SYSPORT_STAT_NETDEV = -1,
+ BCM_SYSPORT_STAT_MIB_RX,
+ BCM_SYSPORT_STAT_MIB_TX,
+ BCM_SYSPORT_STAT_RUNT,
+ BCM_SYSPORT_STAT_RXCHK,
+ BCM_SYSPORT_STAT_RBUF,
+};
+
+/* Macros to help define ethtool statistics */
+#define STAT_NETDEV(m) { \
+ .stat_string = __stringify(m), \
+ .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+ .stat_offset = offsetof(struct net_device_stats, m), \
+ .type = BCM_SYSPORT_STAT_NETDEV, \
+}
+
+#define STAT_MIB(str, m, _type) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = _type, \
+}
+
+#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
+#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
+#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+
+#define STAT_RXCHK(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RXCHK, \
+ .reg_offset = ofs, \
+}
+
+#define STAT_RBUF(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RBUF, \
+ .reg_offset = ofs, \
+}
+
+struct bcm_sysport_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_sizeof;
+ int stat_offset;
+ enum bcm_sysport_stat_type type;
+ /* reg offset from UMAC base for misc counters */
+ u16 reg_offset;
+};
+
+/* Software housekeeping helper structure */
+struct bcm_sysport_cb {
+ struct sk_buff *skb; /* SKB for RX packets */
+ void __iomem *bd_addr; /* Buffer descriptor PHYS addr */
+
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* Software view of the TX ring */
+struct bcm_sysport_tx_ring {
+ spinlock_t lock; /* Ring lock for tx reclaim/xmit */
+ struct napi_struct napi; /* NAPI per tx queue */
+ dma_addr_t desc_dma; /* DMA cookie */
+ unsigned int index; /* Ring index */
+ unsigned int size; /* Ring current size */
+ unsigned int alloc_size; /* Ring one-time allocated size */
+ unsigned int desc_count; /* Number of descriptors */
+ unsigned int curr_desc; /* Current descriptor */
+ unsigned int c_index; /* Last consumer index */
+ unsigned int p_index; /* Current producer index */
+ struct bcm_sysport_cb *cbs; /* Transmit control blocks */
+ struct dma_desc *desc_cpu; /* CPU view of the descriptor */
+ struct bcm_sysport_priv *priv; /* private context backpointer */
+};
+
+/* Driver private structure */
+struct bcm_sysport_priv {
+ void __iomem *base;
+ u32 irq0_stat;
+ u32 irq0_mask;
+ u32 irq1_stat;
+ u32 irq1_mask;
+ struct napi_struct napi ____cacheline_aligned;
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ int irq0;
+ int irq1;
+
+ /* Transmit rings */
+ struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+
+ /* Receive queue */
+ void __iomem *rx_bds;
+ void __iomem *rx_bd_assign_ptr;
+ unsigned int rx_bd_assign_index;
+ struct bcm_sysport_cb *rx_cbs;
+ unsigned int num_rx_bds;
+ unsigned int rx_read_ptr;
+ unsigned int rx_c_index;
+
+ /* PHY device */
+ struct phy_device *phydev;
+ phy_interface_t phy_interface;
+ int old_pause;
+ int old_link;
+ int old_duplex;
+
+ /* Misc fields */
+ unsigned int rx_csum_en:1;
+ unsigned int tsb_en:1;
+ unsigned int crc_fwd:1;
+ u16 rev;
+
+ /* MIB related fields */
+ struct bcm_sysport_mib mib;
+
+ /* Ethtool */
+ u32 msg_enable;
+};
+#endif /* __BCM_SYSPORT_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 24e16e3301e0..8efeed3325b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2252,12 +2252,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
p->port_type == FW_PORT_TYPE_FIBER_XAUI)
cmd->port = PORT_FIBRE;
- else if (p->port_type == FW_PORT_TYPE_SFP) {
- if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
- p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ else if (p->port_type == FW_PORT_TYPE_SFP ||
+ p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+ p->mod_type == FW_PORT_MOD_TYPE_SR ||
+ p->mod_type == FW_PORT_MOD_TYPE_ER ||
+ p->mod_type == FW_PORT_MOD_TYPE_LRM)
+ cmd->port = PORT_FIBRE;
+ else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
cmd->port = PORT_DA;
else
- cmd->port = PORT_FIBRE;
+ cmd->port = PORT_OTHER;
} else
cmd->port = PORT_OTHER;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ca95cf2954eb..cced1a3d5181 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1697,7 +1697,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
- csum_ok = pkt->csum_calc && !pkt->err_vec;
+ csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ (q->netdev->features & NETIF_F_RXCSUM);
if ((pkt->l2info & htonl(RXF_TCP)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
do_gro(rxq, si, pkt);
@@ -1720,8 +1721,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.pkts++;
- if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
- (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+ if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
rxq->stats.rx_cso++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9cfa4b4bb089..adebbf849cdb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1510,7 +1510,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
{
struct sk_buff *skb;
const struct cpl_rx_pkt *pkt = (void *)rsp;
- bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+ bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ (rspq->netdev->features & NETIF_F_RXCSUM);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
/*
@@ -1538,8 +1539,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
skb_record_rx_queue(skb, rspq->idx);
rxq->stats.pkts++;
- if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
- !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (csum_ok && !pkt->err_vec &&
+ (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
if (!pkt->ip_frag)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else {
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 97db5a7179df..31c376628bfd 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -120,6 +120,9 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
+#define RSS_INDIR_TABLE_LEN 128
+#define RSS_HASH_KEY_LEN 40
+
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -409,6 +412,13 @@ struct be_resources {
u32 if_cap_flags;
};
+struct rss_info {
+ u64 rss_flags;
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+ u8 rss_queue[RSS_INDIR_TABLE_LEN];
+ u8 rss_hkey[RSS_HASH_KEY_LEN];
+};
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -445,7 +455,7 @@ struct be_adapter {
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
u16 vlans_added;
- u8 vlan_tag[VLAN_N_VID];
+ unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio; /* Recommended Priority */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
@@ -507,7 +517,7 @@ struct be_adapter {
u32 msg_enable;
int be_get_temp_freq;
u8 pf_number;
- u64 rss_flags;
+ struct rss_info rss_info;
};
#define be_physfn(adapter) (!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d1ec15af0d24..476752d0a6a4 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -52,8 +52,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
}
};
-static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
- u8 subsystem)
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
int i;
int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
@@ -197,7 +196,7 @@ done:
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
- struct be_async_event_link_state *evt)
+ struct be_async_event_link_state *evt)
{
/* When link status changes, link speed must be re-queried from FW */
adapter->phy.link_speed = -1;
@@ -221,7 +220,9 @@ static void be_async_link_state_process(struct be_adapter *adapter,
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
- struct be_async_event_grp5_cos_priority *evt)
+ struct
+ be_async_event_grp5_cos_priority
+ *evt)
{
if (evt->valid) {
adapter->vlan_prio_bmap = evt->available_priority_bmap;
@@ -233,7 +234,9 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
- struct be_async_event_grp5_qos_link_speed *evt)
+ struct
+ be_async_event_grp5_qos_link_speed
+ *evt)
{
if (adapter->phy.link_speed >= 0 &&
evt->physical_port == adapter->port_num)
@@ -242,7 +245,9 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
/*Grp5 PVID evt*/
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
- struct be_async_event_grp5_pvid_state *evt)
+ struct
+ be_async_event_grp5_pvid_state
+ *evt)
{
if (evt->enabled) {
adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
@@ -253,7 +258,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
- u32 trailer, struct be_mcc_compl *evt)
+ u32 trailer, struct be_mcc_compl *evt)
{
u8 event_type = 0;
@@ -281,7 +286,7 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
}
static void be_async_dbg_evt_process(struct be_adapter *adapter,
- u32 trailer, struct be_mcc_compl *cmp)
+ u32 trailer, struct be_mcc_compl *cmp)
{
u8 event_type = 0;
struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
@@ -370,10 +375,10 @@ int be_process_mcc(struct be_adapter *adapter)
(struct be_async_event_link_state *) compl);
else if (is_grp5_evt(compl->flags))
be_async_grp5_evt_process(adapter,
- compl->flags, compl);
+ compl->flags, compl);
else if (is_dbg_evt(compl->flags))
be_async_dbg_evt_process(adapter,
- compl->flags, compl);
+ compl->flags, compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
status = be_mcc_compl_process(adapter, compl);
atomic_dec(&mcc_obj->q.used);
@@ -560,10 +565,8 @@ static bool lancer_provisioning_error(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- sliport_err1 = ioread32(adapter->db +
- SLIPORT_ERROR1_OFFSET);
- sliport_err2 = ioread32(adapter->db +
- SLIPORT_ERROR2_OFFSET);
+ sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
+ sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
@@ -630,8 +633,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
if (stage == POST_STAGE_ARMFW_RDY)
return 0;
- dev_info(dev, "Waiting for POST, %ds elapsed\n",
- timeout);
+ dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
if (msleep_interruptible(2000)) {
dev_err(dev, "Waiting for POST aborted\n");
return -EINTR;
@@ -649,8 +651,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
return &wrb->payload.sgl[0];
}
-static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
- unsigned long addr)
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
wrb->tag0 = addr & 0xFFFFFFFF;
wrb->tag1 = upper_32_bits(addr);
@@ -659,8 +660,9 @@ static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
- u8 subsystem, u8 opcode, int cmd_len,
- struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
+ u8 subsystem, u8 opcode, int cmd_len,
+ struct be_mcc_wrb *wrb,
+ struct be_dma_mem *mem)
{
struct be_sge *sge;
@@ -683,7 +685,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
- struct be_dma_mem *mem)
+ struct be_dma_mem *mem)
{
int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
u64 dma = (u64)mem->dma;
@@ -868,7 +870,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
+ NULL);
/* Support for EQ_CREATEv2 available only SH-R onwards */
if (!(BEx_chip(adapter) || lancer_chip(adapter)))
@@ -917,7 +920,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
+ NULL);
req->type = MAC_ADDRESS_TYPE_NETWORK;
if (permanent) {
req->permanent = 1;
@@ -940,7 +944,7 @@ err:
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id, u32 domain)
+ u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_add *req;
@@ -956,7 +960,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
req->if_id = cpu_to_le32(if_id);
@@ -1012,7 +1017,7 @@ err:
/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
- struct be_queue_info *eq, bool no_delay, int coalesce_wm)
+ struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_cq_create *req;
@@ -1028,17 +1033,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
- coalesce_wm);
+ coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
- ctxt, no_delay);
+ ctxt, no_delay);
AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
- __ilog2_u32(cq->len/256));
+ __ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
@@ -1053,14 +1059,12 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
ctxt, coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
- no_delay);
+ no_delay);
AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
- __ilog2_u32(cq->len/256));
+ __ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
- ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
- ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
}
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1088,8 +1092,8 @@ static u32 be_encoded_q_len(int q_len)
}
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_ext_create *req;
@@ -1105,13 +1109,14 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
+ be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
} else {
req->hdr.version = 1;
@@ -1145,8 +1150,8 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
}
static int be_cmd_mccq_org_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_create *req;
@@ -1162,13 +1167,14 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
+ be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1187,8 +1193,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
}
int be_cmd_mccq_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq, struct be_queue_info *cq)
{
int status;
@@ -1213,7 +1218,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
+ OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
if (lancer_chip(adapter)) {
req->hdr.version = 1;
@@ -1250,8 +1255,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
- struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
- u32 if_id, u32 rss, u8 *rss_id)
+ struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
+ u32 if_id, u32 rss, u8 *rss_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_eth_rx_create *req;
@@ -1268,7 +1273,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
req->cq_id = cpu_to_le16(cq_id);
req->frag_size = fls(frag_size) - 1;
@@ -1295,7 +1300,7 @@ err:
* Uses Mbox
*/
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
- int queue_type)
+ int queue_type)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_q_destroy *req;
@@ -1334,7 +1339,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
}
be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
- NULL);
+ NULL);
req->id = cpu_to_le16(q->id);
status = be_mbox_notify_wait(adapter);
@@ -1361,7 +1366,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
req->id = cpu_to_le16(q->id);
status = be_mcc_notify_wait(adapter);
@@ -1384,7 +1389,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE,
+ sizeof(*req), &wrb, NULL);
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
@@ -1422,7 +1428,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
+ sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
@@ -1452,7 +1459,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
hdr = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
+ nonemb_cmd);
/* version 1 of the cmd is not supported only by BE2 */
if (BE2_chip(adapter))
@@ -1472,7 +1480,7 @@ err:
/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
@@ -1493,8 +1501,8 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
- nonemb_cmd);
+ OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
+ wrb, nonemb_cmd);
req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
req->cmd_params.params.reset_stats = 0;
@@ -1553,7 +1561,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+ sizeof(*req), wrb, NULL);
/* version 1 of the cmd is not supported only by BE2 */
if (!BE2_chip(adapter))
@@ -1598,8 +1607,8 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
- wrb, NULL);
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
+ sizeof(*req), wrb, NULL);
be_mcc_notify(adapter);
@@ -1625,7 +1634,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
+ NULL);
req->fat_operation = cpu_to_le32(QUERY_FAT);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -1655,8 +1665,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_fat_cmd.size,
- &get_fat_cmd.dma);
+ get_fat_cmd.size,
+ &get_fat_cmd.dma);
if (!get_fat_cmd.va) {
status = -ENOMEM;
dev_err(&adapter->pdev->dev,
@@ -1679,8 +1689,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
- &get_fat_cmd);
+ OPCODE_COMMON_MANAGE_FAT, payload_len,
+ wrb, &get_fat_cmd);
req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
req->read_log_offset = cpu_to_le32(log_offset);
@@ -1691,8 +1701,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!status) {
struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
memcpy(buf + offset,
- resp->data_buffer,
- le32_to_cpu(resp->read_log_length));
+ resp->data_buffer,
+ le32_to_cpu(resp->read_log_length));
} else {
dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
goto err;
@@ -1702,14 +1712,13 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
}
err:
pci_free_consistent(adapter->pdev, get_fat_cmd.size,
- get_fat_cmd.va,
- get_fat_cmd.dma);
+ get_fat_cmd.va, get_fat_cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
}
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
- char *fw_on_flash)
+ char *fw_on_flash)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_fw_version *req;
@@ -1726,7 +1735,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
+ NULL);
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
@@ -1759,7 +1769,8 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
+ NULL);
req->num_eq = cpu_to_le32(num);
for (i = 0; i < num; i++) {
@@ -1777,7 +1788,7 @@ err:
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool promiscuous)
+ u32 num)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_vlan_config *req;
@@ -1793,19 +1804,16 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
+ wrb, NULL);
req->interface_id = if_id;
- req->promiscuous = promiscuous;
req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
req->num_vlan = num;
- if (!promiscuous) {
- memcpy(req->normal_vlan, vtag_array,
- req->num_vlan * sizeof(vtag_array[0]));
- }
+ memcpy(req->normal_vlan, vtag_array,
+ req->num_vlan * sizeof(vtag_array[0]));
status = be_mcc_notify_wait(adapter);
-
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -1827,18 +1835,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
}
memset(req, 0, sizeof(*req));
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
- wrb, mem);
+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
+ wrb, mem);
req->if_id = cpu_to_le32(adapter->if_handle);
if (flags & IFF_PROMISC) {
req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
if (value == ON)
- req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
} else if (flags & IFF_ALLMULTI) {
req->if_flags_mask = req->if_flags =
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -1867,7 +1876,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
}
if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
- req->if_flags_mask) {
+ req->if_flags_mask) {
dev_warn(&adapter->pdev->dev,
"Cannot set rx filter flags 0x%x\n",
req->if_flags_mask);
@@ -1905,7 +1914,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
req->tx_flow_control = cpu_to_le16((u16)tx_fc);
req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1938,7 +1948,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -1968,7 +1979,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ sizeof(*req), wrb, NULL);
status = be_mbox_notify_wait(adapter);
if (!status) {
@@ -2011,7 +2023,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
+ NULL);
status = be_mbox_notify_wait(adapter);
@@ -2020,47 +2033,47 @@ int be_cmd_reset_function(struct be_adapter *adapter)
}
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size)
+ u32 rss_hash_opts, u16 table_size, u8 *rss_hkey)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_rss_config *req;
- u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
- 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
- 0x3ea83c02, 0x4a110304};
int status;
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
- wrb = wrb_from_mbox(adapter);
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
req->if_id = cpu_to_le32(adapter->if_handle);
req->enable_rss = cpu_to_le16(rss_hash_opts);
req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
- if (lancer_chip(adapter) || skyhawk_chip(adapter))
+ if (!BEx_chip(adapter))
req->hdr.version = 1;
memcpy(req->cpu_table, rsstable, table_size);
- memcpy(req->hash, myhash, sizeof(myhash));
+ memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
be_dws_cpu_to_le(req->hash, sizeof(req->hash));
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
- u8 bcn, u8 sts, u8 state)
+ u8 bcn, u8 sts, u8 state)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_enable_disable_beacon *req;
@@ -2076,7 +2089,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_ENABLE_DISABLE_BEACON,
+ sizeof(*req), wrb, NULL);
req->port_num = port_num;
req->beacon_state = state;
@@ -2107,7 +2121,8 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
+ wrb, NULL);
req->port_num = port_num;
@@ -2146,20 +2161,20 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_OBJECT,
- sizeof(struct lancer_cmd_req_write_object), wrb,
- NULL);
+ OPCODE_COMMON_WRITE_OBJECT,
+ sizeof(struct lancer_cmd_req_write_object), wrb,
+ NULL);
ctxt = &req->context;
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- write_length, ctxt, data_size);
+ write_length, ctxt, data_size);
if (data_size == 0)
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 1);
+ eof, ctxt, 1);
else
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 0);
+ eof, ctxt, 0);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
@@ -2167,8 +2182,8 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
- sizeof(struct lancer_cmd_req_write_object))
- & 0xFFFFFFFF);
+ sizeof(struct lancer_cmd_req_write_object))
+ & 0xFFFFFFFF);
req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
sizeof(struct lancer_cmd_req_write_object)));
@@ -2197,8 +2212,8 @@ err_unlock:
}
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_read, u32 *eof, u8 *addn_status)
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status)
{
struct be_mcc_wrb *wrb;
struct lancer_cmd_req_read_object *req;
@@ -2216,9 +2231,9 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_OBJECT,
- sizeof(struct lancer_cmd_req_read_object), wrb,
- NULL);
+ OPCODE_COMMON_READ_OBJECT,
+ sizeof(struct lancer_cmd_req_read_object), wrb,
+ NULL);
req->desired_read_len = cpu_to_le32(data_size);
req->read_offset = cpu_to_le32(data_offset);
@@ -2244,7 +2259,7 @@ err_unlock:
}
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 flash_type, u32 flash_opcode, u32 buf_size)
+ u32 flash_type, u32 flash_opcode, u32 buf_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_write_flashrom *req;
@@ -2261,7 +2276,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
+ OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
+ cmd);
req->params.op_type = cpu_to_le32(flash_type);
req->params.op_code = cpu_to_le32(flash_opcode);
@@ -2318,7 +2334,7 @@ err:
}
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_acpi_wol_magic_config *req;
@@ -2334,8 +2350,8 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
- nonemb_cmd);
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
+ wrb, nonemb_cmd);
memcpy(req->magic_mac, mac, ETH_ALEN);
status = be_mcc_notify_wait(adapter);
@@ -2363,8 +2379,8 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
- NULL);
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
+ wrb, NULL);
req->src_port = port_num;
req->dest_port = port_num;
@@ -2378,7 +2394,8 @@ err:
}
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
- u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+ u32 loopback_type, u32 pkt_size, u32 num_pkts,
+ u64 pattern)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_loopback_test *req;
@@ -2396,7 +2413,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
+ NULL);
req->hdr.timeout = cpu_to_le32(15);
req->pattern = cpu_to_le64(pattern);
@@ -2421,7 +2439,7 @@ err:
}
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
- u32 byte_cnt, struct be_dma_mem *cmd)
+ u32 byte_cnt, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_ddrdma_test *req;
@@ -2437,7 +2455,8 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
+ cmd);
req->pattern = cpu_to_le64(pattern);
req->byte_count = cpu_to_le32(byte_cnt);
@@ -2465,7 +2484,7 @@ err:
}
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_seeprom_read *req;
@@ -2481,8 +2500,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
- nonemb_cmd);
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
+ nonemb_cmd);
status = be_mcc_notify_wait(adapter);
@@ -2510,8 +2529,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
goto err;
}
cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -2521,8 +2539,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
req = cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
- wrb, &cmd);
+ OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
+ wrb, &cmd);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -2544,8 +2562,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
BE_SUPPORTED_SPEED_1GBPS;
}
}
- pci_free_consistent(adapter->pdev, cmd.size,
- cmd.va, cmd.dma);
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -2568,7 +2585,7 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
@@ -2597,10 +2614,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
- &attribs_cmd.dma);
+ &attribs_cmd.dma);
if (!attribs_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
goto err;
}
@@ -2613,8 +2629,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
req = attribs_cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
- &attribs_cmd);
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
+ wrb, &attribs_cmd);
status = be_mbox_notify_wait(adapter);
if (!status) {
@@ -2649,7 +2665,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
+ sizeof(*req), wrb, NULL);
req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
CAPABILITY_BE3_NATIVE_ERX_API);
@@ -2762,12 +2779,12 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_mac_list_cmd.size,
- &get_mac_list_cmd.dma);
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma);
if (!get_mac_list_cmd.va) {
dev_err(&adapter->pdev->dev,
- "Memory allocation failure during GET_MAC_LIST\n");
+ "Memory allocation failure during GET_MAC_LIST\n");
return -ENOMEM;
}
@@ -2831,18 +2848,18 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
/* If no active mac_id found, return first mac addr */
*pmac_id_valid = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
- ETH_ALEN);
+ ETH_ALEN);
}
out:
spin_unlock_bh(&adapter->mcc_lock);
pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
- get_mac_list_cmd.va, get_mac_list_cmd.dma);
+ get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
}
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
- u32 if_handle, bool active, u32 domain)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
+ u8 *mac, u32 if_handle, bool active, u32 domain)
{
if (!active)
@@ -2892,7 +2909,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
- &cmd.dma, GFP_KERNEL);
+ &cmd.dma, GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -2906,8 +2923,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
req = cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
- wrb, &cmd);
+ OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+ wrb, &cmd);
req->hdr.domain = domain;
req->mac_count = mac_count;
@@ -2917,8 +2934,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
status = be_mcc_notify_wait(adapter);
err:
- dma_free_coherent(&adapter->pdev->dev, cmd.size,
- cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2963,7 +2979,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
@@ -3009,7 +3026,8 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
@@ -3027,10 +3045,9 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
if (!status) {
struct be_cmd_resp_get_hsw_config *resp =
embedded_payload(wrb);
- be_dws_le_to_cpu(&resp->context,
- sizeof(resp->context));
+ be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
- pvid, &resp->context);
+ pvid, &resp->context);
if (pvid)
*pvid = le16_to_cpu(vid);
if (mode)
@@ -3062,11 +3079,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
goto err;
}
@@ -3349,8 +3364,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -3396,7 +3410,7 @@ err:
/* Uses mbox */
static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3424,7 +3438,7 @@ static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
/* Uses sync mcc */
static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3484,8 +3498,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
resp = cmd.va;
desc_count = le32_to_cpu(resp->desc_count);
- pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
- desc_count);
+ pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+ desc_count);
if (pcie)
res->max_vfs = le16_to_cpu(pcie->num_vfs);
@@ -3859,7 +3873,7 @@ err:
}
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
- int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+ int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
struct be_adapter *adapter = netdev_priv(netdev_handle);
struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b60e4d53c1c9..228d4b611084 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2060,7 +2060,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
char *fw_on_flash);
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool promiscuous);
+ u32 num);
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
@@ -2068,7 +2068,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
u32 *function_mode, u32 *function_caps, u16 *asic_rev);
int be_cmd_reset_function(struct be_adapter *adapter);
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size);
+ u32 rss_hash_opts, u16 table_size, u8 *rss_hkey);
int be_process_mcc(struct be_adapter *adapter);
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
u8 status, u8 state);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 15ba96cba65d..970ae337daac 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -132,6 +132,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_compl)},
+ {DRVSTAT_RX_INFO(rx_compl_err)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
/* Number of page allocation failures while posting receive buffers
* to HW.
@@ -181,7 +182,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
#define BE_NO_LOOPBACK 0xff
static void be_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -201,8 +202,7 @@ static void be_get_drvinfo(struct net_device *netdev,
drvinfo->eedump_len = 0;
}
-static u32
-lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
{
u32 data_read = 0, eof;
u8 addn_status;
@@ -212,14 +212,14 @@ lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
memset(&data_len_cmd, 0, sizeof(data_len_cmd));
/* data_offset and data_size should be 0 to get reg len */
status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
- file_name, &data_read, &eof, &addn_status);
+ file_name, &data_read, &eof,
+ &addn_status);
return data_read;
}
-static int
-lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
- u32 buf_len, void *buf)
+static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ u32 buf_len, void *buf)
{
struct be_dma_mem read_cmd;
u32 read_len = 0, total_read_len = 0, chunk_size;
@@ -229,11 +229,11 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
read_cmd.size = LANCER_READ_FILE_CHUNK;
read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
- &read_cmd.dma);
+ &read_cmd.dma);
if (!read_cmd.va) {
dev_err(&adapter->pdev->dev,
- "Memory allocation failure while reading dump\n");
+ "Memory allocation failure while reading dump\n");
return -ENOMEM;
}
@@ -242,8 +242,8 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
LANCER_READ_FILE_CHUNK);
chunk_size = ALIGN(chunk_size, 4);
status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
- total_read_len, file_name, &read_len,
- &eof, &addn_status);
+ total_read_len, file_name,
+ &read_len, &eof, &addn_status);
if (!status) {
memcpy(buf + total_read_len, read_cmd.va, read_len);
total_read_len += read_len;
@@ -254,13 +254,12 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
}
}
pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
- read_cmd.dma);
+ read_cmd.dma);
return status;
}
-static int
-be_get_reg_len(struct net_device *netdev)
+static int be_get_reg_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
@@ -271,7 +270,7 @@ be_get_reg_len(struct net_device *netdev)
if (be_physfn(adapter)) {
if (lancer_chip(adapter))
log_size = lancer_cmd_get_file_len(adapter,
- LANCER_FW_DUMP_FILE);
+ LANCER_FW_DUMP_FILE);
else
be_cmd_get_reg_len(adapter, &log_size);
}
@@ -287,7 +286,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
memset(buf, 0, regs->len);
if (lancer_chip(adapter))
lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
- regs->len, buf);
+ regs->len, buf);
else
be_cmd_get_regs(adapter, regs->len, buf);
}
@@ -337,9 +336,8 @@ static int be_set_coalesce(struct net_device *netdev,
return 0;
}
-static void
-be_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, uint64_t *data)
+static void be_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo;
@@ -390,9 +388,8 @@ be_get_ethtool_stats(struct net_device *netdev,
}
}
-static void
-be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
- uint8_t *data)
+static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+ uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i, j;
@@ -642,16 +639,15 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
adapter->rx_fc = ecmd->rx_pause;
status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
+ adapter->tx_fc, adapter->rx_fc);
if (status)
dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
return status;
}
-static int
-be_set_phys_id(struct net_device *netdev,
- enum ethtool_phys_id_state state)
+static int be_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -708,8 +704,7 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
return status;
}
-static void
-be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -723,8 +718,7 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
-static int
-be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -744,8 +738,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-static int
-be_test_ddr_dma(struct be_adapter *adapter)
+static int be_test_ddr_dma(struct be_adapter *adapter)
{
int ret, i;
struct be_dma_mem ddrdma_cmd;
@@ -761,7 +754,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
for (i = 0; i < 2; i++) {
ret = be_cmd_ddr_dma_test(adapter, pattern[i],
- 4096, &ddrdma_cmd);
+ 4096, &ddrdma_cmd);
if (ret != 0)
goto err;
}
@@ -773,20 +766,17 @@ err:
}
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
- u64 *status)
+ u64 *status)
{
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- loopback_type, 1);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
- loopback_type, 1500,
- 2, 0xabc);
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- BE_NO_LOOPBACK, 1);
+ loopback_type, 1500, 2, 0xabc);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
return *status;
}
-static void
-be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+ u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
@@ -801,12 +791,10 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
- if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
- &data[0]) != 0)
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
- if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
- &data[1]) != 0)
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
@@ -832,16 +820,14 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
}
}
-static int
-be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
struct be_adapter *adapter = netdev_priv(netdev);
return be_load_fw(adapter, efl->data);
}
-static int
-be_get_eeprom_len(struct net_device *netdev)
+static int be_get_eeprom_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -851,18 +837,17 @@ be_get_eeprom_len(struct net_device *netdev)
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_get_file_len(adapter,
- LANCER_VPD_PF_FILE);
+ LANCER_VPD_PF_FILE);
else
return lancer_cmd_get_file_len(adapter,
- LANCER_VPD_VF_FILE);
+ LANCER_VPD_VF_FILE);
} else {
return BE_READ_SEEPROM_LEN;
}
}
-static int
-be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
- uint8_t *data)
+static int be_read_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_dma_mem eeprom_cmd;
@@ -875,10 +860,10 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
- eeprom->len, data);
+ eeprom->len, data);
else
return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
- eeprom->len, data);
+ eeprom->len, data);
}
eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
@@ -933,27 +918,27 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
switch (flow_type) {
case TCP_V4_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V4_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case TCP_V6_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V6_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
}
@@ -962,7 +947,7 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
}
static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+ u32 *rule_locs)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -992,7 +977,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
struct be_rx_obj *rxo;
int status = 0, i, j;
u8 rsstable[128];
- u32 rss_flags = adapter->rss_flags;
+ u32 rss_flags = adapter->rss_info.rss_flags;
if (cmd->data != L3_RSS_FLAGS &&
cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1039,7 +1024,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return -EINVAL;
}
- if (rss_flags == adapter->rss_flags)
+ if (rss_flags == adapter->rss_info.rss_flags)
return status;
if (be_multi_rxq(adapter)) {
@@ -1051,9 +1036,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
}
}
}
- status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
+
+ status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
+ rss_flags, 128, adapter->rss_info.rss_hkey);
if (!status)
- adapter->rss_flags = rss_flags;
+ adapter->rss_info.rss_flags = rss_flags;
return status;
}
@@ -1103,6 +1090,68 @@ static int be_set_channels(struct net_device *netdev,
return be_update_queues(adapter);
}
+static u32 be_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return RSS_INDIR_TABLE_LEN;
+}
+
+static u32 be_get_rxfh_key_size(struct net_device *netdev)
+{
+ return RSS_HASH_KEY_LEN;
+}
+
+static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int i;
+ struct rss_info *rss = &adapter->rss_info;
+
+ if (indir) {
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
+ indir[i] = rss->rss_queue[i];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+
+ return 0;
+}
+
+static int be_set_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+{
+ int rc = 0, i, j;
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+
+ if (indir) {
+ struct be_rx_obj *rxo;
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
+ j = indir[i];
+ rxo = &adapter->rx_obj[j];
+ rsstable[i] = rxo->rss_id;
+ adapter->rss_info.rss_queue[i] = j;
+ }
+ } else {
+ memcpy(rsstable, adapter->rss_info.rsstable,
+ RSS_INDIR_TABLE_LEN);
+ }
+
+ if (!hkey)
+ hkey = adapter->rss_info.rss_hkey;
+
+ rc = be_cmd_rss_config(adapter, rsstable,
+ adapter->rss_info.rss_flags,
+ RSS_INDIR_TABLE_LEN, hkey);
+ if (rc) {
+ adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
+ return -EIO;
+ }
+ memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
+ memcpy(adapter->rss_info.rsstable, rsstable,
+ RSS_INDIR_TABLE_LEN);
+ return 0;
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1129,6 +1178,10 @@ const struct ethtool_ops be_ethtool_ops = {
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
.set_rxnfc = be_set_rxnfc,
+ .get_rxfh_indir_size = be_get_rxfh_indir_size,
+ .get_rxfh_key_size = be_get_rxfh_key_size,
+ .get_rxfh = be_get_rxfh,
+ .set_rxfh = be_set_rxfh,
.get_channels = be_get_channels,
.set_channels = be_set_channels
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a18645407d21..3f04356afa82 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -134,7 +134,7 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
- u16 len, u16 entry_size)
+ u16 len, u16 entry_size)
{
struct be_dma_mem *mem = &q->dma_mem;
@@ -154,7 +154,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
u32 reg, enabled;
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
- &reg);
+ &reg);
enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
if (!enabled && enable)
@@ -165,7 +165,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
return;
pci_write_config_dword(adapter->pdev,
- PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
@@ -206,12 +206,11 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
- bool arm, bool clear_int, u16 num_popped)
+ bool arm, bool clear_int, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
- val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
- DB_EQ_RING_ID_EXT_MASK_SHIFT);
+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_error)
return;
@@ -477,7 +476,7 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
- if (be_roce_supported(adapter)) {
+ if (be_roce_supported(adapter)) {
drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
drvs->rx_roce_frames = port_stats->roce_frames_received;
@@ -491,8 +490,7 @@ static void populate_lancer_stats(struct be_adapter *adapter)
{
struct be_drv_stats *drvs = &adapter->drv_stats;
- struct lancer_pport_stats *pport_stats =
- pport_stats_from_cmd(adapter);
+ struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@ -539,8 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
}
static void populate_erx_stats(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- u32 erx_stat)
+ struct be_rx_obj *rxo, u32 erx_stat)
{
if (!BEx_chip(adapter))
rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@ -579,7 +576,7 @@ void be_parse_stats(struct be_adapter *adapter)
}
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+ struct rtnl_link_stats64 *stats)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -660,7 +657,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
}
static void be_tx_stats_update(struct be_tx_obj *txo,
- u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+ u32 wrb_cnt, u32 copied, u32 gso_segs,
+ bool stopped)
{
struct be_tx_stats *stats = tx_stats(txo);
@@ -676,7 +674,7 @@ static void be_tx_stats_update(struct be_tx_obj *txo,
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
- bool *dummy)
+ bool *dummy)
{
int cnt = (skb->len > skb->data_len);
@@ -704,7 +702,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
}
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
u8 vlan_prio;
u16 vlan_tag;
@@ -733,7 +731,8 @@ static u16 skb_ip_proto(struct sk_buff *skb)
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
- struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
+ struct sk_buff *skb, u32 wrb_cnt, u32 len,
+ bool skip_hw_vlan)
{
u16 vlan_tag, proto;
@@ -774,7 +773,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
- bool unmap_single)
+ bool unmap_single)
{
dma_addr_t dma;
@@ -791,8 +790,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
}
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
- struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
- bool skip_hw_vlan)
+ struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
+ bool skip_hw_vlan)
{
dma_addr_t busaddr;
int i, copied = 0;
@@ -821,8 +820,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag =
- &skb_shinfo(skb)->frags[i];
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
busaddr = skb_frag_dma_map(dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
@@ -927,8 +925,7 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
-static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
- struct sk_buff *skb)
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
@@ -959,7 +956,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
*/
if (be_pvid_tagging_enabled(adapter) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
- *skip_hw_vlan = true;
+ *skip_hw_vlan = true;
/* HW has a bug wherein it will calculate CSUM for VLAN
* pkts even though it is disabled.
@@ -1077,16 +1074,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (new_mtu < BE_MIN_MTU ||
- new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
- (ETH_HLEN + ETH_FCS_LEN))) {
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
dev_info(&adapter->pdev->dev,
- "MTU must be between %d and %d bytes\n",
- BE_MIN_MTU,
- (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+ "MTU must be between %d and %d bytes\n",
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
return -EINVAL;
}
dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
- netdev->mtu, new_mtu);
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
return 0;
}
@@ -1098,7 +1094,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
static int be_vid_config(struct be_adapter *adapter)
{
u16 vids[BE_NUM_VLANS_SUPPORTED];
- u16 num = 0, i;
+ u16 num = 0, i = 0;
int status = 0;
/* No need to further configure vids if in promiscuous mode */
@@ -1109,13 +1105,10 @@ static int be_vid_config(struct be_adapter *adapter)
goto set_vlan_promisc;
/* Construct VLAN Table to give to HW */
- for (i = 0; i < VLAN_N_VID; i++)
- if (adapter->vlan_tag[i])
- vids[num++] = cpu_to_le16(i);
-
- status = be_cmd_vlan_config(adapter, adapter->if_handle,
- vids, num, 0);
+ for_each_set_bit(i, adapter->vids, VLAN_N_VID)
+ vids[num++] = cpu_to_le16(i);
+ status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
if (status) {
/* Set to VLAN promisc mode as setting VLAN filter failed */
if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
@@ -1160,16 +1153,16 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (lancer_chip(adapter) && vid == 0)
return status;
- if (adapter->vlan_tag[vid])
+ if (test_bit(vid, adapter->vids))
return status;
- adapter->vlan_tag[vid] = 1;
+ set_bit(vid, adapter->vids);
adapter->vlans_added++;
status = be_vid_config(adapter);
if (status) {
adapter->vlans_added--;
- adapter->vlan_tag[vid] = 0;
+ clear_bit(vid, adapter->vids);
}
return status;
@@ -1184,12 +1177,12 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (lancer_chip(adapter) && vid == 0)
goto ret;
- adapter->vlan_tag[vid] = 0;
+ clear_bit(vid, adapter->vids);
status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
else
- adapter->vlan_tag[vid] = 1;
+ set_bit(vid, adapter->vids);
ret:
return status;
}
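
The be_vlan_add_vid()/be_vlan_rem_vid() hunks above replace the per-VID byte array adapter->vlan_tag[] with a bitmap (adapter->vids) maintained with set_bit/clear_bit/test_bit and walked with for_each_set_bit in be_vid_config(). The following is a minimal userspace sketch of that bookkeeping, not driver code; vid_set/vid_clear/vid_test are illustrative stand-ins for the kernel helpers.

/* One bit per VLAN ID instead of one byte per ID. */
#include <stdint.h>
#include <stdio.h>

#define VLAN_N_VID	4096
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITMAP_LONGS	((VLAN_N_VID + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long vids[BITMAP_LONGS];

static void vid_set(unsigned int vid)
{
	vids[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
}

static void vid_clear(unsigned int vid)
{
	vids[vid / BITS_PER_LONG] &= ~(1UL << (vid % BITS_PER_LONG));
}

static int vid_test(unsigned int vid)
{
	return (vids[vid / BITS_PER_LONG] >> (vid % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned int i, num = 0;
	uint16_t table[VLAN_N_VID];

	vid_set(10);
	vid_set(100);
	vid_clear(10);

	/* walk the set bits, as be_vid_config() does with for_each_set_bit */
	for (i = 0; i < VLAN_N_VID; i++)
		if (vid_test(i))
			table[num++] = (uint16_t)i;

	printf("%u VLAN(s) configured, first id %u\n", num, table[0]);
	return 0;
}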
@@ -1254,8 +1247,10 @@ static void be_set_rx_mode(struct net_device *netdev)
/* Set to MCAST promisc mode if setting MULTICAST address fails */
if (status) {
- dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
- dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
+ dev_info(&adapter->pdev->dev,
+ "Exhausted multicast HW filters.\n");
+ dev_info(&adapter->pdev->dev,
+ "Disabling HW multicast filtering.\n");
be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
}
done:
@@ -1287,7 +1282,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
- mac, vf);
+ mac, vf);
else
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -1295,7 +1290,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
}
static int be_get_vf_config(struct net_device *netdev, int vf,
- struct ifla_vf_info *vi)
+ struct ifla_vf_info *vi)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1316,8 +1311,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
-static int be_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1348,8 +1342,7 @@ static int be_set_vf_vlan(struct net_device *netdev,
return status;
}
-static int be_set_vf_tx_rate(struct net_device *netdev,
- int vf, int rate)
+static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
{
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
@@ -1369,7 +1362,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
- "tx rate %d on VF %d failed\n", rate, vf);
+ "tx rate %d on VF %d failed\n", rate, vf);
else
adapter->vf_cfg[vf].tx_rate = rate;
return status;
@@ -1469,7 +1462,7 @@ modify_eqd:
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
+ struct be_rx_compl_info *rxcp)
{
struct be_rx_stats *stats = rx_stats(rxo);
@@ -1566,7 +1559,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
page_info->page_offset + hdr_len;
- skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0],
+ curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
skb->truesize += rx_frag_size;
skb->tail += hdr_len;
@@ -1725,8 +1719,8 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
if (rxcp->vlanf) {
rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
- compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
+ vlan_tag, compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
rxcp->tunneled =
@@ -1757,8 +1751,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
if (rxcp->vlanf) {
rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
- compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+ vlan_tag, compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@ -1799,7 +1793,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->vlan_tag = swab16(rxcp->vlan_tag);
if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
- !adapter->vlan_tag[rxcp->vlan_tag])
+ !test_bit(rxcp->vlan_tag, adapter->vids))
rxcp->vlanf = 0;
}
@@ -1915,7 +1909,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
}
static u16 be_tx_compl_process(struct be_adapter *adapter,
- struct be_tx_obj *txo, u16 last_index)
+ struct be_tx_obj *txo, u16 last_index)
{
struct be_queue_info *txq = &txo->q;
struct be_eth_wrb *wrb;
@@ -2122,7 +2116,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry));
+ sizeof(struct be_eq_entry));
if (rc)
return rc;
@@ -2155,7 +2149,7 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
cq = &adapter->mcc_obj.cq;
if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
- sizeof(struct be_mcc_compl)))
+ sizeof(struct be_mcc_compl)))
goto err;
/* Use the default EQ for MCC completions */
@@ -2275,7 +2269,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
rxo->adapter = adapter;
cq = &rxo->cq;
rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
- sizeof(struct be_eth_rx_compl));
+ sizeof(struct be_eth_rx_compl));
if (rc)
return rc;
@@ -2339,7 +2333,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
- int budget, int polling)
+ int budget, int polling)
{
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
@@ -2365,7 +2359,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
* promiscuous mode on some skews
*/
if (unlikely(rxcp->port != adapter->port_num &&
- !lancer_chip(adapter))) {
+ !lancer_chip(adapter))) {
be_rx_compl_discard(rxo, rxcp);
goto loop_continue;
}
@@ -2405,8 +2399,9 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
if (!txcp)
break;
num_wrbs += be_tx_compl_process(adapter, txo,
- AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp));
+ AMAP_GET_BITS(struct
+ amap_eth_tx_compl,
+ wrb_index, txcp));
}
if (work_done) {
@@ -2416,7 +2411,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
/* As Tx wrbs have been freed up, wake up netdev queue
* if it was stopped due to lack of tx wrbs. */
if (__netif_subqueue_stopped(adapter->netdev, idx) &&
- atomic_read(&txo->q.used) < txo->q.len / 2) {
+ atomic_read(&txo->q.used) < txo->q.len / 2) {
netif_wake_subqueue(adapter->netdev, idx);
}
@@ -2510,9 +2505,9 @@ void be_detect_error(struct be_adapter *adapter)
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
sliport_err1 = ioread32(adapter->db +
- SLIPORT_ERROR1_OFFSET);
+ SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
- SLIPORT_ERROR2_OFFSET);
+ SLIPORT_ERROR2_OFFSET);
adapter->hw_error = true;
/* Do not log error messages if its a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
@@ -2531,13 +2526,13 @@ void be_detect_error(struct be_adapter *adapter)
}
} else {
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW, &ue_lo);
+ PCICFG_UE_STATUS_LOW, &ue_lo);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HIGH, &ue_hi);
+ PCICFG_UE_STATUS_HIGH, &ue_hi);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
+ PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+ PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
@@ -2624,7 +2619,7 @@ fail:
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
- struct be_eq_obj *eqo)
+ struct be_eq_obj *eqo)
{
return adapter->msix_entries[eqo->msix_idx].vector;
}
@@ -2648,7 +2643,7 @@ err_msix:
for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
free_irq(be_msix_vec_get(adapter, eqo), eqo);
dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
- status);
+ status);
be_msix_disable(adapter);
return status;
}
@@ -2774,7 +2769,8 @@ static int be_rx_qs_create(struct be_adapter *adapter)
{
struct be_rx_obj *rxo;
int rc, i, j;
- u8 rsstable[128];
+ u8 rss_hkey[RSS_HASH_KEY_LEN];
+ struct rss_info *rss = &adapter->rss_info;
for_all_rx_queues(adapter, rxo, i) {
rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@ -2799,31 +2795,36 @@ static int be_rx_qs_create(struct be_adapter *adapter)
}
if (be_multi_rxq(adapter)) {
- for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ for (j = 0; j < RSS_INDIR_TABLE_LEN;
+ j += adapter->num_rx_qs - 1) {
for_all_rss_queues(adapter, rxo, i) {
- if ((j + i) >= 128)
+ if ((j + i) >= RSS_INDIR_TABLE_LEN)
break;
- rsstable[j + i] = rxo->rss_id;
+ rss->rsstable[j + i] = rxo->rss_id;
+ rss->rss_queue[j + i] = i;
}
}
- adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
- RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+ rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
if (!BEx_chip(adapter))
- adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
- RSS_ENABLE_UDP_IPV6;
+ rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6;
} else {
/* Disable RSS, if only default RX Q is created */
- adapter->rss_flags = RSS_ENABLE_NONE;
+ rss->rss_flags = RSS_ENABLE_NONE;
}
- rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
- 128);
+ get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
+ rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+ 128, rss_hkey);
if (rc) {
- adapter->rss_flags = RSS_ENABLE_NONE;
+ rss->rss_flags = RSS_ENABLE_NONE;
return rc;
}
+ memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
+
/* First time posting */
for_all_rx_queues(adapter, rxo, i)
be_post_rx_frags(rxo, GFP_KERNEL);
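
The RSS hunk above moves the indirection table and flags into adapter->rss_info and seeds a random hash key; the table itself is filled by striding through the 128 entries and assigning the RSS queues round-robin. A standalone sketch of just that fill pattern follows; the table length and queue count are illustrative constants, not driver definitions.

#include <stdio.h>

#define RSS_INDIR_TABLE_LEN	128

int main(void)
{
	int num_rx_qs = 5;		/* 1 default queue + 4 RSS queues */
	int rss_queues = num_rx_qs - 1;
	unsigned char rsstable[RSS_INDIR_TABLE_LEN];
	int i, j;

	for (j = 0; j < RSS_INDIR_TABLE_LEN; j += rss_queues)
		for (i = 0; i < rss_queues; i++) {
			if (j + i >= RSS_INDIR_TABLE_LEN)
				break;
			rsstable[j + i] = (unsigned char)i;	/* stand-in for rxo->rss_id */
		}

	/* first two strides come out as 0 1 2 3 0 1 2 3 */
	for (i = 0; i < 8; i++)
		printf("%u ", rsstable[i]);
	printf("\n");
	return 0;
}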
@@ -2896,7 +2897,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
if (enable) {
status = pci_write_config_dword(adapter->pdev,
- PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+ PCICFG_PM_CONTROL_OFFSET,
+ PCICFG_PM_CONTROL_MASK);
if (status) {
dev_err(&adapter->pdev->dev,
"Could not enable Wake-on-lan\n");
@@ -2905,7 +2907,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
return status;
}
status = be_cmd_enable_magic_wol(adapter,
- adapter->netdev->dev_addr, &cmd);
+ adapter->netdev->dev_addr,
+ &cmd);
pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
} else {
@@ -2944,7 +2947,8 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
if (status)
dev_err(&adapter->pdev->dev,
- "Mac address assignment failed for VF %d\n", vf);
+ "Mac address assignment failed for VF %d\n",
+ vf);
else
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -3086,9 +3090,11 @@ static int be_vfs_if_create(struct be_adapter *adapter)
/* If a FW profile exists, then cap_flags are updated */
en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- &vf_cfg->if_handle, vf + 1);
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST);
+ status =
+ be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
goto err;
}
@@ -3594,8 +3600,8 @@ static void be_netpoll(struct net_device *netdev)
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
- const u8 *p, u32 img_start, int image_size,
- int hdr_size)
+ const u8 *p, u32 img_start, int image_size,
+ int hdr_size)
{
u32 crc_offset;
u8 flashed_crc[4];
@@ -3605,11 +3611,10 @@ static bool be_flash_redboot(struct be_adapter *adapter,
p += crc_offset;
- status = be_cmd_get_flash_crc(adapter, flashed_crc,
- (image_size - 4));
+ status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
if (status) {
dev_err(&adapter->pdev->dev,
- "could not get crc from flash, not flashing redboot\n");
+ "could not get crc from flash, not flashing redboot\n");
return false;
}
@@ -3649,8 +3654,8 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
}
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
- int header_size,
- const struct firmware *fw)
+ int header_size,
+ const struct firmware *fw)
{
struct flash_section_info *fsec = NULL;
const u8 *p = fw->data;
@@ -3666,7 +3671,7 @@ static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
}
static int be_flash(struct be_adapter *adapter, const u8 *img,
- struct be_dma_mem *flash_cmd, int optype, int img_size)
+ struct be_dma_mem *flash_cmd, int optype, int img_size)
{
u32 total_bytes = 0, flash_op, num_bytes = 0;
int status = 0;
@@ -3693,7 +3698,7 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
memcpy(req->data_buf, img, num_bytes);
img += num_bytes;
status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
- flash_op, num_bytes);
+ flash_op, num_bytes);
if (status) {
if (status == ILLEGAL_IOCTL_REQ &&
optype == OPTYPE_PHY_FW)
@@ -3708,10 +3713,8 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd,
- int num_of_images)
-
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
int status = 0, i, filehdr_size = 0;
int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
@@ -3793,8 +3796,10 @@ static int be_flash_BEx(struct be_adapter *adapter,
if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
redboot = be_flash_redboot(adapter, fw->data,
- pflashcomp[i].offset, pflashcomp[i].size,
- filehdr_size + img_hdrs_size);
+ pflashcomp[i].offset,
+ pflashcomp[i].size,
+ filehdr_size +
+ img_hdrs_size);
if (!redboot)
continue;
}
@@ -3805,7 +3810,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
return -1;
status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
- pflashcomp[i].size);
+ pflashcomp[i].size);
if (status) {
dev_err(&adapter->pdev->dev,
"Flashing section type %d failed.\n",
@@ -3817,8 +3822,8 @@ static int be_flash_BEx(struct be_adapter *adapter,
}
static int be_flash_skyhawk(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd, int num_of_images)
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
int status = 0, i, filehdr_size = 0;
int img_offset, img_size, img_optype, redboot;
@@ -3866,8 +3871,9 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
if (img_optype == OPTYPE_REDBOOT) {
redboot = be_flash_redboot(adapter, fw->data,
- img_offset, img_size,
- filehdr_size + img_hdrs_size);
+ img_offset, img_size,
+ filehdr_size +
+ img_hdrs_size);
if (!redboot)
continue;
}
@@ -3889,7 +3895,7 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
}
static int lancer_fw_download(struct be_adapter *adapter,
- const struct firmware *fw)
+ const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
@@ -3955,7 +3961,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
}
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
+ flash_cmd.dma);
if (status) {
dev_err(&adapter->pdev->dev,
"Firmware load error. "
@@ -3976,9 +3982,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
goto lancer_fw_exit;
}
} else if (change_status != LANCER_NO_RESET_NEEDED) {
- dev_err(&adapter->pdev->dev,
- "System reboot required for new FW"
- " to be active\n");
+ dev_err(&adapter->pdev->dev,
+ "System reboot required for new FW to be active\n");
}
dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@ -4042,7 +4047,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
switch (ufi_type) {
case UFI_TYPE4:
status = be_flash_skyhawk(adapter, fw,
- &flash_cmd, num_imgs);
+ &flash_cmd, num_imgs);
break;
case UFI_TYPE3R:
status = be_flash_BEx(adapter, fw, &flash_cmd,
@@ -4112,8 +4117,7 @@ fw_exit:
return status;
}
-static int be_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh)
+static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
struct be_adapter *adapter = netdev_priv(dev);
struct nlattr *attr, *br_spec;
@@ -4155,8 +4159,7 @@ err:
}
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev,
- u32 filter_mask)
+ struct net_device *dev, u32 filter_mask)
{
struct be_adapter *adapter = netdev_priv(dev);
int status = 0;
@@ -4870,7 +4873,7 @@ static void be_shutdown(struct pci_dev *pdev)
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+ pci_channel_state_t state)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8b70ca7e342b..f3658bdb64cc 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -769,11 +769,6 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(phy, ifr, cmd);
}
-static int ethoc_config(struct net_device *dev, struct ifmap *map)
-{
- return -ENOSYS;
-}
-
static void ethoc_do_set_mac_address(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
@@ -995,7 +990,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
.ndo_do_ioctl = ethoc_ioctl,
- .ndo_set_config = ethoc_config,
.ndo_set_mac_address = ethoc_set_mac_address,
.ndo_set_rx_mode = ethoc_set_multicast_list,
.ndo_change_mtu = ethoc_change_mtu,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1471c5464a89..e27e60910949 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -265,10 +265,10 @@ struct e1000_adapter {
u32 tx_hwtstamp_timeouts;
/* Rx */
- bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
- int work_to_do) ____cacheline_aligned_in_smp;
- void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
- gfp_t gfp);
+ bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
+ int work_to_do) ____cacheline_aligned_in_smp;
+ void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
+ gfp_t gfp);
struct e1000_ring *rx_ring;
u32 rx_int_delay;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index cad250bc1b99..4e5ad7ebe1f2 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -169,6 +169,7 @@ static int e1000_get_settings(struct net_device *netdev,
}
} else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
+
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
speed = SPEED_1000;
@@ -783,25 +784,26 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
reg + (offset << 2), val,
(test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
u32 val;
+
__ew32(&adapter->hw, reg, write & mask);
val = __er32(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
@@ -1717,6 +1719,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f0bbd4246d71..5f5539561661 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1334,6 +1334,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (((hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_pch_lpt)) && link) {
u32 reg;
+
reg = er32(STATUS);
if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
u16 emi_addr;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3e69386add04..e4207efd13f8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -599,6 +599,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e_err("ME firmware caused invalid RDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -615,6 +616,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
u32 tctl = er32(TCTL);
+
ew32(TCTL, tctl & ~E1000_TCTL_EN);
e_err("ME firmware caused invalid TDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -1198,6 +1200,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
bool cleaned = false;
+
rmb(); /* read buffer_info after eop_desc */
for (; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1753,6 +1756,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
adapter->flags & FLAG_RX_NEEDS_RESTART) {
/* disable receives */
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
adapter->flags |= FLAG_RESTART_NOW;
}
@@ -1960,6 +1964,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
if (hw->mac.type == e1000_82574) {
u32 rfctl = er32(RFCTL);
+
rfctl |= E1000_RFCTL_ACK_DIS;
ew32(RFCTL, rfctl);
}
@@ -2204,6 +2209,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
int i;
+
for (i = 0; i < adapter->num_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -2921,6 +2927,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
if (adapter->flags2 & FLAG2_DMA_BURST) {
u32 txdctl = er32(TXDCTL(0));
+
txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
E1000_TXDCTL_WTHRESH);
/* set up some performance related parameters to encourage the
@@ -3239,6 +3246,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
+
ew32(RXDCTL(0), rxdctl | 0x3);
}
@@ -4695,6 +4703,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
/* Correctable ECC Errors */
if (hw->mac.type == e1000_pch_lpt) {
u32 pbeccsts = er32(PBECCSTS);
+
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
@@ -4808,6 +4817,7 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
(adapter->flags & FLAG_RESTART_NOW)) {
struct e1000_hw *hw = &adapter->hw;
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl | E1000_RCTL_EN);
adapter->flags &= ~FLAG_RESTART_NOW;
}
@@ -4930,6 +4940,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
!txb2b) {
u32 tarc0;
+
tarc0 = er32(TARC(0));
tarc0 &= ~SPEED_MODE_BIT;
ew32(TARC(0), tarc0);
@@ -5170,7 +5181,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
__be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
+ return false;
if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -5215,7 +5226,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
i = 0;
tx_ring->next_to_use = i;
- return 1;
+ return true;
}
static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
@@ -6209,6 +6220,7 @@ static int __e1000_resume(struct pci_dev *pdev)
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
u32 wus = er32(WUS);
+
if (wus) {
e_info("MAC Wakeup cause - %s\n",
wus & E1000_WUS_EX ? "Unicast Packet" :
@@ -7027,7 +7039,7 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -7144,6 +7156,7 @@ static struct pci_driver e1000_driver = {
static int __init e1000_init_module(void)
{
int ret;
+
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index a9a976f04bff..b1f212b7baf7 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -398,6 +398,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_eec_bits(hw, word_out, 16);
widx++;
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index d0ac0f3249c8..aa1923f7ebdd 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -436,6 +436,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_IntMode > bd) {
unsigned int int_mode = IntMode[bd];
+
e1000_validate_option(&int_mode, &opt, adapter);
adapter->int_mode = int_mode;
} else {
@@ -457,6 +458,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
+
e1000_validate_option(&spd, &opt, adapter);
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
@@ -473,6 +475,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_CrcStripping > bd) {
unsigned int crc_stripping = CrcStripping[bd];
+
e1000_validate_option(&crc_stripping, &opt, adapter);
if (crc_stripping == OPTION_ENABLED) {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
@@ -495,6 +498,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_KumeranLockLoss > bd) {
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
enabled = kmrn_lock_loss;
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 00b3fc98bf30..b2005e13fb01 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2896,6 +2896,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
(hw->phy.addr == 2) &&
!(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
u16 data2 = 0x7EFF;
+
ret_val = e1000_access_phy_debug_regs_hv(hw,
(1 << 6) | 0x3,
&data2, false);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index beb7b4393a6c..a46571c1e9f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -329,9 +329,7 @@ struct i40e_pf {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
- struct work_struct ptp_tx_work;
struct hwtstamp_config tstamp_config;
- unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock; /* Used to protect the device time registers. */
u64 ptp_base_adj;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ed3902bf249b..34415d342ece 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,6 +33,16 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+ (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
+/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -585,6 +595,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->aq.nvm_busy = false;
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
@@ -708,6 +719,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -835,6 +852,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -929,6 +949,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
}
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
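
The i40e_adminq.c hunks above add an nvm_busy flag: it is set when an NVM erase/update command is posted, rejects further NVM commands with I40E_ERR_NVM while set, and is cleared when the completion is cleaned from the receive queue. Below is a toy model of that gating, not driver code; the enum values and function names are illustrative.

#include <stdbool.h>
#include <stdio.h>

enum opcode { OPC_OTHER, OPC_NVM_UPDATE, OPC_NVM_ERASE };

struct adminq { bool nvm_busy; };

static bool is_nvm_update_op(enum opcode opc)
{
	return opc == OPC_NVM_UPDATE || opc == OPC_NVM_ERASE;
}

static int send_command(struct adminq *aq, enum opcode opc)
{
	if (is_nvm_update_op(opc) && aq->nvm_busy)
		return -1;		/* I40E_ERR_NVM in the driver */
	if (is_nvm_update_op(opc))
		aq->nvm_busy = true;
	return 0;
}

static void clean_arq_element(struct adminq *aq, enum opcode opc)
{
	if (is_nvm_update_op(opc))
		aq->nvm_busy = false;	/* completion arrived, allow the next one */
}

int main(void)
{
	struct adminq aq = { .nvm_busy = false };

	printf("first update:  %d\n", send_command(&aq, OPC_NVM_UPDATE)); /* 0  */
	printf("second update: %d\n", send_command(&aq, OPC_NVM_ERASE));  /* -1 */
	clean_arq_element(&aq, OPC_NVM_UPDATE);
	printf("after compl:   %d\n", send_command(&aq, OPC_NVM_ERASE));  /* 0  */
	return 0;
}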
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 993f7685a911..b1552fbc48a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -90,6 +90,7 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 7b6374a8f8da..f2ba4b76ecd3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -182,9 +182,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- i40e_aqc_opc_set_storm_control_config = 0x0280,
- i40e_aqc_opc_get_storm_control_config = 0x0281,
-
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -207,6 +204,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -1289,27 +1287,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
- __le32 broadcast_threshold;
- __le32 multicast_threshold;
- __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
-#define I40E_AQC_STORM_CONTROL_MDICW 0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
-#define I40E_AQC_STORM_CONTROL_BDICW 0x08
-#define I40E_AQC_STORM_CONTROL_BIDU 0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
- I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1499,6 +1476,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1583,11 +1569,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
-#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 922cdcc45c54..22eefda3a530 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -975,6 +975,13 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->an_info = resp->an_info;
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
+ hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = true;
+ else
+ hw_link_info->crc_enable = false;
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
hw_link_info->lse_enable = true;
@@ -1300,6 +1307,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_aqc_driver_version *cmd =
(struct i40e_aqc_driver_version *)&desc.params.raw;
i40e_status status;
+ u16 len;
if (dv == NULL)
return I40E_ERR_PARAM;
@@ -1311,7 +1319,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
cmd->driver_minor_ver = dv->minor_version;
cmd->driver_build_ver = dv->build_version;
cmd->driver_subbuild_ver = dv->subbuild_version;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
return status;
}
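
The hunk above makes i40e_aq_send_driver_version() pass the new driver_string field as the command buffer, with its length computed by scanning up to the first NUL byte, the first non-ASCII byte (>= 0x80), or the end of the 32-byte field. A small sketch of that length computation, with an illustrative helper name:

#include <stdio.h>
#include <string.h>

static unsigned int ascii_prefix_len(const unsigned char *s, unsigned int max)
{
	unsigned int len = 0;

	while (len < max && s[len] < 0x80 && s[len])
		len++;
	return len;
}

int main(void)
{
	unsigned char driver_string[32];

	memset(driver_string, 0, sizeof(driver_string));
	memcpy(driver_string, "0.3.46-k", 8);

	printf("buffer length sent: %u\n",
	       ascii_prefix_len(driver_string, sizeof(driver_string)));	/* 8 */
	return 0;
}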
@@ -2094,8 +2109,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 header_len,
- u8 protocol_index, u8 *filter_index,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2253,6 +2268,35 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
}
/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = cpu_to_le16(seid);
+ cmd->credit = cpu_to_le16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
* @hw: pointer to the hw struct
* @seid: VSI seid
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 3c37386fd138..1aaec400b28e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1744,10 +1744,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
} else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
- } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
- } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1967,8 +1963,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " fd-atr off\n");
dev_info(&pf->pdev->dev, " fd-atr on\n");
- dev_info(&pf->pdev->dev, " fd-sb off\n");
- dev_info(&pf->pdev->dev, " fd-sb on\n");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
dev_info(&pf->pdev->dev, " lldp get local\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 03d99cbc5c25..0cf47c958081 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -649,7 +649,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
rcu_read_lock();
- for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
+ for (j = 0; j < vsi->num_queue_pairs; j++) {
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
struct i40e_ring *rx_ring;
@@ -662,14 +662,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i] = tx_ring->stats.packets;
data[i + 1] = tx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+ i += 2;
/* Rx ring is the 2nd half of the queue pair */
rx_ring = &tx_ring[1];
do {
start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
- data[i + 2] = rx_ring->stats.packets;
- data[i + 3] = rx_ring->stats.bytes;
+ data[i] = rx_ring->stats.packets;
+ data[i + 1] = rx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+ i += 2;
}
rcu_read_unlock();
if (vsi == pf->vsi[pf->lan_vsi]) {
@@ -1189,6 +1191,12 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
return -EINVAL;
fsp->flow_type = rule->flow_type;
+ if (fsp->flow_type == IP_USER_FLOW) {
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ }
+
fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d5d98fe2691d..5c341aeb5d53 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -747,6 +747,7 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
{ 0 }
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 341de925a298..eb65fe23c4a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -56,6 +56,7 @@ struct i40e_hmc_obj_rxq {
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cf0761f08911..e399f9b70777 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 36
+#define DRV_VERSION_BUILD 46
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -2312,6 +2312,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
+ /* set the prefena field to 1 because the manual says to */
+ rx_ctx.prefena = 1;
/* clear the context in the HMC */
err = i40e_clear_lan_rx_queue_context(hw, pf_q);
@@ -3160,9 +3162,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
usleep_range(1000, 2000);
}
/* Skip if the queue is already in the requested state */
- if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- continue;
- if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
continue;
/* turn on/off the queue */
@@ -3178,13 +3178,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
/* wait for the change to finish */
for (j = 0; j < 10; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- if (enable) {
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
@@ -3223,15 +3218,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
usleep_range(1000, 2000);
}
- if (enable) {
- /* is STAT set ? */
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
- } else {
- /* is !STAT set ? */
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
- }
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
/* turn on/off the queue */
if (enable)
@@ -3244,13 +3233,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
for (j = 0; j < 10; j++) {
rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- if (enable) {
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
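
The i40e_vsi_control_tx()/_rx() hunks above collapse the paired enable/disable branches into a single test, enable == !!(reg & QENA_STAT_MASK): the double negation normalizes the masked status bit to 0 or 1 so it can be compared directly with the requested bool. A minimal sketch of that idiom; the mask value here is illustrative, not the hardware definition.

#include <stdbool.h>
#include <stdio.h>

#define QENA_STAT_MASK	0x00000004u

static bool queue_in_requested_state(bool enable, unsigned int reg)
{
	/* true when the queue is already enabled/disabled as requested */
	return enable == !!(reg & QENA_STAT_MASK);
}

int main(void)
{
	printf("%d\n", queue_in_requested_state(true,  0x00000004u));	/* 1: already on   */
	printf("%d\n", queue_in_requested_state(true,  0x00000000u));	/* 0: must enable  */
	printf("%d\n", queue_in_requested_state(false, 0x00000004u));	/* 0: must disable */
	return 0;
}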
@@ -3513,6 +3497,19 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_close - Shut down a VSI
+ * @vsi: the vsi to be quelled
+ **/
+static void i40e_vsi_close(struct i40e_vsi *vsi)
+{
+ if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+}
+
+/**
* i40e_quiesce_vsi - Pause a given VSI
* @vsi: the VSI being paused
**/
@@ -3525,8 +3522,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev)) {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
} else {
- set_bit(__I40E_DOWN, &vsi->state);
- i40e_down(vsi);
+ i40e_vsi_close(vsi);
}
}
@@ -3543,7 +3539,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else
- i40e_up(vsi); /* this clears the DOWN bit */
+ i40e_vsi_open(vsi); /* this clears the DOWN bit */
}
/**
@@ -4028,6 +4024,8 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
pf->vsi[v]->seid);
/* Will try to configure as many components */
} else {
+ /* Re-configure VSI vectors based on updated TC map */
+ i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
if (pf->vsi[v]->netdev)
i40e_dcbnl_set_all(pf->vsi[v]);
}
@@ -4067,6 +4065,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
DCB_CAP_DCBX_VER_IEEE;
pf->flags |= I40E_FLAG_DCB_ENABLED;
}
+ } else {
+ dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+ pf->hw.aq.asq_last_status);
}
out:
@@ -4309,24 +4310,32 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
if (err)
goto err_setup_rx;
- if (!vsi->netdev) {
+ if (vsi->netdev) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+ dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+ err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ } else if (vsi->type == I40E_VSI_FDIR) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+ dev_driver_string(&pf->pdev->dev));
+ err = i40e_vsi_request_irq(vsi, int_name);
+ } else {
err = EINVAL;
goto err_setup_rx;
}
- snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
- dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
- err = i40e_vsi_request_irq(vsi, int_name);
- if (err)
- goto err_setup_rx;
-
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
err = i40e_up_complete(vsi);
if (err)
@@ -4383,14 +4392,7 @@ static int i40e_close(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- if (test_and_set_bit(__I40E_DOWN, &vsi->state))
- return 0;
-
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
-
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
return 0;
}
@@ -5226,9 +5228,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
- /* increment MSI-X count because current FW skips one */
- pf->hw.func_caps.num_msix_vectors++;
-
if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
(pf->hw.aq.fw_maj_ver < 2)) {
pf->hw.func_caps.num_msix_vectors++;
@@ -5267,8 +5266,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
- bool new_vsi = false;
- int err, i;
+ int i;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
@@ -5288,47 +5286,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- goto err_vsi;
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ return;
}
- new_vsi = true;
}
- i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
- err = i40e_vsi_setup_tx_resources(vsi);
- if (err)
- goto err_setup_tx;
- err = i40e_vsi_setup_rx_resources(vsi);
- if (err)
- goto err_setup_rx;
-
- if (new_vsi) {
- char int_name[IFNAMSIZ + 9];
- err = i40e_vsi_configure(vsi);
- if (err)
- goto err_setup_rx;
- snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
- dev_driver_string(&pf->pdev->dev));
- err = i40e_vsi_request_irq(vsi, int_name);
- if (err)
- goto err_setup_rx;
- err = i40e_up_complete(vsi);
- if (err)
- goto err_up_complete;
- clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
- }
-
- return;
-
-err_up_complete:
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
-err_setup_rx:
- i40e_vsi_free_rx_resources(vsi);
-err_setup_tx:
- i40e_vsi_free_tx_resources(vsi);
-err_vsi:
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- i40e_vsi_clear(vsi);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
/**
@@ -5642,7 +5605,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
**/
static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
- const int vxlan_hdr_qwords = 4;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u8 filter_index;
@@ -5660,7 +5622,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
port = pf->vxlan_ports[i];
ret = port ?
i40e_aq_add_udp_tunnel(hw, ntohs(port),
- vxlan_hdr_qwords,
I40E_AQC_TUNNEL_TYPE_VXLAN,
&filter_index, NULL)
: i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -6649,6 +6610,96 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
}
#endif
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr,
+ u16 flags)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = 0;
+
+ if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
+
+ /* Hardware does not support aging addresses so if a
+ * ndm_state is given only allow permanent addresses
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ const unsigned char *addr)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = -EOPNOTSUPP;
+
+ if (ndm->ndm_state & NUD_PERMANENT) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ if (is_unicast_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#endif /* HAVE_FDB_OPS */
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -6676,6 +6727,13 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
#endif
+#ifdef HAVE_FDB_OPS
+ .ndo_fdb_add = i40e_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+ .ndo_fdb_del = i40e_ndo_fdb_del,
+ .ndo_fdb_dump = i40e_ndo_fdb_dump,
+#endif
+#endif
};
/**
@@ -6720,10 +6778,12 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
NETIF_F_RXCSUM |
- NETIF_F_NTUPLE |
NETIF_F_RXHASH |
0;
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ netdev->features |= NETIF_F_NTUPLE;
+
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features;
@@ -6982,11 +7042,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
unregister_netdev(vsi->netdev);
}
} else {
- if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
}
i40e_vsi_disable_irq(vsi);
}
@@ -8090,6 +8146,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 link_status;
int err = 0;
u32 len;
+ u32 i;
err = pci_enable_device_mem(pdev);
if (err)
@@ -8243,7 +8300,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
- goto err_init_dcb;
+ /* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -8279,6 +8336,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
+ /* if FDIR VSI was set up, start it now */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+ i40e_vsi_open(pf->vsi[i]);
+ break;
+ }
+ }
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -8332,6 +8396,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dv.minor_version = DRV_VERSION_MINOR;
dv.build_version = DRV_VERSION_BUILD;
dv.subbuild_version = 0;
+ strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
/* since everything's happy, start the service_task timer */
@@ -8373,9 +8438,6 @@ err_vsis:
err_switch_setup:
i40e_reset_interrupt_capability(pf);
del_timer_sync(&pf->service_timer);
-#ifdef CONFIG_I40E_DCB
-err_init_dcb:
-#endif /* CONFIG_I40E_DCB */
err_mac_addr:
err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9cd57e617959..d351832bf235 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -157,8 +157,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 header_len,
- u8 protocol_index, u8 *filter_index,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
struct i40e_asq_cmd_details *cmd_details);
@@ -167,6 +167,9 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e61e63720800..1fedc7a1589d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -217,40 +217,6 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * i40e_ptp_tx_work
- * @work: pointer to work struct
- *
- * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
- * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
- * the stack in the skb.
- */
-static void i40e_ptp_tx_work(struct work_struct *work)
-{
- struct i40e_pf *pf = container_of(work, struct i40e_pf,
- ptp_tx_work);
- struct i40e_hw *hw = &pf->hw;
- u32 prttsyn_stat_0;
-
- if (!pf->ptp_tx_skb)
- return;
-
- if (time_is_before_jiffies(pf->ptp_tx_start +
- I40E_PTP_TX_TIMEOUT)) {
- dev_kfree_skb_any(pf->ptp_tx_skb);
- pf->ptp_tx_skb = NULL;
- pf->tx_hwtstamp_timeouts++;
- dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
- return;
- }
-
- prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
- if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
- i40e_ptp_tx_hwtstamp(pf);
- else
- schedule_work(&pf->ptp_tx_work);
-}
-
-/**
* i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
* @ptp: The PTP clock structure
* @rq: The requested feature to change
@@ -608,7 +574,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
u32 regval;
spin_lock_init(&pf->tmreg_lock);
- INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
netdev->name);
@@ -647,7 +612,6 @@ void i40e_ptp_stop(struct i40e_pf *pf)
pf->ptp_tx = false;
pf->ptp_rx = false;
- cancel_work_sync(&pf->ptp_tx_work);
if (pf->ptp_tx_skb) {
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9478ddc66caf..ece7ae99b03a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1825,9 +1825,6 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
I40E_TXD_CTX_QW1_CMD_SHIFT;
- pf->ptp_tx_start = jiffies;
- schedule_work(&pf->ptp_tx_work);
-
return 1;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 71a968fe557f..c4df8bac2db1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -167,6 +167,9 @@ struct i40e_link_status {
u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
};
struct i40e_phy_info {
@@ -409,6 +412,7 @@ struct i40e_driver_version {
u8 minor_version;
u8 build_version;
u8 subbuild_version;
+ u8 driver_string[32];
};
/* RX Descriptors */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02c11a7f7d29..82e7abf64308 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -29,6 +29,24 @@
/***********************misc routines*****************************/
/**
+ * i40e_vc_disable_vf
+ * @pf: pointer to the pf info
+ * @vf: pointer to the vf info
+ *
+ * Disable the VF through a SW reset
+ **/
+static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+}
+
+/**
* i40e_vc_isvalid_vsi_id
* @vf: pointer to the vf info
* @vsi_id: vf relative vsi id
@@ -416,6 +434,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ /* Set VF bandwidth if specified */
+ if (vf->tx_rate) {
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+ vf->tx_rate / 50, 0, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
+ vf->vf_id, ret);
+ }
+
error_alloc_vsi_res:
return ret;
}
@@ -2022,10 +2049,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* delete the temporary mac address */
- i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+ i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+ true, false);
/* add the new mac address */
- f = i40e_add_filter(vsi, mac, 0, true, false);
+ f = i40e_add_filter(vsi, mac, vf->port_vlan_id, true, false);
if (!f) {
dev_err(&pf->pdev->dev,
"Unable to add VF ucast filter\n");
@@ -2088,18 +2116,28 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
- if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
+ if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
+ /* Administrator Error - knock the VF offline until he does
+ * the right thing by reconfiguring his network correctly
+ * and then reloading the VF driver.
+ */
+ i40e_vc_disable_vf(pf, vf);
+ }
/* Check for condition where there was already a port VLAN ID
* filter set and now it is being deleted by setting it to zero.
+ * Additionally check for the condition where there was a port
+ * VLAN but now there is a new and different port VLAN being set.
* Before deleting all the old VLAN filters we must add new ones
* with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
* MAC addresses deleted.
*/
- if (!(vlan_id || qos) && vsi->info.pvid)
+ if ((!(vlan_id || qos) ||
+ (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+ vsi->info.pvid)
ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
if (vsi->info.pvid) {
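
The reworked test fires both when a port VLAN is being cleared and when it is being replaced by a different one; a restatement of that condition as a standalone helper (illustrative only, not part of the patch):

/* Illustrative restatement of the condition above. Note it compares the
 * unshifted qos against the stored pvid, exactly as the hunk does; the
 * caller would pass le16_to_cpu(vsi->info.pvid) as current_pvid.
 */
static bool pvid_needs_vlan_any(u16 vlan_id, u16 qos, u16 current_pvid)
{
	bool clearing = !(vlan_id || qos);
	bool changing = (vlan_id | qos) != current_pvid;

	return (clearing || changing) && current_pvid;
}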
@@ -2160,7 +2198,61 @@ error_pvid:
**/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
- return -EOPNOTSUPP;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int speed = 0;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ switch (pf->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = 40000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = 10000;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = 1000;
+ break;
+ default:
+ break;
+ }
+
+ if (tx_rate > speed) {
+ dev_err(&pf->pdev->dev, "Invalid tx rate %d specified for vf %d.\n",
+ tx_rate, vf->vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Tx rate credits are in values of 50 Mbps, 0 is disabled */
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, tx_rate / 50, 0,
+ NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to set tx rate, error code %d.\n",
+ ret);
+ ret = -EIO;
+ goto error;
+ }
+ vf->tx_rate = tx_rate;
+error:
+ return ret;
}
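
The VF Tx limit is handed to firmware in 50 Mbps credit units; a small illustration of the conversion used above (the helper name is made up for the example):

/* Illustrative only: Mbps -> 50 Mbps scheduler credits, as used above.
 * 1000 Mbps -> 20 credits; 0 leaves the limit disabled.
 */
static u16 i40e_tx_rate_to_credits(int tx_rate_mbps)
{
	return tx_rate_mbps / 50;
}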
/**
@@ -2200,10 +2292,17 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
- ivi->tx_rate = 0;
+ ivi->tx_rate = vf->tx_rate;
ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
I40E_VLAN_PRIORITY_SHIFT;
+ if (vf->link_forced == false)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up == true)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
ret = 0;
error_param:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 389c47f396d5..ba3d1f8414be 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,6 +98,7 @@ struct i40e_vf {
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if vf link is forced */
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 5470ce95936e..c79df257f830 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -28,6 +28,16 @@
#include "i40e_prototype.h"
/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+ (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
+/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -659,6 +669,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -786,6 +802,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -880,6 +899,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
}
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
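
Taken together, these adminq hunks serialize NVM commands behind a busy flag: it is set when an NVM erase/update is posted and cleared when the matching completion is pulled off the ARQ. A hedged sketch of how a caller might react to the new I40E_ERR_NVM return (the retry loop and delay are assumptions, not part of this patch):

/* Assumed caller pattern: back off and retry while the previous NVM
 * operation is still outstanding on the admin queue.
 */
static i40e_status i40evf_send_nvm_cmd_retry(struct i40e_hw *hw,
					     struct i40e_aq_desc *desc,
					     void *buff, u16 buff_size)
{
	i40e_status ret;
	int tries = 5;

	do {
		ret = i40evf_asq_send_command(hw, desc, buff, buff_size, NULL);
		if (ret != I40E_ERR_NVM)
			break;
		usleep_range(1000, 2000);
	} while (--tries);

	return ret;
}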
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 8f72c31d95cc..7d24be528601 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -87,6 +87,7 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 97662b6bd98a..6e617669c326 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -180,9 +180,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- i40e_aqc_opc_set_storm_control_config = 0x0280,
- i40e_aqc_opc_get_storm_control_config = 0x0281,
-
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -205,6 +202,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -1289,27 +1287,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
- __le32 broadcast_threshold;
- __le32 multicast_threshold;
- __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
-#define I40E_AQC_STORM_CONTROL_MDICW 0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
-#define I40E_AQC_STORM_CONTROL_BDICW 0x08
-#define I40E_AQC_STORM_CONTROL_BIDU 0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
- I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1499,6 +1476,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
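
A hypothetical fill of the new descriptor data; field semantics beyond the structure layout shown above are assumptions made for illustration only:

/* Hypothetical values: configure PF 0 only. The units and valid ranges of
 * min_bw/max_bw are not defined by this hunk and are assumed here.
 */
struct i40e_aqc_configure_partition_bw_data bw_data = {
	.pf_valid_bits = cpu_to_le16(BIT(0)),
	.min_bw = { [0] = 25 },		/* guaranteed bandwidth */
	.max_bw = { [0] = 50 },		/* bandwidth limit */
};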
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1583,11 +1569,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
-#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
index 17e42ca26d0b..775fcb2463d7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -53,6 +53,7 @@ struct i40e_hmc_obj_rxq {
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 4673b3381edd..51a6dee3c7b1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -173,6 +173,9 @@ struct i40e_link_status {
u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
};
struct i40e_phy_info {
@@ -415,6 +418,7 @@ struct i40e_driver_version {
u8 minor_version;
u8 build_version;
u8 subbuild_version;
+ u8 driver_string[32];
};
/* RX Descriptors */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 8b0db1ce179c..a46be016039e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -365,6 +365,316 @@ static int i40evf_set_coalesce(struct net_device *netdev,
return 0;
}
+/**
+ * i40evf_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* We always hash on IP src and dest addresses */
+ cmd->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ break;
+
+ case TCP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ cmd->data = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40evf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->vsi_res->num_queue_pairs;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = i40evf_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @adapter: board private structure
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ struct i40e_hw *hw = &adapter->hw;
+
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case TCP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ break;
+ case IPV4_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case IPV6_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+ i40e_flush(hw);
+
+ return 0;
+}
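
For reference, the kind of request that reaches this handler via ETHTOOL_SRXFH (roughly what "ethtool -N <iface> rx-flow-hash udp4 sdfn" produces), filled in by hand for illustration:

/* Illustrative ETHTOOL_SRXFH request: hash UDP/IPv4 flows on the IP
 * addresses plus both L4 port halves. Constants come from linux/ethtool.h.
 */
struct ethtool_rxnfc nfc = {
	.cmd       = ETHTOOL_SRXFH,
	.flow_type = UDP_V4_FLOW,
	.data      = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
};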
+
+/**
+ * i40evf_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_set_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = i40evf_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
+ **/
+static void i40evf_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = adapter->vsi_res->num_queue_pairs;
+
+ ch->max_other = NONQ_VECS;
+ ch->other_count = NONQ_VECS;
+
+ ch->combined_count = adapter->vsi_res->num_queue_pairs;
+}
+
+/**
+ * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+}
+
+/**
+ * i40evf_get_rxfh_indir - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int i40evf_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+ hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+ indir[j++] = hlut_val & 0xff;
+ indir[j++] = (hlut_val >> 8) & 0xff;
+ indir[j++] = (hlut_val >> 16) & 0xff;
+ indir[j++] = (hlut_val >> 24) & 0xff;
+ }
+ return 0;
+}
+
+/**
+ * i40evf_set_rxfh_indir - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Writes the indirection table into the hardware. Always returns 0 after
+ * programming the table.
+ **/
+static int i40evf_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+ hlut_val = indir[j++];
+ hlut_val |= indir[j++] << 8;
+ hlut_val |= indir[j++] << 16;
+ hlut_val |= indir[j++] << 24;
+ wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
+ }
+
+ return 0;
+}
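
Each 32-bit VFQF_HLUT register holds four one-byte queue indices; a small sketch of the byte packing used by the two helpers above (the function name is illustrative):

/* Illustrative packing of four RSS LUT entries into one HLUT register,
 * matching the byte order used above (entry 0 in the low byte).
 */
static u32 i40evf_pack_hlut(const u8 entries[4])
{
	return entries[0] | (entries[1] << 8) | (entries[2] << 16) |
	       ((u32)entries[3] << 24);
}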
+
static struct ethtool_ops i40evf_ethtool_ops = {
.get_settings = i40evf_get_settings,
.get_drvinfo = i40evf_get_drvinfo,
@@ -378,6 +688,12 @@ static struct ethtool_ops i40evf_ethtool_ops = {
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
+ .get_rxnfc = i40evf_get_rxnfc,
+ .set_rxnfc = i40evf_set_rxnfc,
+ .get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
+ .get_rxfh_indir = i40evf_get_rxfh_indir,
+ .set_rxfh_indir = i40evf_set_rxfh_indir,
+ .get_channels = i40evf_get_channels,
};
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 2797548fde0d..6edd581dffa7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -25,13 +25,15 @@
#include "i40e_prototype.h"
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);
char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710 X710 Virtual Function Network Driver";
-#define DRV_VERSION "0.9.16"
+#define DRV_VERSION "0.9.23"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -1309,7 +1311,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto restart_watchdog;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
- dev_info(&adapter->pdev->dev, "Checking for redemption\n");
if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
@@ -1534,9 +1535,13 @@ static void i40evf_reset_task(struct work_struct *work)
rstat_val);
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- if (netif_running(adapter->netdev))
- i40evf_close(adapter->netdev);
-
+ if (netif_running(adapter->netdev)) {
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+ }
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index fa36fe12e775..2e36c670d8df 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_82575
* e1000_82576
@@ -73,9 +70,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
-static const u16 e1000_82580_rxpbs_table[] =
- { 36, 72, 144, 1, 2, 4, 8, 16,
- 35, 70, 140 };
+static const u16 e1000_82580_rxpbs_table[] = {
+ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
/**
* igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -526,7 +522,7 @@ out:
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
s32 ret_val;
u32 ctrl_ext = 0;
u32 link_mode = 0;
@@ -1180,8 +1176,8 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore(hw) != 0);
- /* Empty */
+ while (igb_get_hw_semaphore(hw) != 0)
+ ; /* Empty */
swfw_sync = rd32(E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
@@ -1216,7 +1212,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
while (timeout) {
if (rd32(E1000_EEMNGCTL) & mask)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout--;
}
if (!timeout)
@@ -1269,7 +1265,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
if (hw->phy.media_type != e1000_media_type_copper) {
ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
- &duplex);
+ &duplex);
/* Use this flag to determine if link needs to be checked or
* not. If we have link clear the flag so that we do not
* continue to check for link.
@@ -1316,7 +1312,7 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
@@ -1411,7 +1407,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
}
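
These and the following hunks replace sub-20 ms msleep() calls with usleep_range(): msleep() rounds up to jiffies and can sleep far longer than requested for short delays, while usleep_range() gives the scheduler an explicit min/max window backed by hrtimers. The conversion pattern, isolated (delay values mirror the hunks):

/* Isolated example of the conversion applied in these hunks: a ~1 ms
 * pause with explicit slack instead of a jiffy-granular msleep(1).
 * Requires linux/delay.h.
 */
static inline void example_short_pause(void)
{
	usleep_range(1000, 2000);
}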
@@ -1436,9 +1432,8 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
/* set the completion timeout for interface */
ret_val = igb_set_pcie_completion_timeout(hw);
- if (ret_val) {
+ if (ret_val)
hw_dbg("PCI-E Set completion timeout has failed.\n");
- }
hw_dbg("Masking off all interrupts\n");
wr32(E1000_IMC, 0xffffffff);
@@ -1447,7 +1442,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 20000);
ctrl = rd32(E1000_CTRL);
@@ -1676,7 +1671,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
hw->mac.type == e1000_82576) {
ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
if (ret_val) {
- printk(KERN_DEBUG "NVM Read Error\n\n");
+ hw_dbg("NVM Read Error\n\n");
return ret_val;
}
@@ -1689,7 +1684,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
* link either autoneg or be forced to 1000/Full
*/
ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
- E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
/* set speed of 1000/Full if speed/duplex is forced */
reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
@@ -1925,7 +1920,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
}
/* Poll all queues to verify they have shut down */
for (ms_wait = 0; ms_wait < 10; ms_wait++) {
- msleep(1);
+ usleep_range(1000, 2000);
rx_enabled = 0;
for (i = 0; i < 4; i++)
rx_enabled |= rd32(E1000_RXDCTL(i));
@@ -1953,7 +1948,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
wr32(E1000_RCTL, temp_rctl);
wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
wrfl();
- msleep(2);
+ usleep_range(2000, 3000);
/* Enable RX queues that were previously enabled and restore our
* previous state
@@ -2005,14 +2000,14 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
* 16ms to 55ms
*/
ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
if (ret_val)
goto out;
pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
@@ -2241,7 +2236,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Determine whether or not a global dev reset is requested */
if (global_device_reset &&
@@ -2259,7 +2254,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Add delay to insure DEV_RST has time to complete */
if (global_device_reset)
- msleep(5);
+ usleep_range(5000, 6000);
ret_val = igb_get_auto_rd_done(hw);
if (ret_val) {
@@ -2436,8 +2431,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
goto out;
}
@@ -2447,8 +2441,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
&nvm_data);
if (ret_val) {
- hw_dbg("NVM Write Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
goto out;
}
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 09d78be72416..b407c55738fa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
@@ -37,9 +34,9 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_DEF2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_OFF1_ON2))
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24
@@ -67,16 +64,16 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
#define E1000_EICR_TX_QUEUE ( \
- E1000_EICR_TX_QUEUE0 | \
- E1000_EICR_TX_QUEUE1 | \
- E1000_EICR_TX_QUEUE2 | \
- E1000_EICR_TX_QUEUE3)
+ E1000_EICR_TX_QUEUE0 | \
+ E1000_EICR_TX_QUEUE1 | \
+ E1000_EICR_TX_QUEUE2 | \
+ E1000_EICR_TX_QUEUE3)
#define E1000_EICR_RX_QUEUE ( \
- E1000_EICR_RX_QUEUE0 | \
- E1000_EICR_RX_QUEUE1 | \
- E1000_EICR_RX_QUEUE2 | \
- E1000_EICR_RX_QUEUE3)
+ E1000_EICR_RX_QUEUE0 | \
+ E1000_EICR_RX_QUEUE1 | \
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -92,8 +89,7 @@ union e1000_adv_rx_desc {
struct {
struct {
__le16 pkt_info; /* RSS type, Packet type */
- __le16 hdr_info; /* Split Header,
- * header buffer length */
+ __le16 hdr_info; /* Split Head, buf len */
} lo_dword;
union {
__le32 rss; /* RSS Hash */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index b05bf925ac72..f85be6695e44 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
@@ -101,11 +98,11 @@
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -307,33 +304,25 @@
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
/* DMA Coalescing register fields */
-#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
- * Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
- * Threshold */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */
#define E1000_DMACR_DMACTHR_SHIFT 16
-#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
- * transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */
#define E1000_DMACR_DMAC_LX_SHIFT 28
#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
/* DMA Coalescing BMC-to-OS Watchdog Enable */
#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
- * Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */
#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
- * Threshold */
-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
- * current window */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */
+#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */
-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
- * Current Cnt */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */
-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
- * High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */
#define E1000_FCRTC_RTH_COAL_SHIFT 4
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
@@ -406,12 +395,12 @@
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC | \
- E1000_IMS_DOUTSYNC)
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC | \
+ E1000_IMS_DOUTSYNC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1011,8 +1000,7 @@
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
/* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
- on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 10741d170f2d..89925e405849 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
@@ -320,15 +316,15 @@ struct e1000_host_mng_command_info {
#include "e1000_mbx.h"
struct e1000_mac_operations {
- s32 (*check_for_link)(struct e1000_hw *);
- s32 (*reset_hw)(struct e1000_hw *);
- s32 (*init_hw)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
- s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8 *, u32);
- s32 (*read_mac_addr)(struct e1000_hw *);
- s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
#ifdef CONFIG_IGB_HWMON
s32 (*get_thermal_sensor_data)(struct e1000_hw *);
@@ -338,31 +334,31 @@ struct e1000_mac_operations {
};
struct e1000_phy_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*check_polarity)(struct e1000_hw *);
- s32 (*check_reset_block)(struct e1000_hw *);
- s32 (*force_speed_duplex)(struct e1000_hw *);
- s32 (*get_cfg_done)(struct e1000_hw *hw);
- s32 (*get_cable_length)(struct e1000_hw *);
- s32 (*get_phy_info)(struct e1000_hw *);
- s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_phy_info)(struct e1000_hw *);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*reset)(struct e1000_hw *);
- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
- s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
};
struct e1000_nvm_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
- s32 (*update)(struct e1000_hw *);
- s32 (*validate)(struct e1000_hw *);
- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
};
#define E1000_MAX_SENSORS 3
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index f67f8a170b90..2231598fb42d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_i210
* e1000_i211
@@ -435,6 +432,7 @@ static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
*data = ID_LED_RESERVED_FFFF;
ret_val = E1000_SUCCESS;
}
+ break;
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 907fe99a9813..9f34976687ba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1e0c404db81a..2a88595f956c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -442,7 +439,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* The caller must have a packed mc_addr_list of multicast addresses.
**/
void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count)
+ u8 *mc_addr_list, u32 mc_addr_count)
{
u32 hash_value, hash_bit, hash_reg;
int i;
@@ -866,8 +863,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- hw_dbg("Copper PHY and Auto Neg "
- "has not completed.\n");
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
goto out;
}
@@ -1265,7 +1261,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
while (i < AUTO_READ_DONE_TIMEOUT) {
if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
break;
- msleep(1);
+ usleep_range(1000, 2000);
i++;
}
@@ -1298,7 +1294,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
}
if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
- switch(hw->phy.media_type) {
+ switch (hw->phy.media_type) {
case e1000_media_type_internal_serdes:
*data = ID_LED_DEFAULT_82575_SERDES;
break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 99299ba8ee3a..ea24961b0d70 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MAC_H_
#define _E1000_MAC_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index d5b121771c31..162cc49345d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000_mbx.h"
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index f52f5515e5a8..d20af6b2f581 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 9abf82919c65..92bcdbe756b2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,28 +1,24 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -480,6 +476,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
igb_shift_out_eec_bits(hw, word_out, 16);
widx++;
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 5b101170b17e..febc9cdb7391 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
@@ -32,7 +29,7 @@ void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
- u32 part_num_size);
+ u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 4009bbab7407..424f16c43759 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -924,8 +921,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
if (phy->autoneg_wait_to_complete) {
ret_val = igb_wait_autoneg(hw);
if (ret_val) {
- hw_dbg("Error while waiting for "
- "autoneg to complete\n");
+ hw_dbg("Error while waiting for autoneg to complete\n");
goto out;
}
}
@@ -2244,7 +2240,7 @@ void igb_power_down_phy_copper(struct e1000_hw *hw)
hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
}
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 4c2c36c46a73..fe921e29dda8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_PHY_H_
#define _E1000_PHY_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index bdb246e848e1..833bbb948d97 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
@@ -301,9 +298,9 @@
#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x054E0 + ((_i - 16) * 8)))
+ (0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x054E4 + ((_i - 16) * 8)))
+ (0x054E4 + ((_i - 16) * 8)))
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
@@ -358,8 +355,7 @@
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
- * Filter - RW */
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
struct e1000_hw;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 27130065d92a..06102d1f7c03 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* Linux PRO/1000 Ethernet Driver main header file */
@@ -198,6 +194,7 @@ struct igb_tx_buffer {
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
+
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e5570acbeea8..333a2b0bbada 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* ethtool support for igb */
@@ -286,7 +283,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
@@ -399,7 +396,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
adapter->fc_autoneg = pause->autoneg;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
@@ -886,7 +883,7 @@ static int igb_set_ringparam(struct net_device *netdev,
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1060,8 +1057,8 @@ static struct igb_reg_test reg_test_i350[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1103,8 +1100,8 @@ static struct igb_reg_test reg_test_82580[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1132,8 +1129,10 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
/* Enable all RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82576, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
@@ -1149,14 +1148,14 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ 0, 0, 0, 0 }
};
@@ -1170,7 +1169,8 @@ static struct igb_reg_test reg_test_82575[] = {
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
/* Enable all four RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82575, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
@@ -1196,8 +1196,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 pat, val;
- static const u32 _test[] =
- {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ static const u32 _test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
wr32(reg, (_test[pat] & write));
val = rd32(reg) & mask;
@@ -1206,11 +1206,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (_test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
@@ -1218,17 +1218,18 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 val;
+
wr32(reg, write & mask);
val = rd32(reg);
if ((write & mask) != (val & mask)) {
dev_err(&adapter->pdev->dev,
- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
- (val & mask), (write & mask));
+ "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST(reg, mask, write) \
@@ -1387,14 +1388,14 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Hook up test interrupt handler just for this test */
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
if (request_irq(adapter->msix_entries[0].vector,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
if (request_irq(irq,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
@@ -1412,7 +1413,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Define all writable bits for ICS */
switch (hw->mac.type) {
@@ -1459,7 +1460,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 3;
@@ -1481,7 +1482,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMS, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (!(adapter->test_icr & mask)) {
*data = 4;
@@ -1503,7 +1504,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, ~mask);
wr32(E1000_ICS, ~mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 5;
@@ -1515,7 +1516,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Unhook test interrupt handler */
if (adapter->flags & IGB_FLAG_HAS_MSIX)
@@ -1949,6 +1950,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
@@ -2413,9 +2415,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V4_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2425,9 +2429,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V6_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 8333f67acf96..44b6a68f1af7 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "igb.h"
#include "e1000_82575.h"
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 16430a8440fa..bfcda8a455f4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -75,7 +72,7 @@ static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
};
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+static const struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
@@ -117,7 +114,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
-void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -141,7 +137,7 @@ static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+ struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
@@ -159,7 +155,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static void igb_vlan_mode(struct net_device *netdev,
+ netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
@@ -215,10 +212,9 @@ static struct notifier_block dca_notifier = {
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs = 0;
+static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
- "per physical function");
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
@@ -384,8 +380,7 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start "
- "last_rx\n");
+ pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
netdev->state, netdev->trans_start, netdev->last_rx);
}
@@ -438,9 +433,7 @@ static void igb_dump(struct igb_adapter *adapter)
pr_info("------------------------------------\n");
pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
- "[bi->dma ] leng ntw timestamp "
- "bi->skb\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
@@ -458,9 +451,8 @@ static void igb_dump(struct igb_adapter *adapter)
else
next_desc = "";
- pr_info("T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p%s\n", i,
- le64_to_cpu(u0->a),
+ pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+ i, le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
@@ -519,10 +511,8 @@ rx_ring_summary:
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
- "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
- "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
@@ -584,7 +574,7 @@ static int igb_get_i2c_data(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+ return !!(i2cctl & E1000_I2C_DATA_IN);
}
/**
@@ -648,7 +638,7 @@ static int igb_get_i2c_clk(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+ return !!(i2cctl & E1000_I2C_CLK_IN);
}
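
Both I2C pin helpers above are used as boolean getters, and !!(reg & mask) is the idiomatic kernel spelling for folding a masked register read into 0 or 1. A tiny sketch of the idiom (hypothetical helper, not from the driver):

static int bit_is_set(u32 reg, u32 mask)
{
        return !!(reg & mask);  /* 1 for any set bit under the mask, else 0 */
}
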
static const struct i2c_algo_bit_data igb_i2c_algo = {
@@ -681,9 +671,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
static int __init igb_init_module(void)
{
int ret;
+
pr_info("%s - version %s\n",
igb_driver_string, igb_driver_version);
-
pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
@@ -736,12 +726,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
adapter->rx_ring[i]->reg_idx = rbase_offset +
Q_IDX_82576(i);
}
+ /* Fall through */
case e1000_82575:
case e1000_82580:
case e1000_i350:
case e1000_i354:
case e1000_i210:
case e1000_i211:
+ /* Fall through */
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -1292,8 +1284,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
- /*
- * On i350, i354, i210, and i211, loopback VLAN packets
+ /* On i350, i354, i210, and i211, loopback VLAN packets
* have the tag byte-swapped.
*/
if (adapter->hw.mac.type >= e1000_i350)
@@ -1345,6 +1336,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
for (; v_idx < q_vectors; v_idx++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
tqpv, txr_idx, rqpv, rxr_idx);
@@ -1484,6 +1476,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
*/
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 regval = rd32(E1000_EIAM);
+
wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
wr32(E1000_EIMC, adapter->eims_enable_mask);
regval = rd32(E1000_EIAC);
@@ -1495,6 +1488,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
wrfl();
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int i;
+
for (i = 0; i < adapter->num_q_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -1513,6 +1507,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
u32 regval = rd32(E1000_EIAC);
+
wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
regval = rd32(E1000_EIAM);
wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
@@ -1745,6 +1740,7 @@ int igb_up(struct igb_adapter *adapter)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -1787,7 +1783,7 @@ void igb_down(struct igb_adapter *adapter)
wr32(E1000_TCTL, tctl);
/* flush both disables and wait for them to finish */
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
igb_irq_disable(adapter);
@@ -1827,7 +1823,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
{
WARN_ON(in_interrupt());
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
igb_down(adapter);
igb_up(adapter);
clear_bit(__IGB_RESETTING, &adapter->state);
@@ -1960,6 +1956,7 @@ void igb_reset(struct igb_adapter *adapter)
/* disable receive for all VFs and wait one second */
if (adapter->vfs_allocated_count) {
int i;
+
for (i = 0 ; i < adapter->vfs_allocated_count; i++)
adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
@@ -2529,7 +2526,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* let the f/w know that the h/w is now under the control of the
- * driver. */
+ * driver.
+ */
igb_get_hw_control(adapter);
strcpy(netdev->name, "eth%d");
@@ -3077,6 +3075,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -3248,7 +3247,7 @@ void igb_setup_tctl(struct igb_adapter *adapter)
* Configure a transmit ring after a reset.
**/
void igb_configure_tx_ring(struct igb_adapter *adapter,
- struct igb_ring *ring)
+ struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
u32 txdctl = 0;
@@ -3389,7 +3388,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (adapter->rss_indir_tbl_init != num_rx_queues) {
for (j = 0; j < IGB_RETA_SIZE; j++)
- adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl[j] =
+ (j * num_rx_queues) / IGB_RETA_SIZE;
adapter->rss_indir_tbl_init = num_rx_queues;
}
igb_write_rss_indir_tbl(adapter);
@@ -3430,6 +3430,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (hw->mac.type > e1000_82575) {
/* Set the default pool for the PF's first queue */
u32 vtctl = rd32(E1000_VT_CTL);
+
vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
E1000_VT_CTL_DISABLE_DEF_POOL);
vtctl |= adapter->vfs_allocated_count <<
@@ -3511,7 +3512,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
- int vfn)
+ int vfn)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
@@ -4058,7 +4059,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
switch (hw->mac.type) {
case e1000_82576:
case e1000_i350:
- if (!(wvbr = rd32(E1000_WVBR)))
+ wvbr = rd32(E1000_WVBR);
+ if (!wvbr)
return;
break;
default:
@@ -4077,7 +4079,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
if (!adapter->wvbr)
return;
- for(j = 0; j < adapter->vfs_allocated_count; j++) {
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
if (adapter->wvbr & (1 << j) ||
adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
dev_warn(&adapter->pdev->dev,
@@ -4209,14 +4211,15 @@ static void igb_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
+
hw->mac.ops.get_speed_and_duplex(hw,
&adapter->link_speed,
&adapter->link_duplex);
ctrl = rd32(E1000_CTRL);
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
- "Duplex, Flow Control: %s\n",
+ netdev_info(netdev,
+ "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
@@ -4242,11 +4245,8 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
- E1000_THSTAT_LINK_THROTTLE)) {
- netdev_info(netdev, "The network adapter link "
- "speed was downshifted because it "
- "overheated\n");
- }
+ E1000_THSTAT_LINK_THROTTLE))
+ netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
@@ -4277,12 +4277,11 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
E1000_THSTAT_PWR_DOWN)) {
- netdev_err(netdev, "The network adapter was "
- "stopped because it overheated\n");
+ netdev_err(netdev, "The network adapter was stopped because it overheated\n");
}
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Down\n",
+ netdev_info(netdev, "igb: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
@@ -4344,6 +4343,7 @@ static void igb_watchdog_task(struct work_struct *work)
/* Cause software interrupt to ensure Rx ring is cleaned */
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
+
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
wr32(E1000_EICS, eics);
@@ -4483,13 +4483,12 @@ static void igb_update_itr(struct igb_q_vector *q_vector,
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
- if (bytes/packets > 8000) {
+ if (bytes/packets > 8000)
itrval = bulk_latency;
- } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+ else if ((packets < 10) || ((bytes/packets) > 1200))
itrval = bulk_latency;
- } else if ((packets > 35)) {
+ else if ((packets > 35))
itrval = lowest_latency;
- }
} else if (bytes/packets > 2000) {
itrval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
@@ -4675,6 +4674,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
return;
} else {
u8 l4_hdr = 0;
+
switch (first->protocol) {
case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
@@ -4962,6 +4962,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
*/
if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
unsigned short f;
+
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
} else {
@@ -5140,7 +5141,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
/* igb_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
@@ -5621,6 +5622,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
vmolr |= E1000_VMOLR_MPME;
} else if (vf_data->num_vf_mc_hashes) {
int j;
+
vmolr |= E1000_VMOLR_ROMPE;
for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5672,6 +5674,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
for (i = 0; i < adapter->vfs_allocated_count; i++) {
u32 vmolr = rd32(E1000_VMOLR(i));
+
vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
vf_data = &adapter->vf_data[i];
@@ -5770,6 +5773,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size += 4;
@@ -5798,6 +5802,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
adapter->vf_data[vf].vlans_enabled--;
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size -= 4;
@@ -5902,8 +5907,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
*/
if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
u32 vlvf, bits;
-
int regndx = igb_find_vlvf_entry(adapter, vid);
+
if (regndx < 0)
goto out;
/* See if any other pools are set for this VLAN filter
@@ -6494,7 +6499,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+ *new_buff = *old_buff;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -6963,6 +6968,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
u16 vid;
+
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -7051,7 +7057,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
- return (total_packets < budget);
+ return total_packets < budget;
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -7172,7 +7178,7 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
case SIOCGMIIREG:
if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
- &data->val_out))
+ &data->val_out))
return -EIO;
break;
case SIOCSMIIREG:
@@ -7955,11 +7961,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
reg = rd32(E1000_DTXCTL);
reg |= E1000_DTXCTL_VLAN_ADDED;
wr32(E1000_DTXCTL, reg);
+ /* Fall through */
case e1000_82580:
/* enable replication vlan tag stripping */
reg = rd32(E1000_RPLOLR);
reg |= E1000_RPLOLR_STRVLAN;
wr32(E1000_RPLOLR, reg);
+ /* Fall through */
case e1000_i350:
/* none of the above registers are supported by i350 */
break;
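
The /* Fall through */ annotations added in this and the earlier switch hunks only document intent: execution already fell from one case into the next, but without the comment that is indistinguishable from a forgotten break, both for readers and for checkers such as GCC's later -Wimplicit-fallthrough. A hypothetical example of the shape (case labels and helpers made up):

static void configure_vlan_offload(int mac_type)
{
        switch (mac_type) {
        case MAC_TYPE_A:
                enable_vlan_insertion();        /* hypothetical helper */
                /* Fall through */
        case MAC_TYPE_B:
                enable_vlan_stripping();        /* hypothetical helper */
                break;
        default:
                break;
        }
}
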
@@ -8049,6 +8057,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
u32 reg = rd32(E1000_PCIEMISC);
+
wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
wr32(E1000_DMACR, 0);
}
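
The watchdog hunks above also move the link status messages from raw printk(KERN_INFO ...) to netdev_info(), which tags the output with the driver and interface name automatically. A sketch of the call pattern (message text illustrative only, not the driver's fixed link-status format):

static void report_link(struct net_device *netdev, u32 speed_mbps, bool up)
{
        if (up)
                netdev_info(netdev, "NIC Link is Up %u Mbps\n", speed_mbps);
        else
                netdev_info(netdev, "NIC Link is Down\n");
}
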
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index c6c4ca7d68e6..c688c8a4c063 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -155,7 +155,6 @@ struct vf_data_storage {
struct vf_macvlans {
struct list_head l;
int vf;
- int rar_entry;
bool free;
bool is_macvlan;
u8 vf_macvlan[ETH_ALEN];
@@ -613,6 +612,15 @@ static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16
+struct ixgbe_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 queue;
+ u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT 0x1
+#define IXGBE_MAC_STATE_MODIFIED 0x2
+#define IXGBE_MAC_STATE_IN_USE 0x4
+
#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
@@ -785,6 +793,7 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
+ struct ixgbe_mac_addr *mac_table;
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
@@ -863,6 +872,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+#endif
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
struct ixgbe_ring *);
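
The new mac_table introduced in ixgbe.h tracks each receive-address register (RAR) slot with a small state bitmask. A user-space sketch of the intended state machine, with the hardware writes replaced by printf() and the table shrunk to four entries; the names and sizes below are illustrative, not the driver's:

#include <stdio.h>
#include <string.h>

#define STATE_DEFAULT  0x1   /* reserved for RAR 0, the permanent MAC */
#define STATE_MODIFIED 0x2
#define STATE_IN_USE   0x4
#define NUM_RAR        4     /* real parts have many more RAR slots */

struct mac_entry {
	unsigned char addr[6];
	unsigned short queue;
	unsigned short state;
};

static struct mac_entry table[NUM_RAR];

/* Push only the entries that changed since the last sync to "hardware". */
static void sync_table(void)
{
	for (int i = 0; i < NUM_RAR; i++) {
		if (!(table[i].state & STATE_MODIFIED))
			continue;
		if (table[i].state & STATE_IN_USE)
			printf("set_rar(%d, queue %u)\n", i, table[i].queue);
		else
			printf("clear_rar(%d)\n", i);
		table[i].state &= ~STATE_MODIFIED;
	}
}

/* Claim the first free slot, mark it dirty, and sync it out. */
static int add_filter(const unsigned char *addr, unsigned short queue)
{
	for (int i = 0; i < NUM_RAR; i++) {
		if (table[i].state & STATE_IN_USE)
			continue;
		memcpy(table[i].addr, addr, 6);
		table[i].queue = queue;
		table[i].state |= STATE_MODIFIED | STATE_IN_USE;
		sync_table();
		return i;
	}
	return -1;                       /* table full */
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };

	add_filter(mac, 0);              /* prints set_rar(0, queue 0) */
	sync_table();                    /* prints nothing: already clean */
	return 0;
}

MODIFIED gates whether a sync pass touches a slot at all, and IN_USE decides between programming and clearing it, which is exactly the split ixgbe_sync_mac_table() relies on later in this series.
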
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4c78ea8946c1..1c52e4753480 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -337,19 +337,25 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
int i;
bool link_up;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/*
* On 82598 having Rx FC on causes resets while doing 1G
* so if it's on turn it off once we know link_speed. For
@@ -432,12 +438,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
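
Both fc_enable paths now validate and program a low/high water mark pair per traffic class instead of one global low mark. A stand-alone sketch of that loop; the marks appear to be kept in KB (hence the << 10 to reach the byte-granular register field), and FCRTL_XONE/FCRTH_FCEN below are illustrative constants rather than the real register definitions:

#include <stdio.h>

#define MAX_TRAFFIC_CLASS 8
#define FCRTL_XONE 0x80000000u   /* illustrative stand-in for IXGBE_FCRTL_XONE */
#define FCRTH_FCEN 0x80000000u   /* illustrative stand-in for IXGBE_FCRTH_FCEN */

/* Per-TC water marks in KB, mirroring the new low_water[]/high_water[] pair. */
static unsigned int high_water[MAX_TRAFFIC_CLASS] = { 512, 256 };
static unsigned int low_water[MAX_TRAFFIC_CLASS]  = { 448, 192 };

int main(void)
{
	for (int i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if (!high_water[i])
			continue;        /* no flow control on this TC */

		/* The new check: a zero or inverted low mark would make the
		 * device flood XOFF frames, so reject the configuration. */
		if (!low_water[i] || low_water[i] >= high_water[i]) {
			printf("TC%d: invalid water mark configuration\n", i);
			continue;
		}

		/* KB -> bytes (<< 10), then OR in the enable bits, as the
		 * per-TC FCRTL/FCRTH programming loop now does. */
		unsigned int fcrtl = (low_water[i] << 10) | FCRTL_XONE;
		unsigned int fcrth = (high_water[i] << 10) | FCRTH_FCEN;

		printf("TC%d: FCRTL=0x%08x FCRTH=0x%08x\n", i, fcrtl, fcrth);
	}
	return 0;
}
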
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 981b8a7b100d..bdc55819179d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -271,6 +271,7 @@ out:
**/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
+ s32 ret_val;
u32 ctrl_ext;
/* Set the media type */
@@ -292,12 +293,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
- ixgbe_setup_fc(hw);
+ ret_val = ixgbe_setup_fc(hw);
+	if (ret_val)
+ goto out;
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
- return 0;
+out:
+ return ret_val;
}
/**
@@ -2106,19 +2110,25 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
u32 fcrtl, fcrth;
int i;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration. */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/* Negotiate the fc mode to use */
ixgbe_fc_autoneg(hw);
@@ -2181,12 +2191,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
} else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f12c40fb5537..d15ff2e5edb7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -141,8 +141,6 @@ static inline bool ixgbe_removed(void __iomem *addr)
return unlikely(!addr);
}
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
@@ -172,18 +170,7 @@ static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
}
#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
-static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
-{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
- u32 value;
-
- if (ixgbe_removed(reg_addr))
- return IXGBE_FAILED_READ_REG;
- value = readl(reg_addr + reg);
- if (unlikely(value == IXGBE_FAILED_READ_REG))
- ixgbe_check_remove(hw, reg);
- return value;
-}
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 7a77f37a7cbc..d3ba63f9ad37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -208,7 +208,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if (!(pfc_en & (1 << i))) {
@@ -217,6 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
continue;
}
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index bdb99b3b0f30..3b932fe64ab6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -242,7 +242,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
max_tc = prio_tc[i];
}
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i <= max_tc; i++) {
@@ -257,6 +256,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
if (enabled) {
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
} else {
reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index b16cc786750d..0772b7730fce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -81,9 +81,7 @@ struct ixgbe_fcoe {
void *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
-#ifdef CONFIG_IXGBE_DCB
u8 up;
-#endif
};
#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d62e7a25cf97..8089ea9f2fba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -301,7 +301,7 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
ixgbe_service_event_schedule(adapter);
}
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
u32 value;
@@ -320,6 +320,32 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
ixgbe_remove_adapter(hw);
}
+/**
+ * ixgbe_read_reg - Read from device register
+ * @hw: hw specific details
+ * @reg: offset of register to read
+ *
+ * Returns: value read, or IXGBE_FAILED_READ_REG if the device was removed
+ *
+ * This function is used to read device registers. It checks for device
+ * removal by confirming that any read returning all ones is genuine: the
+ * status register is read again and, if it also returns all ones, the
+ * device is treated as removed. If a removal was detected earlier, the
+ * hardware is not touched and IXGBE_FAILED_READ_REG (all ones) is returned.
+ */
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (ixgbe_removed(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbe_check_remove(hw, reg);
+ return value;
+}
+
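
Moving ixgbe_read_reg() out of line means every register read funnels through this surprise-removal check. A user-space model of the same path, with the PCI BAR replaced by an array and the status-register test simplified to an illustrative offset:

#include <stdio.h>
#include <stdbool.h>

#define FAILED_READ_REG 0xffffffffu
#define REG_STATUS      0          /* stand-in for the device status register */

static unsigned int fake_bar[16] = { 0x00080000u };  /* pretend MMIO window */
static bool removed;                                 /* models hw_addr == NULL */

/* If even the status register reads as all ones, the device really is gone;
 * remember that so later reads can short-circuit. */
static void check_remove(void)
{
	if (fake_bar[REG_STATUS] == FAILED_READ_REG)
		removed = true;
}

static unsigned int read_reg(unsigned int reg)
{
	if (removed)
		return FAILED_READ_REG;   /* never touch vanished hardware */

	unsigned int value = fake_bar[reg];

	if (value == FAILED_READ_REG)     /* suspicious: surprise removal? */
		check_remove();
	return value;
}

int main(void)
{
	unsigned int v = read_reg(2);

	fake_bar[2] = 0x1234;
	v = read_reg(2);
	printf("reg2 = 0x%x (removed=%d)\n", v, removed);

	/* Simulate surprise removal: every read now returns all ones. */
	for (int i = 0; i < 16; i++)
		fake_bar[i] = FAILED_READ_REG;
	v = read_reg(2);
	printf("reg2 = 0x%x (removed=%d)\n", v, removed);
	return 0;
}
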
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
u16 value;
@@ -3743,35 +3769,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
}
/**
- * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
- * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
* ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
* @adapter: driver data
*/
@@ -3850,6 +3847,158 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ else
+ return -ENOMEM;
+
+#ifdef CONFIG_PCI_IOV
+ ixgbe_restore_vf_multicasts(adapter);
+#endif
+
+ return netdev_mc_count(netdev);
+}
+
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+}
+#endif
+
+static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+ if (adapter->mac_table[i].state &
+ IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i,
+ adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &=
+ ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+ }
+}
+
+static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ }
+ ixgbe_sync_mac_table(adapter);
+}
+
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, count = 0;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state == 0)
+ count++;
+ }
+ return count;
+}
+
+/* this function destroys the first RAR entry */
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
+ u8 *addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+ adapter->mac_table[0].queue = VMDQ_P(0);
+ adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+ IXGBE_MAC_STATE_IN_USE);
+ hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+ adapter->mac_table[0].queue,
+ IXGBE_RAH_AV);
+}
+
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ continue;
+ adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+ IXGBE_MAC_STATE_IN_USE);
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ ixgbe_sync_mac_table(adapter);
+ return i;
+ }
+ return -ENOMEM;
+}
+
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ /* search table for addr, if found, set to 0 and sync */
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+ adapter->mac_table[i].queue == queue) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ ixgbe_sync_mac_table(adapter);
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
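
Callers of this new filter API (ixgbe_set_mac(), ixgbe_write_uc_addr_list(), the SR-IOV paths) delete any stale entry first, then add, and treat a non-negative return as the RAR slot that was claimed. A small sketch of that return-value convention, with the driver call replaced by a stub table:

#include <stdio.h>
#include <errno.h>

#define NUM_SLOTS 2
static int slot_used[NUM_SLOTS];

/* Stand-in for ixgbe_add_mac_filter(): returns the index it claimed on
 * success, or a negative errno when the table is full. */
static int add_filter_stub(void)
{
	for (int i = 0; i < NUM_SLOTS; i++) {
		if (!slot_used[i]) {
			slot_used[i] = 1;
			return i;
		}
	}
	return -ENOMEM;
}

/* Mirrors the tail of the reworked ixgbe_set_mac(): any index means success
 * for the netdev op, so collapse it to 0; pass errnos straight through. */
static int set_mac_stub(void)
{
	int ret = add_filter_stub();

	return ret > 0 ? 0 : ret;
}

int main(void)
{
	printf("%d\n", set_mac_stub());   /* claims slot 0 -> 0 */
	printf("%d\n", set_mac_stub());   /* claims slot 1 -> 0 */
	printf("%d\n", set_mac_stub());   /* table full -> -ENOMEM */
	return 0;
}
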
+/**
* ixgbe_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
*
@@ -3858,39 +4007,23 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
* 0 on no addresses written
* X on writing X addresses to the RAR table
**/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
- /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
-
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > rar_entries)
+ if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
return -ENOMEM;
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
- /* return error if we do not support writing to RAR table */
- if (!hw->mac.ops.set_rar)
- return -ENOMEM;
-
netdev_for_each_uc_addr(ha, netdev) {
- if (!rar_entries)
- break;
- hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
- VMDQ_P(0), IXGBE_RAH_AV);
+ ixgbe_del_mac_filter(adapter, ha->addr, vfn);
+ ixgbe_add_mac_filter(adapter, ha->addr, vfn);
count++;
}
}
- /* write the addresses in reverse order to avoid write combining */
- for (; rar_entries > 0 ; rar_entries--)
- hw->mac.ops.clear_rar(hw, rar_entries);
-
return count;
}
@@ -3908,11 +4041,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ u32 vlnctrl;
int count;
/* Check for Promiscuous and All Multicast modes */
-
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
/* set all bits that we expect to always be set */
fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -3922,26 +4056,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* clear the bits we are changing the status of */
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
+ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
if (netdev->flags & IFF_PROMISC) {
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+ vmolr |= IXGBE_VMOLR_MPE;
/* Only disable hardware filter vlans in promiscuous mode
* if SR-IOV and VMDQ are disabled - otherwise ensure
* that hardware VLAN filters remain enabled.
*/
if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
IXGBE_FLAG_SRIOV_ENABLED)))
- ixgbe_vlan_filter_disable(adapter);
- else
- ixgbe_vlan_filter_enable(adapter);
+ vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
}
- ixgbe_vlan_filter_enable(adapter);
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
hw->addr_ctrl.user_set_promisc = false;
}
@@ -3950,7 +4082,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = ixgbe_write_uc_addr_list(netdev);
+ count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
if (count < 0) {
fctrl |= IXGBE_FCTRL_UPE;
vmolr |= IXGBE_VMOLR_ROPE;
@@ -3960,11 +4092,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
- hw->mac.ops.update_mc_addr_list(hw, netdev);
- vmolr |= IXGBE_VMOLR_ROMPE;
-
- if (adapter->num_vfs)
- ixgbe_restore_vf_multicasts(adapter);
+ count = ixgbe_write_mc_addr_list(netdev);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else if (count) {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ }
if (hw->mac.type != ixgbe_mac_82598EB) {
vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
@@ -3985,6 +4119,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* NOTE: VLAN filtering is disabled by setting PROMISC */
}
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -4101,8 +4236,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
(tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
(pb == ixgbe_fcoe_get_tc(adapter)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-
#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4143,7 +4278,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
* @adapter: board private structure to calculate for
* @pb: packet buffer to calculate
*/
-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
{
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *dev = adapter->netdev;
@@ -4153,6 +4288,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
/* Calculate max LAN frame size */
tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+#ifdef IXGBE_FCOE
+ /* FCoE traffic class uses FCOE jumbo frames */
+ if ((dev->features & NETIF_F_FCOE_MTU) &&
+ (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4179,15 +4322,17 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
if (!num_tc)
num_tc = 1;
- hw->fc.low_water = ixgbe_lpbthresh(adapter);
-
for (i = 0; i < num_tc; i++) {
hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+ hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
/* Low water marks must not be larger than high water marks */
- if (hw->fc.low_water > hw->fc.high_water[i])
- hw->fc.low_water = 0;
+ if (hw->fc.low_water[i] > hw->fc.high_water[i])
+ hw->fc.low_water[i] = 0;
}
+
+ for (; i < MAX_TRAFFIC_CLASS; i++)
+ hw->fc.high_water[i] = 0;
}
static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
@@ -4249,20 +4394,10 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
vmolr |= IXGBE_VMOLR_ROMPE;
hw->mac.ops.update_mc_addr_list(hw, dev);
}
- ixgbe_write_uc_addr_list(adapter->netdev);
+ ixgbe_write_uc_addr_list(adapter->netdev, pool);
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
-static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
- u8 *addr, u16 pool)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- unsigned int entry;
-
- entry = hw->mac.num_rar_entries - pool;
- hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
-}
-
static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
{
struct ixgbe_adapter *adapter = vadapter->real_adapter;
@@ -4742,7 +4877,9 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
int err;
+ u8 old_addr[ETH_ALEN];
if (ixgbe_removed(hw->hw_addr))
return;
@@ -4778,9 +4915,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
-
- /* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+ /* do not flush user set addresses */
+ memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+ ixgbe_flush_sw_mac_table(adapter);
+ ixgbe_mac_set_default_filter(adapter, old_addr);
/* update SAN MAC vmdq pool selection */
if (hw->mac.san_mac_rar_index)
@@ -5026,6 +5164,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */
+ adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
+ hw->mac.num_rar_entries,
+ GFP_ATOMIC);
+
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -7172,16 +7314,17 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
+ int ret;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
-
- return 0;
+ ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
+ return ret > 0 ? 0 : ret;
}
static int
@@ -8187,6 +8330,8 @@ skip_sriov:
goto err_sw_init;
}
+ ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
(unsigned long) adapter);
@@ -8319,6 +8464,7 @@ err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
+ kfree(adapter->mac_table);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -8392,6 +8538,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
e_dev_info("complete\n");
+ kfree(adapter->mac_table);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index e6c68d396c99..a01417c06620 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -72,8 +72,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
for (i = 0; i < num_vf_macvlans; i++) {
mv_list->vf = -1;
mv_list->free = true;
- mv_list->rar_entry = hw->mac.num_rar_entries -
- (i + adapter->num_vfs + 1);
list_add(&mv_list->l, &adapter->vf_mvs.l);
mv_list++;
}
@@ -327,6 +325,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
u32 vector_bit;
u32 vector_reg;
u32 mta_reg;
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
/* only so many hash values supported */
entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -353,25 +352,13 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
return 0;
}
-static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct list_head *pos;
- struct vf_macvlans *entry;
-
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
- if (!entry->free)
- hw->mac.ops.set_rar(hw, entry->rar_entry,
- entry->vf_macvlan,
- entry->vf, IXGBE_RAH_AV);
- }
-}
-
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -382,6 +369,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
u32 mta_reg;
for (i = 0; i < adapter->num_vfs; i++) {
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
vfinfo = &adapter->vfinfo[i];
for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
hw->addr_ctrl.mta_in_use++;
@@ -391,11 +379,18 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+
+ if (vfinfo->num_vf_mc_hashes)
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
}
/* Restore any VF macvlans */
- ixgbe_restore_vf_macvlans(adapter);
+ ixgbe_full_sync_mac_table(adapter);
}
+#endif
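
A sketch of how the per-VF multicast path now behaves: each VF's hash list sets bits in the shared MTA, and VMOLR.ROMPE is enabled only for VFs that actually registered hashes, so idle VFs stop receiving flooded multicast. The register layout is simplified, the ROMPE bit value is illustrative, and the 7-bit/5-bit hash split follows the usual ixgbe convention:

#include <stdio.h>

#define MTA_REGS    128              /* 128 x 32 bits = 4096 hash buckets */
#define VMOLR_ROMPE (1u << 25)       /* illustrative bit value */
#define NUM_VFS     2

static unsigned int mta[MTA_REGS];
static unsigned int vmolr[NUM_VFS];

/* 12-bit MTA hash values each VF registered. */
static const unsigned short vf_hashes[NUM_VFS][4] = {
	{ 0x123, 0x7ff },            /* VF0: two multicast groups */
	{ 0 },                       /* VF1: none */
};
static const int vf_hash_count[NUM_VFS] = { 2, 0 };

int main(void)
{
	for (int vf = 0; vf < NUM_VFS; vf++) {
		for (int j = 0; j < vf_hash_count[vf]; j++) {
			/* Split the hash into a register index (upper 7 bits)
			 * and a bit index (lower 5 bits). */
			unsigned int reg = (vf_hashes[vf][j] >> 5) & 0x7f;
			unsigned int bit = vf_hashes[vf][j] & 0x1f;

			mta[reg] |= 1u << bit;
		}

		/* Only VFs that registered hashes get ROMPE (accept frames
		 * matching the MTA); idle VFs keep it clear. */
		if (vf_hash_count[vf])
			vmolr[vf] |= VMOLR_ROMPE;
		else
			vmolr[vf] &= ~VMOLR_ROMPE;

		printf("VF%d: VMOLR=0x%08x\n", vf, vmolr[vf]);
	}
	return 0;
}
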
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
u32 vf)
@@ -495,8 +490,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- vmolr |= (IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_BAM);
+ vmolr |= IXGBE_VMOLR_BAM;
if (aupe)
vmolr |= IXGBE_VMOLR_AUPE;
else
@@ -514,7 +508,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
u8 num_tcs = netdev_get_num_tc(adapter->netdev);
/* add PF assigned VLAN or VLAN 0 */
@@ -544,7 +537,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* Flush and reset the mta with the new values */
ixgbe_set_rx_mode(adapter->netdev);
- hw->mac.ops.clear_rar(hw, rar_entry);
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
/* reset VF api back to unknown */
adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
@@ -553,11 +546,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
- hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
return 0;
}
@@ -565,7 +556,6 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int vf, int index, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
struct list_head *pos;
struct vf_macvlans *entry;
@@ -576,7 +566,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = -1;
entry->free = true;
entry->is_macvlan = false;
- hw->mac.ops.clear_rar(hw, entry->rar_entry);
+ ixgbe_del_mac_filter(adapter,
+ entry->vf_macvlan, vf);
}
}
}
@@ -612,7 +603,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = vf;
memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
- hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_add_mac_filter(adapter, mac_addr, vf);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 139eaddfb2ed..cea640147604 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -34,7 +34,9 @@
*/
#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+#endif
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8a6ff2423f07..551d6089a4d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2746,7 +2746,7 @@ struct ixgbe_bus_info {
/* Flow control parameters */
struct ixgbe_fc_info {
u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
- u32 low_water; /* Flow Control Low-water */
+ u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d0799e8e31e4..eacce3a2e9ec 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -85,7 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78099eab7673..24201033661b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
/* First, verify that the master reports correct status */
if (comm_pending(dev)) {
- mlx4_warn(dev, "Communication channel is not idle."
- "my toggle is %d (cmd:0x%x)\n",
+ mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
priv->cmd.comm_toggle, cmd);
return -EAGAIN;
}
@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
*out_param =
be64_to_cpu(vhcr->out_param);
else {
- mlx4_err(dev, "response expected while"
- "output mailbox is NULL for "
- "command 0x%x\n", op);
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
vhcr->status = CMD_STAT_BAD_PARAM;
}
}
@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
*out_param =
be64_to_cpu(vhcr->out_param);
else {
- mlx4_err(dev, "response expected while"
- "output mailbox is NULL for "
- "command 0x%x\n", op);
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
vhcr->status = CMD_STAT_BAD_PARAM;
}
}
ret = mlx4_status_to_errno(vhcr->status);
} else
- mlx4_err(dev, "failed execution of VHCR_POST command"
- "opcode 0x%x\n", op);
+ mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
+ op);
}
mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -625,9 +622,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
(slave & ~0x7f) | (size & 0xff)) {
- mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
- "master_addr:0x%llx slave_id:%d size:%d\n",
- slave_addr, master_addr, slave, size);
+ mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
+ slave_addr, master_addr, slave, size);
return -EINVAL;
}
@@ -788,8 +784,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
smp->method == IB_MGMT_METHOD_SET))) {
- mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
- "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
+ mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x for attr 0x%x - Rejecting\n",
slave, smp->method, smp->mgmt_class,
be16_to_cpu(smp->attr_id));
return -EPERM;
@@ -1409,8 +1404,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
ALIGN(sizeof(struct mlx4_vhcr_cmd),
MLX4_ACCESS_MEM_ALIGN), 1);
if (ret) {
- mlx4_err(dev, "%s:Failed reading vhcr"
- "ret: 0x%x\n", __func__, ret);
+ mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
+ __func__, ret);
kfree(vhcr);
return ret;
}
@@ -1461,9 +1456,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
/* Apply permission and bound checks if applicable */
if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
- mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
- "checks for resource_id:%d\n", vhcr->op, slave,
- vhcr->in_modifier);
+ mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
+ vhcr->op, slave, vhcr->in_modifier);
vhcr_cmd->status = CMD_STAT_BAD_OP;
goto out_status;
}
@@ -1502,8 +1496,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
}
if (err) {
- mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
- " error:%d, status %d\n",
+ mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
vhcr->op, slave, vhcr->errno, err);
vhcr_cmd->status = mlx4_errno_to_status(err);
goto out_status;
@@ -1537,8 +1530,8 @@ out_status:
__func__);
else if (vhcr->e_bit &&
mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
- mlx4_warn(dev, "Failed to generate command completion "
- "eqe for slave %d\n", slave);
+ mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
+ slave);
}
out:
@@ -1577,8 +1570,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
slave, port);
- mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan,
- vp_admin->default_qos, vp_admin->link_state);
+ mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
+ vp_admin->default_vlan, vp_admin->default_qos,
+ vp_admin->link_state);
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (!work)
@@ -1591,7 +1585,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
&admin_vlan_ix);
if (err) {
kfree(work);
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n",
slave, port);
return err;
@@ -1600,7 +1594,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
admin_vlan_ix = NO_INDX;
}
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
- mlx4_dbg((&(priv->dev)),
+ mlx4_dbg(&priv->dev,
"alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_admin->default_vlan),
admin_vlan_ix, slave, port);
@@ -1661,12 +1655,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
vp_admin->default_vlan, &(vp_oper->vlan_idx));
if (err) {
vp_oper->vlan_idx = NO_INDX;
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No vlan resorces slave %d, port %d\n",
slave, port);
return err;
}
- mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n",
+ mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_oper->state.default_vlan),
vp_oper->vlan_idx, slave, port);
}
@@ -1677,12 +1671,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
if (0 > vp_oper->mac_idx) {
err = vp_oper->mac_idx;
vp_oper->mac_idx = NO_INDX;
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No mac resorces slave %d, port %d\n",
slave, port);
return err;
}
- mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n",
+ mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
vp_oper->state.mac, vp_oper->mac_idx, slave, port);
}
}
@@ -1731,8 +1725,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
slave_state[slave].comm_toggle ^= 1;
reply = (u32) slave_state[slave].comm_toggle << 31;
if (toggle != slave_state[slave].comm_toggle) {
- mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
- "STATE COMPROMISIED ***\n", toggle, slave);
+ mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
+ toggle, slave);
goto reset_slave;
}
if (cmd == MLX4_COMM_CMD_RESET) {
@@ -1759,8 +1753,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
/*command from slave in the middle of FLR*/
if (cmd != MLX4_COMM_CMD_RESET &&
MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
- mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
- "in the middle of FLR\n", slave, cmd);
+		mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
+ slave, cmd);
return;
}
@@ -1798,8 +1792,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_master_process_vhcr(dev, slave, NULL)) {
- mlx4_err(dev, "Failed processing vhcr for slave:%d,"
- " resetting slave.\n", slave);
+ mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
+ slave);
mutex_unlock(&priv->cmd.slave_cmd_mutex);
goto reset_slave;
}
@@ -1816,8 +1810,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
is_going_down = 1;
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
if (is_going_down) {
- mlx4_warn(dev, "Slave is going down aborting command(%d)"
- " executing from slave:%d\n",
+ mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
cmd, slave);
return;
}
@@ -1880,9 +1873,8 @@ void mlx4_master_comm_channel(struct work_struct *work)
if (toggle != slt) {
if (master->slave_state[slave].comm_toggle
!= slt) {
- printk(KERN_INFO "slave %d out of sync."
- " read toggle %d, state toggle %d. "
- "Resynching.\n", slave, slt,
+ printk(KERN_INFO "slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
+ slave, slt,
master->slave_state[slave].comm_toggle);
master->slave_state[slave].comm_toggle =
slt;
@@ -1896,8 +1888,7 @@ void mlx4_master_comm_channel(struct work_struct *work)
}
if (reported && reported != served)
- mlx4_warn(dev, "Got command event with bitmask from %d slaves"
- " but %d were served\n",
+ mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
reported, served);
if (mlx4_ARM_COMM_CHANNEL(dev))
@@ -1953,7 +1944,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
ioremap(pci_resource_start(dev->pdev, 2) +
MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
if (!priv->mfunc.comm) {
- mlx4_err(dev, "Couldn't map communication vector.\n");
+ mlx4_err(dev, "Couldn't map communication vector\n");
goto err_vhcr;
}
@@ -2080,7 +2071,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
MLX4_HCR_BASE, MLX4_HCR_SIZE);
if (!priv->cmd.hcr) {
- mlx4_err(dev, "Couldn't map command register.\n");
+ mlx4_err(dev, "Couldn't map command register\n");
return -ENOMEM;
}
}
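
The mlx4 hunks above are mostly mechanical: log strings that had been split across source lines are rejoined so the full runtime message can be grepped for, and only the argument list is wrapped. A tiny illustration of the convention, with my_warn() standing in for mlx4_warn():

#include <stdio.h>

#define my_warn(fmt, ...) fprintf(stderr, "mlx4_core: " fmt, ##__VA_ARGS__)

int main(void)
{
	int slave = 3, cmd = 0x1f;

	/* Discouraged: the format string is split, so grepping the source
	 * for the message seen at runtime ("trying to run cmd") misses it. */
	my_warn("slave:%d is trying "
		"to run cmd(0x%x) in the middle of FLR\n", slave, cmd);

	/* Preferred: keep the format string whole, even past 80 columns,
	 * and wrap only the arguments. */
	my_warn("slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
		slave, cmd);
	return 0;
}
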
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index c2cd8d31bcad..636963db598a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
&cq->vector)) {
cq->vector = (cq->ring + 1 + priv->port)
% mdev->dev->caps.num_comp_vectors;
- mlx4_warn(mdev, "Failed Assigning an EQ to "
- "%s ,Falling back to legacy EQ's\n",
+ mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
name);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3e8d33605fe7..c3736045e7af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev,
qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
} else {
if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
- en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
+ en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
cmd->fs.ring_cookie);
return -EINVAL;
}
qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
if (!qpn) {
- en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
+ en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
cmd->fs.ring_cookie);
return -EINVAL;
}
@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
}
err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
if (err) {
- en_err(priv, "Fail to attach network rule at location %d.\n",
+ en_err(priv, "Fail to attach network rule at location %d\n",
cmd->fs.location);
goto out_free_list;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0c59d4fe7e3a..f953c1d7eae6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
MLX4_EN_MAX_TX_RING_P_UP);
if (params->udp_rss && !(mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
- mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
+ mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
params->udp_rss = 0;
}
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
if (!mdev->LSO_support)
- mlx4_warn(mdev, "LSO not supported, please upgrade to later "
- "FW version to enable LSO\n");
+ mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
/* Build device profile according to supplied module parameters */
err = mlx4_en_get_profile(mdev);
if (err) {
- mlx4_err(mdev, "Bad module parameters, aborting.\n");
+ mlx4_err(mdev, "Bad module parameters, aborting\n");
goto err_mr;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e4b1720c3d1..fba3c8e77626 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1576,7 +1576,7 @@ int mlx4_en_start_port(struct net_device *dev)
cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
- en_err(priv, "Failed setting cq moderation parameters");
+ en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
goto cq_err;
}
@@ -1615,7 +1615,7 @@ int mlx4_en_start_port(struct net_device *dev)
}
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
- en_err(priv, "Failed setting cq moderation parameters");
+ en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
@@ -2594,8 +2594,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
prof->tx_pause, prof->tx_ppp,
prof->rx_pause, prof->rx_ppp);
if (err) {
- en_err(priv, "Failed setting port general configurations "
- "for port %d, with error %d\n", priv->port, err);
+ en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+ priv->port, err);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index ba049ae88749..a1512450816d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
ring->actual_size,
GFP_KERNEL)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
- en_err(priv, "Failed to allocate "
- "enough rx buffers\n");
+ en_err(priv, "Failed to allocate enough rx buffers\n");
return -ENOMEM;
} else {
new_size = rounddown_pow_of_two(ring->actual_size);
- en_warn(priv, "Only %d buffers allocated "
- "reducing ring size to %d",
+ en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
ring->actual_size, new_size);
goto reduce_rings;
}
@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
- en_err(priv, "CQE completed in error - vendor "
- "syndrom:%d syndrom:%d\n",
- ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
- ((struct mlx4_err_cqe *) cqe)->syndrome);
+			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
+ ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
+ ((struct mlx4_err_cqe *)cqe)->syndrome);
goto next;
}
if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
@@ -944,8 +941,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
priv->rx_skb_size = eff_mtu;
priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
- en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
- "num_frags:%d):\n", eff_mtu, priv->num_frags);
+ en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
+ eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_err(priv,
" frag:%d - size:%d prefix:%d align:%d stride:%d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index dd1f6d346459..89585c6311c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -108,9 +108,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->buf = ring->wqres.buf.direct.buf;
- en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
- "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
- ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+ en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
+ ring, ring->buf, ring->size, ring->buf_size,
+ (unsigned long long) ring->wqres.buf.direct.map);
ring->qpn = qpn;
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
if (err) {
- en_dbg(DRV, priv, "working without blueflame (%d)", err);
+ en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
ring->bf.uar = &mdev->priv_uar;
ring->bf.uar->map = mdev->uar_map;
ring->bf_enabled = false;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d501a2b0fb79..6c088bc1845b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -152,14 +152,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
if (i != dev->caps.function &&
master->slave_state[i].active)
if (mlx4_GEN_EQE(dev, i, eqe))
- mlx4_warn(dev, "Failed to "
- " generate event "
- "for slave %d\n", i);
+ mlx4_warn(dev, "Failed to generate event for slave %d\n",
+ i);
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
- mlx4_warn(dev, "Failed to generate event "
- "for slave %d\n", slave);
+ mlx4_warn(dev, "Failed to generate event for slave %d\n",
+ slave);
}
++slave_eq->cons;
}
@@ -177,8 +176,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
if ((!!(s_eqe->owner & 0x80)) ^
(!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
- mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
- "No free EQE on slave events queue\n", slave);
+ mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
+ slave);
spin_unlock_irqrestore(&slave_eq->event_lock, flags);
return;
}
@@ -375,9 +374,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
}
break;
default:
- pr_err("%s: BUG!!! UNKNOWN state: "
- "slave:%d, port:%d\n", __func__, slave, port);
- goto out;
+ pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
+ __func__, slave, port);
+ goto out;
}
ret = mlx4_get_slave_port_state(dev, slave, port);
@@ -425,8 +424,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
for (i = 0 ; i < dev->num_slaves; i++) {
if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
- mlx4_dbg(dev, "mlx4_handle_slave_flr: "
- "clean slave: %d\n", i);
+ mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
+ i);
mlx4_delete_all_resources_for_slave(dev, i);
/*return the slave to running mode*/
@@ -438,8 +437,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
- mlx4_warn(dev, "Failed to notify FW on "
- "FLR done (slave:%d)\n", i);
+ mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
+ i);
}
}
}
@@ -490,9 +489,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be32_to_cpu(eqe->event.qp.qpn)
& 0xffffff, &slave);
if (ret && ret != -ENOENT) {
- mlx4_dbg(dev, "QP event %02x(%02x) on "
- "EQ %d at index %u: could "
- "not get slave id (%d)\n",
+ mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break;
@@ -520,23 +517,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
& 0xffffff,
&slave);
if (ret && ret != -ENOENT) {
- mlx4_warn(dev, "SRQ event %02x(%02x) "
- "on EQ %d at index %u: could"
- " not get slave id (%d)\n",
+ mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
- " event: %02x(%02x)\n", __func__,
- slave,
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event "
- "%02x(%02x) to slave:%d\n",
- __func__, eqe->type,
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
@@ -569,8 +562,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
if (i == mlx4_master_func_num(dev))
continue;
- mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
- " to slave: %d, port:%d\n",
+ mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
__func__, i, port);
s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
@@ -634,11 +626,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be32_to_cpu(eqe->event.cq_err.cqn)
& 0xffffff, &slave);
if (ret && ret != -ENOENT) {
- mlx4_dbg(dev, "CQ event %02x(%02x) on "
- "EQ %d at index %u: could "
- "not get slave id (%d)\n",
- eqe->type, eqe->subtype,
- eq->eqn, eq->cons_index, ret);
+ mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
break;
}
@@ -667,8 +657,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_COMM_CHANNEL:
if (!mlx4_is_master(dev)) {
- mlx4_warn(dev, "Received comm channel event "
- "for non master device\n");
+ mlx4_warn(dev, "Received comm channel event for non master device\n");
break;
}
memcpy(&priv->mfunc.master.comm_arm_bit_vector,
@@ -681,8 +670,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_FLR_EVENT:
flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
if (!mlx4_is_master(dev)) {
- mlx4_warn(dev, "Non-master function received"
- "FLR event\n");
+ mlx4_warn(dev, "Non-master function received FLR event\n");
break;
}
@@ -711,22 +699,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
if (mlx4_is_master(dev))
for (i = 0; i < dev->num_slaves; i++) {
- mlx4_dbg(dev, "%s: Sending "
- "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
- " to slave: %d\n", __func__, i);
+ mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
+ __func__, i);
if (i == dev->caps.function)
continue;
mlx4_slave_event(dev, i, eqe);
}
- mlx4_err(dev, "Temperature Threshold was reached! "
- "Threshold: %d celsius degrees; "
- "Current Temperature: %d\n",
- be16_to_cpu(eqe->event.warming.warning_threshold),
- be16_to_cpu(eqe->event.warming.current_temperature));
+ mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
+ be16_to_cpu(eqe->event.warming.warning_threshold),
+ be16_to_cpu(eqe->event.warming.current_temperature));
} else
- mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
- "subtype %02x on EQ %d at index %u. owner=%x, "
- "nent=0x%x, slave=%x, ownership=%s\n",
+ mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id,
@@ -743,9 +726,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
- mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
- "index %u. owner=%x, nent=0x%x, slave=%x, "
- "ownership=%s\n",
+ mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id,
@@ -1088,7 +1069,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
if (!priv->clr_base) {
- mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
+ mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index d16a4d118903..c52e04891317 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -428,8 +428,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
- mlx4_err(dev, "phy_wqe_gid is "
- "enforced on this ib port\n");
+ mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
err = -EPROTONOSUPPORT;
goto out;
}
@@ -1054,10 +1053,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
*/
lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
if (lg < MLX4_ICM_PAGE_SHIFT) {
- mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
- MLX4_ICM_PAGE_SIZE,
- (unsigned long long) mlx4_icm_addr(&iter),
- mlx4_icm_size(&iter));
+ mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
+ MLX4_ICM_PAGE_SIZE,
+ (unsigned long long) mlx4_icm_addr(&iter),
+ mlx4_icm_size(&iter));
err = -EINVAL;
goto out;
}
@@ -1093,14 +1092,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
switch (op) {
case MLX4_CMD_MAP_FA:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM_AUX:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
- tc, ts, (unsigned long long) virt - (ts << 10));
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
+ tc, ts, (unsigned long long) virt - (ts << 10));
break;
}
@@ -1186,14 +1185,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
- mlx4_err(dev, "Installed FW has unsupported "
- "command interface revision %d.\n",
+ mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
cmd_if_rev);
mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
(int) (dev->caps.fw_ver >> 32),
(int) (dev->caps.fw_ver >> 16) & 0xffff,
(int) dev->caps.fw_ver & 0xffff);
- mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
+ mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
err = -ENODEV;
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7cf9dadcb471..5db97a4fdc01 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -163,8 +163,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
for (i = 0; i < dev->caps.num_ports - 1; i++) {
if (port_type[i] != port_type[i + 1]) {
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
- mlx4_err(dev, "Only same port types supported "
- "on this HCA, aborting.\n");
+ mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
return -EINVAL;
}
}
@@ -172,8 +171,8 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
for (i = 0; i < dev->caps.num_ports; i++) {
if (!(port_type[i] & dev->caps.supported_type[i+1])) {
- mlx4_err(dev, "Requested port type for port %d is not "
- "supported on this HCA\n", i + 1);
+ mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
+ i + 1);
return -EINVAL;
}
}
@@ -195,26 +194,23 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
if (dev_cap->min_page_sz > PAGE_SIZE) {
- mlx4_err(dev, "HCA minimum page size of %d bigger than "
- "kernel PAGE_SIZE of %ld, aborting.\n",
+ mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
dev_cap->min_page_sz, PAGE_SIZE);
return -ENODEV;
}
if (dev_cap->num_ports > MLX4_MAX_PORTS) {
- mlx4_err(dev, "HCA has %d ports, but we only support %d, "
- "aborting.\n",
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
dev_cap->num_ports, MLX4_MAX_PORTS);
return -ENODEV;
}
if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
- mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
- "PCI resource 2 size of 0x%llx, aborting.\n",
+ mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev_cap->uar_size,
(unsigned long long) pci_resource_len(dev->pdev, 2));
return -ENODEV;
@@ -347,14 +343,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
dev->caps.log_num_macs = dev_cap->log_max_macs[i];
- mlx4_warn(dev, "Requested number of MACs is too much "
- "for port %d, reducing to %d.\n",
+ mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_macs);
}
if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
- mlx4_warn(dev, "Requested number of VLANs is too much "
- "for port %d, reducing to %d.\n",
+ mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_vlans);
}
}
@@ -584,7 +578,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
memset(&hca_param, 0, sizeof(hca_param));
err = mlx4_QUERY_HCA(dev, &hca_param);
if (err) {
- mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
return err;
}
@@ -603,19 +597,18 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
err = mlx4_QUERY_FW(dev);
if (err)
- mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+ mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
page_size = ~dev->caps.page_size_cap + 1;
mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
if (page_size > PAGE_SIZE) {
- mlx4_err(dev, "HCA minimum page size of %d bigger than "
- "kernel PAGE_SIZE of %ld, aborting.\n",
+ mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
page_size, PAGE_SIZE);
return -ENODEV;
}
@@ -633,8 +626,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
memset(&func_cap, 0, sizeof(func_cap));
err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
if (err) {
- mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
- err);
+ mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
+ err);
return err;
}
@@ -661,8 +654,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.num_amgms = 0;
if (dev->caps.num_ports > MLX4_MAX_PORTS) {
- mlx4_err(dev, "HCA has %d ports, but we only support %d, "
- "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
+ dev->caps.num_ports, MLX4_MAX_PORTS);
return -ENODEV;
}
@@ -680,8 +673,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
for (i = 1; i <= dev->caps.num_ports; ++i) {
err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
if (err) {
- mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
- " port %d, aborting (%d).\n", i, err);
+ mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
+ i, err);
goto err_mem;
}
dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
@@ -699,8 +692,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
if (dev->caps.uar_page_size * (dev->caps.num_uars -
dev->caps.reserved_uars) >
pci_resource_len(dev->pdev, 2)) {
- mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
- "PCI resource 2 size of 0x%llx, aborting.\n",
+ mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev->caps.uar_page_size * dev->caps.num_uars,
(unsigned long long) pci_resource_len(dev->pdev, 2));
goto err_mem;
@@ -722,7 +714,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
}
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
- mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
+ mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
@@ -784,8 +776,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
dev->caps.port_type[port] = port_types[port - 1];
err = mlx4_SET_PORT(dev, port, -1);
if (err) {
- mlx4_err(dev, "Failed to set port %d, "
- "aborting\n", port);
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
goto out;
}
}
@@ -868,9 +860,7 @@ static ssize_t set_port_type(struct device *dev,
}
}
if (err) {
- mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
- "Set only 'eth' or 'ib' for both ports "
- "(should be the same)\n");
+ mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
goto out;
}
@@ -975,8 +965,8 @@ static ssize_t set_port_ib_mtu(struct device *dev,
mlx4_CLOSE_PORT(mdev, port);
err = mlx4_SET_PORT(mdev, port, -1);
if (err) {
- mlx4_err(mdev, "Failed to set port %d, "
- "aborting\n", port);
+ mlx4_err(mdev, "Failed to set port %d, aborting\n",
+ port);
goto err_set_port;
}
}
@@ -995,19 +985,19 @@ static int mlx4_load_fw(struct mlx4_dev *dev)
priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.fw_icm) {
- mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
return -ENOMEM;
}
err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
if (err) {
- mlx4_err(dev, "MAP_FA command failed, aborting.\n");
+ mlx4_err(dev, "MAP_FA command failed, aborting\n");
goto err_free;
}
err = mlx4_RUN_FW(dev);
if (err) {
- mlx4_err(dev, "RUN_FW command failed, aborting.\n");
+ mlx4_err(dev, "RUN_FW command failed, aborting\n");
goto err_unmap_fa;
}
@@ -1091,30 +1081,30 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
if (err) {
- mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
+ mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
return err;
}
- mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
+ mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
(unsigned long long) icm_size >> 10,
(unsigned long long) aux_pages << 2);
priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.aux_icm) {
- mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
return -ENOMEM;
}
err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
if (err) {
- mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
+ mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
goto err_free_aux;
}
err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
if (err) {
- mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
goto err_unmap_aux;
}
@@ -1125,7 +1115,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
num_eqs, num_eqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
goto err_unmap_cmpt;
}
@@ -1146,7 +1136,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mtts,
dev->caps.reserved_mtts, 1, 0);
if (err) {
- mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
goto err_unmap_eq;
}
@@ -1156,7 +1146,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mpts,
dev->caps.reserved_mrws, 1, 1);
if (err) {
- mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
goto err_unmap_mtt;
}
@@ -1167,7 +1157,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map QP context memory, aborting\n");
goto err_unmap_dmpt;
}
@@ -1178,7 +1168,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
goto err_unmap_qp;
}
@@ -1189,7 +1179,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
goto err_unmap_auxc;
}
@@ -1210,7 +1200,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_cqs,
dev->caps.reserved_cqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
goto err_unmap_rdmarc;
}
@@ -1220,7 +1210,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_srqs,
dev->caps.reserved_srqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
goto err_unmap_cq;
}
@@ -1238,7 +1228,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mgms + dev->caps.num_amgms,
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
goto err_unmap_srq;
}
@@ -1315,7 +1305,7 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
- mlx4_warn(dev, "Failed to close slave function.\n");
+ mlx4_warn(dev, "Failed to close slave function\n");
mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
@@ -1413,7 +1403,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
u32 cmd_channel_ver;
if (atomic_read(&pf_loading)) {
- mlx4_warn(dev, "PF is not ready. Deferring probe\n");
+ mlx4_warn(dev, "PF is not ready - Deferring probe\n");
return -EPROBE_DEFER;
}
@@ -1426,8 +1416,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
* NUM_OF_RESET_RETRIES times before leaving.*/
if (ret_from_reset) {
if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
- mlx4_warn(dev, "slave is currently in the "
- "middle of FLR. Deferring probe.\n");
+ mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return -EPROBE_DEFER;
} else
@@ -1441,8 +1430,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
MLX4_COMM_GET_IF_REV(slave_read)) {
- mlx4_err(dev, "slave driver version is not supported"
- " by the master\n");
+ mlx4_err(dev, "slave driver version is not supported by the master\n");
goto err;
}
@@ -1520,8 +1508,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
- mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
- "set to use B0 steering. Falling back to A0 steering mode.\n");
+ mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
}
dev->oper_log_mgm_entry_size =
mlx4_log_num_mgm_entry_size > 0 ?
@@ -1529,8 +1516,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
}
- mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
- "modparam log_num_mgm_entry_size = %d\n",
+ mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
mlx4_steering_mode_str(dev->caps.steering_mode),
dev->oper_log_mgm_entry_size,
mlx4_log_num_mgm_entry_size);
@@ -1564,15 +1550,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_QUERY_FW(dev);
if (err) {
if (err == -EACCES)
- mlx4_info(dev, "non-primary physical function, skipping.\n");
+ mlx4_info(dev, "non-primary physical function, skipping\n");
else
- mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_FW command failed, aborting\n");
return err;
}
err = mlx4_load_fw(dev);
if (err) {
- mlx4_err(dev, "Failed to start FW, aborting.\n");
+ mlx4_err(dev, "Failed to start FW, aborting\n");
return err;
}
@@ -1584,7 +1570,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
goto err_stop_fw;
}
@@ -1625,7 +1611,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_INIT_HCA(dev, &init_hca);
if (err) {
- mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+ mlx4_err(dev, "INIT_HCA command failed, aborting\n");
goto err_free_icm;
}
/*
@@ -1636,7 +1622,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
memset(&init_hca, 0, sizeof(init_hca));
err = mlx4_QUERY_HCA(dev, &init_hca);
if (err) {
- mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n");
+ mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
} else {
dev->caps.hca_core_clock =
@@ -1649,14 +1635,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
if (!dev->caps.hca_core_clock) {
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
mlx4_err(dev,
- "HCA frequency is 0. Timestamping is not supported.");
+ "HCA frequency is 0 - timestamping is not supported\n");
} else if (map_internal_clock(dev)) {
/*
* Map internal clock,
* in case of failure disable timestamping
*/
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
- mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n");
+ mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
}
}
} else {
@@ -1683,7 +1669,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_QUERY_ADAPTER(dev, &adapter);
if (err) {
- mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
goto unmap_bf;
}
@@ -1793,79 +1779,69 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_init_uar_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "user access region table, aborting.\n");
- return err;
+ mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
+ return err;
}
err = mlx4_uar_alloc(dev, &priv->driver_uar);
if (err) {
- mlx4_err(dev, "Failed to allocate driver access region, "
- "aborting.\n");
+ mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
goto err_uar_table_free;
}
priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
if (!priv->kar) {
- mlx4_err(dev, "Couldn't map kernel access region, "
- "aborting.\n");
+ mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
err = -ENOMEM;
goto err_uar_free;
}
err = mlx4_init_pd_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "protection domain table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
goto err_kar_unmap;
}
err = mlx4_init_xrcd_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "reliable connection domain table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
goto err_pd_table_free;
}
err = mlx4_init_mr_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "memory region table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
goto err_xrcd_table_free;
}
if (!mlx4_is_slave(dev)) {
err = mlx4_init_mcg_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
goto err_mr_table_free;
}
}
err = mlx4_init_eq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "event queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
goto err_mcg_table_free;
}
err = mlx4_cmd_use_events(dev);
if (err) {
- mlx4_err(dev, "Failed to switch to event-driven "
- "firmware commands, aborting.\n");
+ mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
goto err_eq_table_free;
}
err = mlx4_NOP(dev);
if (err) {
if (dev->flags & MLX4_FLAG_MSI_X) {
- mlx4_warn(dev, "NOP command failed to generate MSI-X "
- "interrupt IRQ %d).\n",
+ mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
- mlx4_warn(dev, "Trying again without MSI-X.\n");
+ mlx4_warn(dev, "Trying again without MSI-X\n");
} else {
- mlx4_err(dev, "NOP command failed to generate interrupt "
- "(IRQ %d), aborting.\n",
+ mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
}
@@ -1877,28 +1853,25 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_init_cq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "completion queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
goto err_cmd_poll;
}
err = mlx4_init_srq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "shared receive queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
goto err_cq_table_free;
}
err = mlx4_init_qp_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "queue pair table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
goto err_srq_table_free;
}
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
- mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize counters table, aborting\n");
goto err_qp_table_free;
}
@@ -1908,9 +1881,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_get_port_ib_caps(dev, port,
&ib_port_default_caps);
if (err)
- mlx4_warn(dev, "failed to get port %d default "
- "ib capabilities (%d). Continuing "
- "with caps = 0\n", port, err);
+ mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
+ port, err);
dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
/* initialize per-slave default ib port capabilities */
@@ -1920,7 +1892,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (i == mlx4_master_func_num(dev))
continue;
priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
- ib_port_default_caps;
+ ib_port_default_caps;
}
}
@@ -1933,7 +1905,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
dev->caps.pkey_table_len[port] : -1);
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
- port);
+ port);
goto err_counters_table_free;
}
}
@@ -2009,7 +1981,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
kfree(entries);
goto no_msi;
} else if (nreq < MSIX_LEGACY_SZ +
- dev->caps.num_ports * MIN_MSIX_P_PORT) {
+ dev->caps.num_ports * MIN_MSIX_P_PORT) {
/*Working in legacy mode , all EQ's shared*/
dev->caps.comp_pool = 0;
dev->caps.num_comp_vectors = nreq - 1;
@@ -2209,8 +2181,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
@@ -2257,14 +2228,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
*/
if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing DCS, aborting."
- "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
+ dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
pci_dev_data, pci_resource_flags(pdev, 0));
err = -ENODEV;
goto err_disable_pdev;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing UAR, aborting.\n");
+ dev_err(&pdev->dev, "Missing UAR, aborting\n");
err = -ENODEV;
goto err_disable_pdev;
}
@@ -2279,21 +2249,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
goto err_release_regions;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
- "consistent PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
goto err_release_regions;
}
}
@@ -2324,7 +2292,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (total_vfs) {
unsigned vfs_offset = 0;
for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
- vfs_offset + nvfs[i] < extended_func_num(pdev);
+ vfs_offset + nvfs[i] < extended_func_num(pdev);
vfs_offset += nvfs[i], i++)
;
if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
@@ -2350,8 +2318,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (err < 0)
goto err_free_dev;
else {
- mlx4_warn(dev, "Multiple PFs not yet supported."
- " Skipping PF.\n");
+ mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
err = -EINVAL;
goto err_free_dev;
}
@@ -2361,8 +2328,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
total_vfs);
dev->dev_vfs = kzalloc(
- total_vfs * sizeof(*dev->dev_vfs),
- GFP_KERNEL);
+ total_vfs * sizeof(*dev->dev_vfs),
+ GFP_KERNEL);
if (NULL == dev->dev_vfs) {
mlx4_err(dev, "Failed to allocate memory for VFs\n");
err = 0;
@@ -2370,14 +2337,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
atomic_inc(&pf_loading);
err = pci_enable_sriov(pdev, total_vfs);
if (err) {
- mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
+ mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
err);
atomic_dec(&pf_loading);
err = 0;
} else {
mlx4_warn(dev, "Running in master mode\n");
dev->flags |= MLX4_FLAG_SRIOV |
- MLX4_FLAG_MASTER;
+ MLX4_FLAG_MASTER;
dev->num_vfs = total_vfs;
sriov_initialized = 1;
}
@@ -2394,7 +2361,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
*/
err = mlx4_reset(dev);
if (err) {
- mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ mlx4_err(dev, "Failed to reset HCA, aborting\n");
goto err_rel_own;
}
}
@@ -2402,7 +2369,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
slave_start:
err = mlx4_cmd_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init command interface, aborting.\n");
+ mlx4_err(dev, "Failed to init command interface, aborting\n");
goto err_sriov;
}
@@ -2416,8 +2383,7 @@ slave_start:
dev->num_slaves = 0;
err = mlx4_multi_func_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init slave mfunc"
- " interface, aborting.\n");
+ mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
goto err_cmd;
}
}
@@ -2449,8 +2415,7 @@ slave_start:
unsigned sum = 0;
err = mlx4_multi_func_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init master mfunc"
- "interface, aborting.\n");
+ mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
goto err_close;
}
if (sriov_initialized) {
@@ -2461,10 +2426,7 @@ slave_start:
if (ib_ports &&
(num_vfs_argc > 1 || probe_vfs_argc > 1)) {
mlx4_err(dev,
- "Invalid syntax of num_vfs/probe_vfs "
- "with IB port. Single port VFs syntax"
- " is only supported when all ports "
- "are configured as ethernet\n");
+ "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
goto err_close;
}
for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
@@ -2490,8 +2452,7 @@ slave_start:
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
err = -ENOSYS;
- mlx4_err(dev, "INTx is not supported in multi-function mode."
- " aborting.\n");
+ mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
goto err_free_eq;
}
@@ -2829,11 +2790,10 @@ static int __init mlx4_verify_params(void)
if (mlx4_log_num_mgm_entry_size != -1 &&
(mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
- pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
- "in legal range (-1 or %d..%d)\n",
- mlx4_log_num_mgm_entry_size,
- MLX4_MIN_MGM_LOG_ENTRY_SIZE,
- MLX4_MAX_MGM_LOG_ENTRY_SIZE);
+ pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
+ mlx4_log_num_mgm_entry_size,
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE,
+ MLX4_MAX_MGM_LOG_ENTRY_SIZE);
return -1;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 80ccb4edf825..7c6eba622186 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
if (*index != hash) {
- mlx4_err(dev, "Found zero MGID in AMGM.\n");
+ mlx4_err(dev, "Found zero MGID in AMGM\n");
err = -EINVAL;
}
return err;
@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
mlx4_err(dev, "%s", buf);
if (len >= BUF_SIZE)
- mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+ mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
}
int mlx4_flow_attach(struct mlx4_dev *dev,
@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
if (ret == -ENOMEM)
mlx4_err_rule(dev,
- "mcg table is full. Fail to register network rule.\n",
+ "mcg table is full. Fail to register network rule\n",
rule);
else if (ret)
- mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+ mlx4_err_rule(dev, "Fail to register network rule\n", rule);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
if (members_count == dev->caps.num_qp_per_mgm) {
- mlx4_err(dev, "MGM at index %x is full.\n", index);
+ mlx4_err(dev, "MGM at index %x is full\n", index);
err = -ENOMEM;
goto out;
}
@@ -1042,7 +1042,7 @@ out:
}
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
- mlx4_warn(dev, "Got AMGM index %d < %d",
+ mlx4_warn(dev, "Got AMGM index %d < %d\n",
index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (amgm_index) {
if (amgm_index < dev->caps.num_mgms)
- mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
+ mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
index, amgm_index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
goto out;
if (index < dev->caps.num_mgms)
- mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
+ mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
prev, index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f9c465101963..52c1e7da74c4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -216,18 +216,19 @@ extern int mlx4_debug_level;
#define mlx4_debug_level (0)
#endif /* CONFIG_MLX4_DEBUG */
-#define mlx4_dbg(mdev, format, arg...) \
+#define mlx4_dbg(mdev, format, ...) \
do { \
if (mlx4_debug_level) \
- dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
+ dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \
+ ##__VA_ARGS__); \
} while (0)
-#define mlx4_err(mdev, format, arg...) \
- dev_err(&mdev->pdev->dev, format, ##arg)
-#define mlx4_info(mdev, format, arg...) \
- dev_info(&mdev->pdev->dev, format, ##arg)
-#define mlx4_warn(mdev, format, arg...) \
- dev_warn(&mdev->pdev->dev, format, ##arg)
+#define mlx4_err(mdev, format, ...) \
+ dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...) \
+ dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...) \
+ dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg;
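The mlx4.h hunk above switches the logging macros from the GNU `arg...` form to C99 `__VA_ARGS__` and parenthesizes the `mdev` parameter. The short user-space sketch below shows why the parentheses matter; it is illustrative only, not kernel code, and the names (struct mdev, log_safe, log_unsafe) are invented for the example. The comma-swallowing `##__VA_ARGS__` is itself still a GNU/clang extension, which kernel code relies on.

	#include <stdio.h>

	struct pdev { const char *name; };
	struct mdev { struct pdev *pdev; };

	/* hypothetical stand-ins for dev_err()/mlx4_err() */
	#define log_unsafe(mdev, fmt, ...) \
		printf("%s: " fmt, mdev->pdev->name, ##__VA_ARGS__)
	#define log_safe(mdev, fmt, ...) \
		printf("%s: " fmt, (mdev)->pdev->name, ##__VA_ARGS__)

	int main(void)
	{
		struct pdev p = { "mlx4_0" };
		struct mdev a = { &p }, b = { &p };
		int pick = 0;

		/* log_unsafe(pick ? &a : &b, "port %d up\n", 1) mis-parses:
		 * it expands to "pick ? &a : &b->pdev->name", so the member
		 * access binds only to b and the two arms no longer agree in
		 * type.  The parenthesized variant expands as intended: */
		log_safe(pick ? &a : &b, "port %d up\n", 1);
		return 0;
	}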
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 04d9b6fe3e80..b5db1bf361dc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -830,26 +830,26 @@ __printf(3, 4)
int en_print(const char *level, const struct mlx4_en_priv *priv,
const char *format, ...);
-#define en_dbg(mlevel, priv, format, arg...) \
-do { \
- if (NETIF_MSG_##mlevel & priv->msg_enable) \
- en_print(KERN_DEBUG, priv, format, ##arg); \
+#define en_dbg(mlevel, priv, format, ...) \
+do { \
+ if (NETIF_MSG_##mlevel & (priv)->msg_enable) \
+ en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \
} while (0)
-#define en_warn(priv, format, arg...) \
- en_print(KERN_WARNING, priv, format, ##arg)
-#define en_err(priv, format, arg...) \
- en_print(KERN_ERR, priv, format, ##arg)
-#define en_info(priv, format, arg...) \
- en_print(KERN_INFO, priv, format, ## arg)
-
-#define mlx4_err(mdev, format, arg...) \
- pr_err("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_info(mdev, format, arg...) \
- pr_info("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_warn(mdev, format, arg...) \
- pr_warning("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
+#define en_warn(priv, format, ...) \
+ en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
+#define en_err(priv, format, ...) \
+ en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
+#define en_info(priv, format, ...) \
+ en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
+
+#define mlx4_err(mdev, format, ...) \
+ pr_err(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...) \
+ pr_info(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...) \
+ pr_warn(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 24835853b753..64fb3e6431a0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
if (err)
- mlx4_warn(dev, "Failed to free mtt range at:"
- "%d order:%d\n", offset, order);
+ mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
+ offset, order);
return;
}
__mlx4_free_mtt_range(dev, offset, order);
@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err) {
- mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
- mlx4_warn(dev, "MR has MWs bound to it.\n");
+ mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
+ err);
return err;
}
@@ -773,7 +773,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
mlx4_alloc_mtt_range(dev,
fls(dev->caps.reserved_mtts - 1));
if (priv->reserved_mtts < 0) {
- mlx4_warn(dev, "MTT table of order %u is too small.\n",
+ mlx4_warn(dev, "MTT table of order %u is too small\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
goto err_reserve_mtts;
@@ -954,8 +954,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
- printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
- " failed (%d)\n", err);
+ printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
+ err);
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index b5b3549b0c8d..376f2f1d445e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -244,8 +244,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
if (validate_index(dev, table, index))
goto out;
if (--table->refs[index]) {
- mlx4_dbg(dev, "Have more references for index %d,"
- "no need to modify mac table\n", index);
+ mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
+ index);
goto out;
}
@@ -443,9 +443,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
}
if (--table->refs[index]) {
- mlx4_dbg(dev, "Have %d more references for index %d,"
- "no need to modify vlan table\n", table->refs[index],
- index);
+ mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
+ table->refs[index], index);
goto out;
}
table->entries[index] = 0;
@@ -706,8 +705,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
sizeof(gid_entry_tbl->raw))) {
/* found duplicate */
- mlx4_warn(dev, "requested gid entry for slave:%d "
- "is a duplicate of gid at index %d\n",
+ mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
slave, i);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 8e0c3cc2a1ec..14089d9e1667 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
}
if (total_size > dev_cap->max_icm_sz) {
- mlx4_err(dev, "Profile requires 0x%llx bytes; "
- "won't fit in 0x%llx bytes of context memory.\n",
- (unsigned long long) total_size,
- (unsigned long long) dev_cap->max_icm_sz);
+ mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
+ (unsigned long long) total_size,
+ (unsigned long long) dev_cap->max_icm_sz);
kfree(profile);
return -ENOMEM;
}
if (profile[i].size)
- mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
- "size 0x%10llx\n",
- i, res_name[profile[i].type], profile[i].log_num,
+ mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
+ i, res_name[profile[i].type],
+ profile[i].log_num,
(unsigned long long) profile[i].start,
(unsigned long long) profile[i].size);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 61d64ebffd56..9bdb6aeb3721 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -264,8 +264,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err) {
- mlx4_warn(dev, "Failed to release qp range"
- " base:%d cnt:%d\n", base_qpn, cnt);
+ mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
+ base_qpn, cnt);
}
} else
__mlx4_qp_release_range(dev, base_qpn, cnt);
@@ -577,8 +577,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
- mlx4_err(dev, "Failed to bring QP to state: "
- "%d with error: %d\n",
+ mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
states[i + 1], err);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index dd1b5093d8b1..ea1c6d092145 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev)
hca_header = kmalloc(256, GFP_KERNEL);
if (!hca_header) {
err = -ENOMEM;
- mlx4_err(dev, "Couldn't allocate memory to save HCA "
- "PCI header, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
goto out;
}
@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev)
continue;
if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't save HCA "
- "PCI header, aborting.\n");
+ mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
goto out;
}
}
@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev)
MLX4_RESET_SIZE);
if (!reset) {
err = -ENOMEM;
- mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
+ mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
goto out;
}
@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev)
if (vendor == 0xffff) {
err = -ENODEV;
- mlx4_err(dev, "PCI device did not come back after reset, "
- "aborting.\n");
+ mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
goto out;
}
@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
devctl)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA PCI Express "
- "Device Control register, aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
goto out;
}
linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
linkctl)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA PCI Express "
- "Link control register, aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
goto out;
}
}
@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA reg %x, "
- "aborting.\n", i);
+ mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
+ i);
goto out;
}
}
@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
hca_header[PCI_COMMAND / 4])) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA COMMAND, "
- "aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1c3fdd4a1f7d..a95df9d2645d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3880,7 +3880,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
}
}
if (!be_mac) {
- pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
+ pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
port);
return -EINVAL;
}
@@ -3923,7 +3923,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
- pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+ pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
return err;
}
rule_header = (struct _rule_hw *)(ctrl + 1);
@@ -3941,7 +3941,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
case MLX4_NET_TRANS_RULE_ID_IPV4:
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
- pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
+ pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
err = -EINVAL;
goto err_put;
@@ -3950,7 +3950,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
break;
default:
- pr_err("Corrupted mailbox.\n");
+ pr_err("Corrupted mailbox\n");
err = -EINVAL;
goto err_put;
}
@@ -3964,7 +3964,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
if (err) {
- mlx4_err(dev, "Fail to add flow steering resources.\n ");
+ mlx4_err(dev, "Fail to add flow steering resources\n");
/* detach rule*/
mlx4_cmd(dev, vhcr->out_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@ -4002,7 +4002,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
if (err) {
- mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+ mlx4_err(dev, "Fail to remove flow steering resources\n");
goto out;
}
@@ -4131,8 +4131,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_QP);
if (err)
- mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
- "for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4170,10 +4170,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_qps: failed"
- " to move slave %d qpn %d to"
- " reset\n", slave,
- qp->local_qpn);
+ mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
+ slave, qp->local_qpn);
atomic_dec(&qp->rcq->ref_count);
atomic_dec(&qp->scq->ref_count);
atomic_dec(&qp->mtt->ref_count);
@@ -4207,8 +4205,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_SRQ);
if (err)
- mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@ -4238,9 +4236,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_srqs: failed"
- " to move slave %d srq %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
slave, srqn);
atomic_dec(&srq->mtt->ref_count);
@@ -4275,8 +4271,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_CQ);
if (err)
- mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@ -4306,9 +4302,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_cqs: failed"
- " to move slave %d cq %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
slave, cqn);
atomic_dec(&cq->mtt->ref_count);
state = RES_CQ_ALLOCATED;
@@ -4340,8 +4334,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_MPT);
if (err)
- mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@ -4376,9 +4370,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_mrs: failed"
- " to move slave %d mpt %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
slave, mptn);
if (mpt->mtt)
atomic_dec(&mpt->mtt->ref_count);
@@ -4410,8 +4402,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_MTT);
if (err)
- mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@ -4513,8 +4505,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_EQ);
if (err)
- mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@ -4546,9 +4538,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_eqs: failed"
- " to move slave %d eqs %d to"
- " SW ownership\n", slave, eqn);
+ mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
+ slave, eqn);
mlx4_free_cmd_mailbox(dev, mailbox);
atomic_dec(&eq->mtt->ref_count);
state = RES_EQ_RESERVED;
@@ -4577,8 +4568,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_COUNTER);
if (err)
- mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@ -4608,8 +4599,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_XRCD);
if (err)
- mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@ -4754,10 +4745,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
0, MLX4_CMD_UPDATE_QP,
MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (err) {
- mlx4_info(dev, "UPDATE_QP failed for slave %d, "
- "port %d, qpn %d (%d)\n",
- work->slave, port, qp->local_qpn,
- err);
+ mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
+ work->slave, port, qp->local_qpn, err);
errors++;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 405c4fbcd0ad..87d1b018a9c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in));
}
- mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
- deliv_status_to_str(ent->status), ent->status);
+ mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+ err, deliv_status_to_str(ent->status), ent->status);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 64a61b286b2c..7f39ebcd6ad0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
*/
rmb();
- mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type));
+ mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
+ eq->eqn, eqe_type_str(eqe->type));
switch (eqe->type) {
case MLX5_EVENT_TYPE_COMP:
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
- mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
+ mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
+ func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages);
}
break;
default:
- mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn);
+ mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
+ eqe->type, eq->eqn);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c3eee5f70051..ee24f132e319 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
return err;
}
}
@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev,
- "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
+ "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev,
- "Can't set consistent PCI DMA mask, aborting.\n");
+ "Can't set consistent PCI DMA mask, aborting\n");
return err;
}
}
@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev)
int err = 0;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
+ dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
return -ENODEV;
}
@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
goto err_dbg;
}
err = request_bar(pdev);
if (err) {
- dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
+ dev_err(&pdev->dev, "error requesting BARs, aborting\n");
goto err_disable;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 68b74e1ae1b0..f0c9f9a7a361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,24 +39,26 @@
extern int mlx5_core_debug_mask;
-#define mlx5_core_dbg(dev, format, arg...) \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_dbg(dev, format, ...) \
+ pr_debug("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
-#define mlx5_core_dbg_mask(dev, mask, format, arg...) \
-do { \
- if ((mask) & mlx5_core_debug_mask) \
- pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \
- __func__, __LINE__, current->pid, ##arg); \
+#define mlx5_core_dbg_mask(dev, mask, format, ...) \
+do { \
+ if ((mask) & mlx5_core_debug_mask) \
+ mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
-#define mlx5_core_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_err(dev, format, ...) \
+ pr_err("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
-#define mlx5_core_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_warn(dev, format, ...) \
+ pr_warn("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
enum {
MLX5_CMD_DATA, /* print command payload only */
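The mlx5_core.h hunk above also makes mlx5_core_dbg_mask() delegate to mlx5_core_dbg() instead of duplicating the "%s:%s:%d:(pid %d)" prefix. A small user-space sketch of that delegation pattern follows, under the assumption of made-up stand-ins (debug_mask, core_dbg, core_dbg_mask); the point is simply that the prefix lives in one base macro and the mask check wraps it.

	#include <stdio.h>

	static unsigned int debug_mask = 0x2;	/* stand-in for a module parameter */

	#define core_dbg(fmt, ...) \
		printf("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__)

	#define core_dbg_mask(mask, fmt, ...)		\
	do {						\
		if ((mask) & debug_mask)		\
			core_dbg(fmt, ##__VA_ARGS__);	\
	} while (0)

	int main(void)
	{
		core_dbg_mask(0x1, "suppressed, mask bit not set\n");
		core_dbg_mask(0x2, "printed, err %d\n", -22);
		return 0;
	}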
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 4cc927649404..0a11b3fe9c19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
}
if (err) {
- mlx5_core_dbg(dev, "cmd exec faile %d\n", err);
+ mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
return err;
}
@@ -191,7 +191,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
}
if (out.hdr.status) {
- mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status);
+ mlx5_core_err(dev, "create_psv bad status %d\n",
+ out.hdr.status);
return mlx5_cmd_status_to_err(&out.hdr);
}
@@ -220,7 +221,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
}
if (out.hdr.status) {
- mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status);
+ mlx5_core_err(dev, "destroy_psv bad status %d\n",
+ out.hdr.status);
err = mlx5_cmd_status_to_err(&out.hdr);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d59790a82bc3..c2a953ef0e67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -311,7 +311,8 @@ retry:
in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
+ func_id, npages, err);
goto out_alloc;
}
dev->priv.fw_pages += npages;
@@ -319,7 +320,8 @@ retry:
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status);
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+ func_id, npages, out.hdr.status);
goto out_alloc;
}
}
@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err) {
- mlx5_core_err(dev, "failed recliaming pages\n");
+ mlx5_core_err(dev, "failed reclaiming pages\n");
goto out_free;
}
dev->priv.fw_pages -= npages;
@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work)
err = give_pages(dev, req->func_id, req->npages, 1);
if (err)
- mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
- "reclaim" : "give", err);
+ mlx5_core_warn(dev, "%s fail %d\n",
+ req->npages < 0 ? "reclaim" : "give", err);
kfree(req);
}
@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
optimal_reclaimed_pages(),
&nclaimed);
if (err) {
- mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
+ mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
+ err);
return err;
}
if (nclaimed)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 510576213dd0..8145b4668229 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
- mlx5_core_warn(dev, "ret %d", err);
+ mlx5_core_warn(dev, "ret %d\n", err);
return err;
}
@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err = radix_tree_insert(&table->tree, qp->qpn, qp);
spin_unlock_irq(&table->lock);
if (err) {
- mlx5_core_warn(dev, "err %d", err);
+ mlx5_core_warn(dev, "err %d\n", err);
goto err_cmd;
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index a2844ff322c4..e900c1abdef7 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -534,15 +534,6 @@ static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
netif_tx_start_all_queues(sp->dev);
}
-static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
-{
- if (!sp->config.multiq)
- sp->mac_control.fifos[fifo_no].queue_state =
- FIFO_QUEUE_START;
-
- netif_tx_start_all_queues(sp->dev);
-}
-
static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
if (!sp->config.multiq) {
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index c14bd3116e45..d49cba129081 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -66,6 +66,17 @@ config QLCNIC_VXLAN
Say Y here if you want to enable hardware offload support for
Virtual eXtensible Local Area Network (VXLAN) in the driver.
+config QLCNIC_HWMON
+ bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
+ depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
+ default y
+ ---help---
+ This configuration parameter can be used to read the
+ board temperature in Converged Ethernet devices
+ supported by qlcnic.
+
+ This data is available via the hwmon sysfs interface.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
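The QLCNIC_HWMON help text above notes that the board temperature is exported through the hwmon sysfs interface; the attribute added later in this series is temp1_input, in millidegrees Celsius. A rough user-space sketch of reading it follows; the hwmon0 index is only an assumption, since the hwmonN number is assigned when the device registers.

#include <stdio.h>

int main(void)
{
	/* Illustrative path; look up the right hwmonN entry by its name attribute. */
	const char *path = "/sys/class/hwmon/hwmon0/temp1_input";
	FILE *f = fopen(path, "r");
	long millideg;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &millideg) == 1)
		printf("board temperature: %ld.%03ld C\n",
		       millideg / 1000, millideg % 1000);
	fclose(f);
	return 0;
}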
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f785d01c7d12..6e7527e2b595 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -39,8 +39,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 57
-#define QLCNIC_LINUX_VERSIONID "5.3.57"
+#define _QLCNIC_LINUX_SUBVERSION 59
+#define QLCNIC_LINUX_VERSIONID "5.3.59"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -537,6 +537,7 @@ struct qlcnic_hardware_context {
u8 phys_port_id[ETH_ALEN];
u8 lb_mode;
u16 vxlan_port;
+ struct device *hwmon_dev;
};
struct qlcnic_adapter_stats {
@@ -1018,6 +1019,8 @@ struct qlcnic_ipaddr {
#define QLCNIC_DEL_VXLAN_PORT 0x200000
#endif
+#define QLCNIC_VLAN_FILTERING 0x800000
+
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -1692,7 +1695,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *);
int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-void qlcnic_sriov_vf_schedule_multi(struct net_device *);
+void qlcnic_sriov_vf_set_multi(struct net_device *);
int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
u16 *);
@@ -2338,6 +2341,16 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
}
+static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
+{
+ bool status;
+
+ status = (qlcnic_sriov_pf_check(adapter) ||
+ qlcnic_sriov_vf_check(adapter)) ? true : false;
+
+ return status;
+}
+
static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
{
if (qlcnic_84xx_check(adapter))
@@ -2345,4 +2358,18 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
else
return QLC_DEFAULT_VNIC_COUNT;
}
+
+#ifdef CONFIG_QLCNIC_HWMON
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
+#else
+static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+#endif
#endif /* __QLCNIC_H_ */
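The CONFIG_QLCNIC_HWMON block above uses empty inline stubs so callers never need their own #ifdef. A generic, self-contained sketch of that pattern (the foo_* names and CONFIG_FOO_HWMON are hypothetical, chosen only to keep the example compilable on its own):

#include <stdio.h>

struct foo_dev { int id; };

#ifdef CONFIG_FOO_HWMON
static void foo_register_hwmon(struct foo_dev *dev)
{
	printf("registering hwmon for dev %d\n", dev->id);
}
#else
/* Stub keeps the call site unconditional when the option is disabled. */
static inline void foo_register_hwmon(struct foo_dev *dev) { (void)dev; }
#endif

int main(void)
{
	struct foo_dev dev = { .id = 0 };

	foo_register_hwmon(&dev);	/* no #ifdef needed at the call site */
	return 0;
}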
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b7cffb46a75d..a4a4ec0b68f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -33,6 +33,7 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
#define QLC_SKIP_INACTIVE_PCI_REGS 7
+#define QLC_MAX_LEGACY_FUNC_SUPP 8
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -357,8 +358,15 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
if (!ahw->intr_tbl)
return -ENOMEM;
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
+ dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n",
+ ahw->pci_func);
+ return -EOPNOTSUPP;
+ }
+
qlcnic_83xx_enable_legacy(adapter);
+ }
for (i = 0; i < num_msix; i++) {
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -879,6 +887,9 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
return 0;
}
}
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
+ __func__, type);
return -EINVAL;
}
@@ -3026,19 +3037,18 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
}
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
u32 *data, u32 count)
{
int i, j, ret = 0;
u32 temp;
- int err = 0;
/* Check alignment */
if (addr & 0xF)
return -EIO;
mutex_lock(&adapter->ahw->mem_lock);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
for (i = 0; i < count; i++, addr += 16) {
if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
@@ -3049,26 +3059,16 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
return -EIO;
}
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
- QLCNIC_TA_WRITE_ENABLE);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
- QLCNIC_TA_WRITE_START);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
- if (err == -EIO) {
- mutex_unlock(&adapter->ahw->mem_lock);
- return err;
- }
+ temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
if ((temp & TA_CTL_BUSY) == 0)
break;
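The tail of this hunk polls QLCNIC_MS_CTRL until the busy bit clears, bounded by MAX_CTL_CHECK, now through the shared qlcnic_ind_rd() helper. A hedged, stand-alone sketch of that bounded-poll idiom follows; reg_read(), BUSY_BIT and MAX_POLLS are placeholders rather than driver symbols.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUSY_BIT	(1u << 0)
#define MAX_POLLS	1000

/* Stand-in for an indirect register read; a real driver would touch hardware. */
static uint32_t reg_read(uint32_t addr)
{
	(void)addr;
	return 0;		/* pretend the engine is already idle */
}

/* Bounded poll: true once BUSY clears, false when the retry budget runs out. */
static bool wait_not_busy(uint32_t status_reg)
{
	int i;

	for (i = 0; i < MAX_POLLS; i++)
		if (!(reg_read(status_reg) & BUSY_BIT))
			return true;
	return false;
}

int main(void)
{
	printf("idle: %d\n", wait_not_busy(0x100));
	return 0;
}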
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 88d809c35633..97784d09933f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -560,7 +560,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
-void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
@@ -617,7 +617,6 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
@@ -659,4 +658,5 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
u32 qlcnic_83xx_get_cap_size(void *, int);
void qlcnic_83xx_set_sys_info(void *, int, u32);
void qlcnic_83xx_store_cap_mask(void *, u32);
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index ba20c721ee97..f33559b72528 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1363,8 +1363,8 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
return ret;
}
/* 16 byte write to MS memory */
- ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
- size / 16);
+ ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+ size / 16);
if (ret) {
vfree(p_cache);
return ret;
@@ -1389,8 +1389,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
p_cache = (u32 *)fw->data;
addr = (u64)dest;
- ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
- p_cache, size / 16);
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ p_cache, size / 16);
if (ret) {
dev_err(&adapter->pdev->dev, "MS memory write failed\n");
release_firmware(fw);
@@ -1405,8 +1405,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
data[i] = fw->data[size + i];
for (; i < 16; i++)
data[i] = 0;
- ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
- (u32 *)data, 1);
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ (u32 *)data, 1);
if (ret) {
dev_err(&adapter->pdev->dev,
"MS memory write failed\n");
@@ -2181,6 +2181,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
max_sds_rings = QLCNIC_MAX_SDS_RINGS;
max_tx_rings = QLCNIC_MAX_TX_RINGS;
} else {
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
+ __func__, ret);
return -EIO;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index c1e11f5715b0..304e247bdf33 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1027,8 +1027,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
u32 arg1;
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
- !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+ !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
return err;
+ }
arg1 = id | (enable_mirroring ? BIT_4 : 0);
arg1 |= pci_func << 8;
@@ -1318,8 +1321,12 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
u32 arg1, arg2 = 0;
u8 pci_func;
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
return err;
+ }
+
pci_func = esw_cfg->pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
@@ -1363,6 +1370,8 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
arg1 &= ~(0x0ffff << 16);
break;
default:
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
+ __func__, esw_cfg->op_mode);
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 9f3adf4e70b5..c9e8574959b7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -373,12 +373,16 @@ int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
return data;
}
-void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
{
+ int ret = 0;
+
if (qlcnic_82xx_check(adapter))
qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
else
- qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+ ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+
+ return ret;
}
static int
@@ -567,28 +571,14 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_mac_vlan_list *cur;
- struct netdev_hw_addr *ha;
- size_t temp;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- if (qlcnic_sriov_vf_check(adapter)) {
- if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(ha, netdev) {
- temp = sizeof(struct qlcnic_mac_vlan_list);
- cur = kzalloc(temp, GFP_ATOMIC);
- if (cur == NULL)
- break;
- memcpy(cur->mac_addr,
- ha->addr, ETH_ALEN);
- list_add_tail(&cur->list, &adapter->vf_mc_list);
- }
- }
- qlcnic_sriov_vf_schedule_multi(adapter->netdev);
- return;
- }
- __qlcnic_set_multi(netdev, 0);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_set_multi(netdev);
+ else
+ __qlcnic_set_multi(netdev, 0);
}
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 173b3d12991f..e45bf09af0c9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -305,7 +305,6 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
- struct net_device *netdev = adapter->netdev;
u16 protocol = ntohs(skb->protocol);
struct qlcnic_filter *fil, *tmp_fil;
struct hlist_head *head;
@@ -314,27 +313,16 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
u16 vlan_id = 0;
u8 hindex, hval;
- if (!qlcnic_sriov_pf_check(adapter)) {
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
- return;
- } else {
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ return;
+
+ if (adapter->flags & QLCNIC_VLAN_FILTERING) {
if (protocol == ETH_P_8021Q) {
vh = (struct vlan_ethhdr *)skb->data;
vlan_id = ntohs(vh->h_vlan_TCI);
} else if (vlan_tx_tag_present(skb)) {
vlan_id = vlan_tx_tag_get(skb);
}
-
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
- !vlan_id)
- return;
- }
-
- if (adapter->fhash.fnum >= adapter->fhash.fmax) {
- adapter->stats.mac_filter_limit_overrun++;
- netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
- adapter->fhash.fmax, adapter->fhash.fnum);
- return;
}
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
@@ -353,6 +341,11 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
}
}
+ if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
+ adapter->stats.mac_filter_limit_overrun++;
+ return;
+ }
+
fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
if (!fil)
return;
@@ -1216,8 +1209,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->rx_mac_learn) {
t_vid = 0;
is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
@@ -1293,8 +1285,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->rx_mac_learn) {
t_vid = 0;
is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 7e55e88a81bf..d2e18b52caba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -378,7 +378,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter)) {
if (is_unicast_ether_addr(addr)) {
err = dev_uc_del(netdev, addr);
if (!err)
@@ -402,7 +403,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+ !qlcnic_sriov_check(adapter)) {
pr_info("%s: FDB e-switch is not enabled\n", __func__);
return -EOPNOTSUPP;
}
@@ -432,7 +434,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter))
idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
return idx;
@@ -690,10 +693,10 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
adapter->msix_entries[vector].entry = vector;
restore:
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err > 0) {
+ err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
+ if (err == -ENOSPC) {
if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
- return -ENOSPC;
+ return err;
netdev_info(adapter->netdev,
"Unable to allocate %d MSI-X vectors, Available vectors %d\n",
@@ -1014,6 +1017,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
if (pfn >= ahw->max_vnic_func) {
ret = QL_STATUS_INVALID_PARAM;
+ dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
+ __func__, pfn, ahw->max_vnic_func);
goto err_eswitch;
}
@@ -1915,8 +1920,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
@@ -1928,6 +1931,8 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_delete_lb_filters(adapter);
qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
qlcnic_napi_disable(adapter);
@@ -2052,6 +2057,7 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int err = 0;
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -2061,6 +2067,18 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
goto err_out;
}
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ }
+
/* clear stats */
memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
@@ -2417,9 +2435,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
- if (pdev->is_virtfn)
- return -ENODEV;
-
err = pci_enable_device(pdev);
if (err)
return err;
@@ -2552,9 +2567,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case -ENOMEM:
dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
goto err_out_free_hw;
+ case -EOPNOTSUPP:
+ dev_err(&pdev->dev, "Adapter initialization failed\n");
+ goto err_out_free_hw;
default:
- dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n");
- dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
+ dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
goto err_out_maintenance_mode;
}
}
@@ -2628,7 +2645,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_alloc_lb_filters_mem(adapter);
qlcnic_add_sysfs(adapter);
-
+ qlcnic_register_hwmon_dev(adapter);
return 0;
err_out_disable_mbx_intr:
@@ -2698,9 +2715,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
return;
netdev = adapter->netdev;
- qlcnic_sriov_pf_disable(adapter);
qlcnic_cancel_idc_work(adapter);
+ qlcnic_sriov_pf_disable(adapter);
ahw = adapter->ahw;
unregister_netdev(netdev);
@@ -2735,6 +2752,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_remove_sysfs(adapter);
+ qlcnic_unregister_hwmon_dev(adapter);
+
qlcnic_cleanup_pci_map(adapter->ahw);
qlcnic_release_firmware(adapter);
@@ -2828,6 +2847,8 @@ static int qlcnic_close(struct net_device *netdev)
return 0;
}
+#define QLCNIC_VF_LB_BUCKET_SIZE 1
+
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
{
void *head;
@@ -2843,7 +2864,10 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
spin_lock_init(&adapter->mac_learn_lock);
spin_lock_init(&adapter->rx_mac_learn_lock);
- if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_sriov_vf_check(adapter)) {
+ filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
+ adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
+ } else if (qlcnic_82xx_check(adapter)) {
filter_size = QLCNIC_LB_MAX_FILTERS;
adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
} else {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 37b979b1266b..f7694da8ed5d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -238,6 +238,8 @@ void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
hdr->drv_cap_mask = hdr->cap_mask;
fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
}
inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
@@ -276,6 +278,8 @@ inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
hdr->saved_state[index] = value;
}
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
{
struct qlcnic_83xx_dump_template_hdr *hdr;
@@ -288,6 +292,9 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
hdr->drv_cap_mask = hdr->cap_mask;
fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
+ QLCNIC_TEMPLATE_VERSION;
}
inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
@@ -658,29 +665,28 @@ out:
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
struct __mem *mem)
{
- struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
u32 dma_no, dma_base_addr, temp_addr;
int i, ret, dma_sts;
+ void *tmpl_hdr;
tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
- dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+ dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
- mem->desc_card_addr);
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
if (ret)
return ret;
temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
+ ret = qlcnic_ind_wr(adapter, temp_addr, 0);
if (ret)
return ret;
temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
- mem->start_dma_cmd);
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
if (ret)
return ret;
@@ -710,15 +716,16 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
u32 temp, dma_base_addr, size = 0, read_size = 0;
struct qlcnic_pex_dma_descriptor *dma_descr;
- struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
dma_addr_t dma_phys_addr;
void *dma_buffer;
+ void *tmpl_hdr;
tmpl_hdr = fw_dump->tmpl_hdr;
/* Check if DMA engine is available */
- temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+ temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
temp = qlcnic_ind_rd(adapter,
dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
@@ -764,8 +771,8 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
/* Write DMA descriptor to MS memory*/
temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
- *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
- (u32 *)dma_descr, temp);
+ *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
+ (u32 *)dma_descr, temp);
if (*ret) {
dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
mem->desc_card_addr);
@@ -1141,8 +1148,6 @@ free_mem:
return err;
}
-#define QLCNIC_TEMPLATE_VERSION (0x20001)
-
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw;
@@ -1203,12 +1208,6 @@ flash_temp:
"Default minidump capture mask 0x%x\n",
fw_dump->cap_mask);
- if (qlcnic_83xx_check(adapter) &&
- (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
- fw_dump->use_pex_dma = true;
- else
- fw_dump->use_pex_dma = false;
-
qlcnic_enable_fw_dump_state(adapter);
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 396bd1fd1d27..335b50f7bd3e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -52,6 +52,7 @@ enum qlcnic_bc_commands {
QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
};
+#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
#define QLC_BC_CMD 1
struct qlcnic_trans_list {
@@ -151,13 +152,14 @@ struct qlcnic_vf_info {
struct qlcnic_trans_list rcv_pend;
struct qlcnic_adapter *adapter;
struct qlcnic_vport *vp;
- struct mutex vlan_list_lock; /* Lock for VLAN list */
+ spinlock_t vlan_list_lock; /* Lock for VLAN list */
};
struct qlcnic_async_work_list {
struct list_head list;
struct work_struct work;
void *ptr;
+ struct qlcnic_cmd_args *cmd;
};
struct qlcnic_back_channel {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 6afe9c1f5ab9..498fa6350c8d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -39,6 +39,8 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.read_crb = qlcnic_83xx_read_crb,
@@ -181,7 +183,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vf->adapter = adapter;
vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
mutex_init(&vf->send_cmd_lock);
- mutex_init(&vf->vlan_list_lock);
+ spin_lock_init(&vf->vlan_list_lock);
INIT_LIST_HEAD(&vf->rcv_act.wait_list);
INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
spin_lock_init(&vf->rcv_act.lock);
@@ -197,8 +199,9 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
goto qlcnic_destroy_async_wq;
}
sriov->vf_info[i].vp = vp;
+ vp->vlan_mode = QLC_GUEST_VLAN_MODE;
vp->max_tx_bw = MAX_BW;
- vp->spoofchk = true;
+ vp->spoofchk = false;
random_ether_addr(vp->mac);
dev_info(&adapter->pdev->dev,
"MAC Address %pM is configured for VF %d\n",
@@ -515,6 +518,8 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
{
int err;
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
+ adapter->ahw->total_nic_func = 1;
INIT_LIST_HEAD(&adapter->vf_mc_list);
if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
dev_warn(&adapter->pdev->dev,
@@ -770,6 +775,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
cmd->req.arg = (u32 *)trans->req_pay;
cmd->rsp.arg = (u32 *)trans->rsp_pay;
cmd_op = cmd->req.arg[0] & 0xff;
+ cmd->cmd_op = cmd_op;
remainder = (trans->rsp_pay_size) % (bc_pay_sz);
num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
if (remainder)
@@ -1356,7 +1362,7 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
return -EIO;
}
-static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1408,12 +1414,17 @@ retry:
(mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
rsp = QLCNIC_RCODE_SUCCESS;
} else {
- rsp = mbx_err_code;
- if (!rsp)
- rsp = 1;
- dev_err(dev,
- "MBX command 0x%x failed with err:0x%x for VF %d\n",
- opcode, mbx_err_code, func);
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ rsp = mbx_err_code;
+ if (!rsp)
+ rsp = 1;
+
+ dev_err(dev,
+ "MBX command 0x%x failed with err:0x%x for VF %d\n",
+ opcode, mbx_err_code, func);
+ }
}
err_out:
@@ -1435,6 +1446,16 @@ free_cmd:
return rsp;
}
+
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
+ return qlcnic_sriov_async_issue_cmd(adapter, cmd);
+ else
+ return __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
{
struct qlcnic_cmd_args cmd;
@@ -1465,58 +1486,28 @@ out:
return ret;
}
-static void qlcnic_vf_add_mc_list(struct net_device *netdev)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
- struct qlcnic_mac_vlan_list *cur;
- struct list_head *head, tmp_list;
struct qlcnic_vf_info *vf;
u16 vlan_id;
int i;
- static const u8 bcast_addr[ETH_ALEN] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
-
vf = &adapter->ahw->sriov->vf_info[0];
- INIT_LIST_HEAD(&tmp_list);
- head = &adapter->vf_mc_list;
- netif_addr_lock_bh(netdev);
- while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
- list_move(&cur->list, &tmp_list);
- }
-
- netif_addr_unlock_bh(netdev);
-
- while (!list_empty(&tmp_list)) {
- cur = list_entry((&tmp_list)->next,
- struct qlcnic_mac_vlan_list, list);
- if (!qlcnic_sriov_check_any_vlan(vf)) {
- qlcnic_nic_add_mac(adapter, bcast_addr, 0);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
- } else {
- mutex_lock(&vf->vlan_list_lock);
- for (i = 0; i < sriov->num_allowed_vlans; i++) {
- vlan_id = vf->sriov_vlans[i];
- if (vlan_id) {
- qlcnic_nic_add_mac(adapter, bcast_addr,
- vlan_id);
- qlcnic_nic_add_mac(adapter,
- cur->mac_addr,
- vlan_id);
- }
- }
- mutex_unlock(&vf->vlan_list_lock);
- if (qlcnic_84xx_check(adapter)) {
- qlcnic_nic_add_mac(adapter, bcast_addr, 0);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
- }
+ if (!qlcnic_sriov_check_any_vlan(vf)) {
+ qlcnic_nic_add_mac(adapter, mac, 0);
+ } else {
+ spin_lock(&vf->vlan_list_lock);
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan_id = vf->sriov_vlans[i];
+ if (vlan_id)
+ qlcnic_nic_add_mac(adapter, mac, vlan_id);
}
- list_del(&cur->list);
- kfree(cur);
+ spin_unlock(&vf->vlan_list_lock);
+ if (qlcnic_84xx_check(adapter))
+ qlcnic_nic_add_mac(adapter, mac, 0);
}
}
@@ -1525,6 +1516,7 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
struct list_head *head = &bc->async_list;
struct qlcnic_async_work_list *entry;
+ flush_workqueue(bc->bc_async_wq);
while (!list_empty(head)) {
entry = list_entry(head->next, struct qlcnic_async_work_list,
list);
@@ -1534,10 +1526,14 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
}
}
-static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ struct netdev_hw_addr *ha;
u32 mode = VPORT_MISS_MODE_DROP;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -1549,23 +1545,49 @@ static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
} else if ((netdev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(netdev) > ahw->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ } else {
+ qlcnic_vf_add_mc_list(netdev, bcast_addr);
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr);
+ }
}
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_vf_add_mc_list(netdev);
+ /* Configure unicast MAC addresses; if there is not enough space
+ * to store all of them, enable promiscuous mode.
+ */
+ if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if (!netdev_uc_empty(netdev)) {
+ netdev_for_each_uc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr);
+ }
+
+ if (adapter->pdev->is_virtfn) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
+ }
+ }
qlcnic_nic_set_promisc(adapter, mode);
}
-static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
struct qlcnic_async_work_list *entry;
- struct net_device *netdev;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args *cmd;
entry = container_of(work, struct qlcnic_async_work_list, work);
- netdev = (struct net_device *)entry->ptr;
-
- qlcnic_sriov_vf_set_multi(netdev);
+ adapter = entry->ptr;
+ cmd = entry->cmd;
+ __qlcnic_sriov_issue_cmd(adapter, cmd);
return;
}
@@ -1595,8 +1617,9 @@ qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
return entry;
}
-static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
- work_func_t func, void *data)
+static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
+ work_func_t func, void *data,
+ struct qlcnic_cmd_args *cmd)
{
struct qlcnic_async_work_list *entry = NULL;
@@ -1605,21 +1628,23 @@ static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
return;
entry->ptr = data;
+ entry->cmd = cmd;
INIT_WORK(&entry->work, func);
queue_work(bc->bc_async_wq, &entry->work);
}
-void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
if (adapter->need_fw_reset)
- return;
+ return -EIO;
- qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
- netdev);
+ qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
+ adapter, cmd);
+ return 0;
}
static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
@@ -1843,6 +1868,12 @@ static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
return 0;
}
+static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+}
+
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
{
struct qlcnic_adapter *adapter;
@@ -1874,6 +1905,8 @@ static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
}
idc->prev_state = idc->curr_state;
+ qlcnic_sriov_vf_periodic_tasks(adapter);
+
if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
idc->delay);
@@ -1897,7 +1930,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
if (!vf->sriov_vlans)
return err;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
for (i = 0; i < sriov->num_allowed_vlans; i++) {
if (vf->sriov_vlans[i] == vlan_id) {
@@ -1906,7 +1939,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
}
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
@@ -1915,12 +1948,12 @@ static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
{
int err = 0;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan >= sriov->num_allowed_vlans)
err = -EINVAL;
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
@@ -1973,7 +2006,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
if (!vf->sriov_vlans)
return;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
switch (opcode) {
case QLC_VLAN_ADD:
@@ -1986,7 +2019,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
netdev_err(adapter->netdev, "Invalid VLAN operation\n");
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return;
}
@@ -1994,6 +2027,7 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_vf_info *vf;
struct qlcnic_cmd_args cmd;
int ret;
@@ -2019,14 +2053,18 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
dev_err(&adapter->pdev->dev,
"Failed to configure guest VLAN, err=%d\n", ret);
} else {
+ netif_addr_lock_bh(netdev);
qlcnic_free_mac_list(adapter);
+ netif_addr_unlock_bh(netdev);
if (enable)
qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
else
qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
- qlcnic_set_multi(adapter->netdev);
+ netif_addr_lock_bh(netdev);
+ qlcnic_set_multi(netdev);
+ netif_addr_unlock_bh(netdev);
}
qlcnic_free_mbx_args(&cmd);
@@ -2157,11 +2195,11 @@ bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
{
bool err = false;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan)
err = true;
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
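Several hunks above convert the per-VF vlan_list_lock from a mutex to a spinlock taken with spin_lock_bh(), so the VLAN list can be touched from contexts that must not sleep. A minimal, hypothetical module sketch of the same locking idiom follows; none of these names are driver symbols.

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/spinlock.h>

struct vf_state {
	spinlock_t vlan_lock;		/* protects vlan[] and num_vlan */
	u16 vlan[8];
	int num_vlan;
};

static struct vf_state demo_vf;

static bool vf_has_vlan(struct vf_state *vf, u16 vid)
{
	bool found = false;
	int i;

	/* _bh variant: the same data may also be walked from softirq context. */
	spin_lock_bh(&vf->vlan_lock);
	for (i = 0; i < vf->num_vlan; i++)
		if (vf->vlan[i] == vid)
			found = true;
	spin_unlock_bh(&vf->vlan_lock);
	return found;
}

static int __init vlan_lock_demo_init(void)
{
	spin_lock_init(&demo_vf.vlan_lock);
	demo_vf.vlan[demo_vf.num_vlan++] = 100;
	pr_info("vid 100 present: %d\n", vf_has_vlan(&demo_vf, 100));
	return 0;
}

static void __exit vlan_lock_demo_exit(void) { }

module_init(vlan_lock_demo_init);
module_exit(vlan_lock_demo_exit);
MODULE_LICENSE("GPL");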
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 280137991544..6d2f72f114f2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -16,6 +16,7 @@
#define QLC_VF_FLOOD_BIT BIT_16
#define QLC_FLOOD_MODE 0x5
#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
+#define QLC_INTR_COAL_TYPE_MASK 0x7
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
@@ -83,7 +84,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
info->max_tx_ques = res->num_tx_queues / max;
if (qlcnic_83xx_pf_check(adapter))
- num_macs = 1;
+ num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
@@ -337,9 +338,12 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
cmd.req.arg[1] = 0x4;
if (enable) {
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
cmd.req.arg[1] |= BIT_16;
if (qlcnic_84xx_check(adapter))
cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+ } else {
+ adapter->flags &= ~QLCNIC_VLAN_FILTERING;
}
err = qlcnic_issue_cmd(adapter, &cmd);
@@ -471,12 +475,12 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
return -EPERM;
}
+ qlcnic_sriov_pf_disable(adapter);
+
rtnl_lock();
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
- qlcnic_sriov_pf_disable(adapter);
-
qlcnic_sriov_free_vlans(adapter);
qlcnic_sriov_pf_cleanup(adapter);
@@ -595,7 +599,6 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
qlcnic_sriov_alloc_vlans(adapter);
- err = qlcnic_sriov_pf_enable(adapter, num_vfs);
return err;
del_flr_queue:
@@ -626,25 +629,36 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
__qlcnic_down(adapter, netdev);
err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
- if (err) {
- netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
- adapter->portnum);
+ if (err)
+ goto error;
- err = -EIO;
- if (qlcnic_83xx_configure_opmode(adapter))
- goto error;
- } else {
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ rtnl_unlock();
+ err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+ if (!err) {
netdev_info(netdev,
"SR-IOV is enabled successfully on port %d\n",
adapter->portnum);
/* Return number of vfs enabled */
- err = num_vfs;
+ return num_vfs;
}
+
+ rtnl_lock();
if (netif_running(netdev))
- __qlcnic_up(adapter, netdev);
+ __qlcnic_down(adapter, netdev);
error:
+ if (!qlcnic_83xx_configure_opmode(adapter)) {
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+ }
+
rtnl_unlock();
+ netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+ adapter->portnum);
+
return err;
}
@@ -773,7 +787,7 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
u16 vlan, u8 op)
{
- struct qlcnic_cmd_args cmd;
+ struct qlcnic_cmd_args *cmd;
struct qlcnic_macvlan_mbx mv;
struct qlcnic_vport *vp;
u8 *addr;
@@ -783,21 +797,27 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
vp = vf->vp;
- if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
return -ENOMEM;
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ goto free_cmd;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
if (vpid < 0) {
err = -EINVAL;
- goto out;
+ goto free_args;
}
if (vlan)
op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
- cmd.req.arg[1] = op | (1 << 8) | (3 << 6);
- cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+ cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
+ cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
addr = vp->mac;
mv.vlan = vlan;
@@ -807,18 +827,18 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
mv.mac_addr3 = addr[3];
mv.mac_addr4 = addr[4];
mv.mac_addr5 = addr[5];
- buf = &cmd.req.arg[2];
+ buf = &cmd->req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
- err = qlcnic_issue_cmd(adapter, &cmd);
+ err = qlcnic_issue_cmd(adapter, cmd);
- if (err)
- dev_err(&adapter->pdev->dev,
- "MAC-VLAN %s to CAM failed, err=%d.\n",
- ((op == 1) ? "add " : "delete "), err);
+ if (!err)
+ return err;
-out:
- qlcnic_free_mbx_args(&cmd);
+free_args:
+ qlcnic_free_mbx_args(cmd);
+free_cmd:
+ kfree(cmd);
return err;
}
@@ -840,7 +860,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
sriov = adapter->ahw->sriov;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan) {
for (i = 0; i < sriov->num_allowed_vlans; i++) {
vlan = vf->sriov_vlans[i];
@@ -849,7 +869,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
opcode);
}
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
if (vf->vp->vlan_mode != QLC_PVID_MODE) {
if (qlcnic_83xx_pf_check(adapter) &&
@@ -1178,19 +1198,41 @@ static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
{
struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
u16 ctx_id, pkts, time;
+ int err = -EINVAL;
+ u8 type;
+ type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
ctx_id = cmd->req.arg[1] >> 16;
pkts = cmd->req.arg[2] & 0xffff;
time = cmd->req.arg[2] >> 16;
- if (ctx_id != vf->rx_ctx_id)
- return -EINVAL;
- if (pkts > coal->rx_packets)
- return -EINVAL;
- if (time < coal->rx_time_us)
- return -EINVAL;
+ switch (type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
+ time < coal->rx_time_us)
+ goto err_label;
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
+ time < coal->tx_time_us)
+ goto err_label;
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
+ type);
+ return err;
+ }
return 0;
+
+err_label:
+ netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
+ vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
+ vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
+ netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
+ ctx_id, pkts, time, type);
+
+ return err;
}
static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
@@ -1214,7 +1256,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_macvlan_mbx *macvlan;
struct qlcnic_vport *vp = vf->vp;
u8 op, new_op;
@@ -1224,14 +1265,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
cmd->req.arg[1] |= (vf->vp->handle << 16);
cmd->req.arg[1] |= BIT_31;
- macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
- if (!(macvlan->mac_addr0 & BIT_0)) {
- dev_err(&adapter->pdev->dev,
- "MAC address change is not allowed from VF %d",
- vf->pci_func);
- return -EINVAL;
- }
-
if (vp->vlan_mode == QLC_PVID_MODE) {
op = cmd->req.arg[1] & 0x7;
cmd->req.arg[1] &= ~0x7;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index cd346e27f2e1..f5786d5792df 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -19,6 +19,10 @@
#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>
+#ifdef CONFIG_QLCNIC_HWMON
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
#define QLC_STATUS_UNSUPPORTED_CMD -2
@@ -358,6 +362,8 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
if (adapter->npars[i].pci_func == pci_func)
return i;
}
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
return -EINVAL;
}
@@ -1243,6 +1249,68 @@ static struct bin_attribute bin_attr_flash = {
.write = qlcnic_83xx_sysfs_flash_write_handler,
};
+#ifdef CONFIG_QLCNIC_HWMON
+
+static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned int temperature = 0, value = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+ else if (qlcnic_82xx_check(adapter))
+ value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+ temperature = qlcnic_get_temp_val(value);
+ /* display millidegrees Celsius */
+ temperature *= 1000;
+ return sprintf(buf, "%u\n", temperature);
+}
+
+/* hwmon-sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ qlcnic_hwmon_show_temp, NULL, 1);
+
+static struct attribute *qlcnic_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(qlcnic_hwmon);
+
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct device *hwmon_dev;
+
+ /* Skip hwmon registration for a VF device */
+ if (qlcnic_sriov_vf_check(adapter)) {
+ adapter->ahw->hwmon_dev = NULL;
+ return;
+ }
+ hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
+ adapter,
+ qlcnic_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(dev, "Cannot register with hwmon, err=%ld\n",
+ PTR_ERR(hwmon_dev));
+ hwmon_dev = NULL;
+ }
+ adapter->ahw->hwmon_dev = hwmon_dev;
+}
+
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *hwmon_dev = adapter->ahw->hwmon_dev;
+ if (hwmon_dev) {
+ hwmon_device_unregister(hwmon_dev);
+ adapter->ahw->hwmon_dev = NULL;
+ }
+}
+#endif
+
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 0a1d76acab81..6e36fe14b848 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3595,7 +3595,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
}
return status;
err_irq:
- netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
ql_free_irq(qdev);
return status;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 82a9a983869f..698494481d18 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -425,8 +425,8 @@ dmamem_err:
* @rx_rsize: ring size
* Description: this function initializes the DMA RX descriptor
*/
-void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
- int rx_rsize)
+static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
+ int rx_rsize)
{
dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
rx_ring->dma_rx, rx_ring->dma_rx_phy);
@@ -519,8 +519,8 @@ error:
* @tx_rsize: ring size
* Description: this function initializes the DMA TX descriptor
*/
-void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
- int tx_rsize)
+static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
+ int tx_rsize)
{
dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
tx_ring->dma_tx, tx_ring->dma_tx_phy);
@@ -1221,11 +1221,10 @@ static int sxgbe_release(struct net_device *dev)
return 0;
}
-
/* Prepare first Tx descriptor for doing TSO operation */
-void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
- struct sxgbe_tx_norm_desc *first_desc,
- struct sk_buff *skb)
+static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
+ struct sxgbe_tx_norm_desc *first_desc,
+ struct sk_buff *skb)
{
unsigned int total_hdr_len, tcp_hdr_len;
@@ -1914,40 +1913,6 @@ static void sxgbe_set_rx_mode(struct net_device *dev)
readl(ioaddr + SXGBE_HASH_LOW));
}
-/**
- * sxgbe_config - entry point for changing configuration mode passed on by
- * ifconfig
- * @dev : pointer to the device structure
- * @map : pointer to the device mapping structure
- * Description:
- * This function is a driver entry point which gets called by the kernel
- * whenever some device configuration is changed.
- * Return value:
- * This function returns 0 if success and appropriate error otherwise.
- */
-static int sxgbe_config(struct net_device *dev, struct ifmap *map)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
-
- /* Can't act on a running interface */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != (unsigned long)priv->ioaddr) {
- netdev_warn(dev, "can't change I/O address\n");
- return -EOPNOTSUPP;
- }
-
- /* Don't allow changing the IRQ */
- if (map->irq != priv->irq) {
- netdev_warn(dev, "not change IRQ number %d\n", priv->irq);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
* sxgbe_poll_controller - entry point for polling receive by device
@@ -2009,7 +1974,6 @@ static const struct net_device_ops sxgbe_netdev_ops = {
.ndo_set_rx_mode = sxgbe_set_rx_mode,
.ndo_tx_timeout = sxgbe_tx_timeout,
.ndo_do_ioctl = sxgbe_ioctl,
- .ndo_set_config = sxgbe_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sxgbe_poll_controller,
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d940034acdd4..93cf4f63f426 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2214,27 +2214,6 @@ static void stmmac_tx_timeout(struct net_device *dev)
stmmac_tx_err(priv);
}
-/* Configuration changes (passed on by ifconfig) */
-static int stmmac_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP) /* can't act on a running interface */
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != dev->base_addr) {
- pr_warn("%s: can't change I/O address\n", dev->name);
- return -EOPNOTSUPP;
- }
-
- /* Don't allow changing the IRQ */
- if (map->irq != dev->irq) {
- pr_warn("%s: not change IRQ number %d\n", dev->name, dev->irq);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
/**
* stmmac_set_rx_mode - entry point for multicast addressing
* @dev : pointer to the device structure
@@ -2600,7 +2579,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
- .ndo_set_config = stmmac_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a468eb107823..a5b1e1b776fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -205,10 +205,13 @@ int stmmac_mdio_register(struct net_device *ndev)
if (new_bus == NULL)
return -ENOMEM;
- if (mdio_bus_data->irqs)
+ if (mdio_bus_data->irqs) {
irqlist = mdio_bus_data->irqs;
- else
+ } else {
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++)
+ priv->mii_irq[addr] = PHY_POLL;
irqlist = priv->mii_irq;
+ }
#ifdef CONFIG_OF
if (priv->device->of_node)
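
The stmmac_mdio hunk above makes the fallback explicit: when platform data supplies no per-PHY interrupt numbers, every slot of the driver-owned table is set to PHY_POLL before it is handed to the MDIO core, so PHYs without a wired interrupt fall back to polling. A condensed sketch of the same idea, assuming a kernel context (the helper name is illustrative):

#include <linux/phy.h>

/* Default every PHY address to polling unless the platform supplies
 * a real interrupt table (irqs may be NULL). */
static void example_fill_mii_irq(int *table, const int *irqs)
{
	int addr;

	for (addr = 0; addr < PHY_MAX_ADDR; addr++)
		table[addr] = irqs ? irqs[addr] : PHY_POLL;
}
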
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 73f74f369437..7399a52f7c26 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -313,19 +313,6 @@ static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
static struct mii_bus *cpmac_mii;
-static int cpmac_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != dev->base_addr)
- return -EOPNOTSUPP;
-
- /* ignore other fields */
- return 0;
-}
-
static void cpmac_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -1100,7 +1087,6 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_rx_mode = cpmac_set_multicast_list,
.ndo_do_ioctl = cpmac_ioctl,
- .ndo_set_config = cpmac_config,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c331b7ebc812..91499be03c6f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -248,20 +248,31 @@ struct cpsw_ss_regs {
#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
-#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */
+#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */
#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
-#define CTRL_TS_BITS \
- (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
- TS_ANNEX_D_EN | TS_LTYPE1_EN)
+#define CTRL_V2_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
-#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
-#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN)
-#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN)
+#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
+#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
+
+
+#define CTRL_V3_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
+ TS_LTYPE1_EN)
+
+#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
+#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
@@ -1376,13 +1387,27 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave = &priv->slaves[priv->data.active_slave];
ctrl = slave_read(slave, CPSW2_CONTROL);
- ctrl &= ~CTRL_ALL_TS_MASK;
+ switch (priv->version) {
+ case CPSW_VERSION_2:
+ ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (priv->cpts->tx_enable)
- ctrl |= CTRL_TX_TS_BITS;
+ if (priv->cpts->tx_enable)
+ ctrl |= CTRL_V2_TX_TS_BITS;
- if (priv->cpts->rx_enable)
- ctrl |= CTRL_RX_TS_BITS;
+ if (priv->cpts->rx_enable)
+ ctrl |= CTRL_V2_RX_TS_BITS;
+ break;
+ case CPSW_VERSION_3:
+ default:
+ ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+ if (priv->cpts->tx_enable)
+ ctrl |= CTRL_V3_TX_TS_BITS;
+
+ if (priv->cpts->rx_enable)
+ ctrl |= CTRL_V3_RX_TS_BITS;
+ break;
+ }
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -1398,7 +1423,8 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
struct hwtstamp_config cfg;
if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2)
+ priv->version != CPSW_VERSION_2 &&
+ priv->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1443,6 +1469,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
cpsw_hwtstamp_v1(priv);
break;
case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
cpsw_hwtstamp_v2(priv);
break;
default:
@@ -1459,7 +1486,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
struct hwtstamp_config cfg;
if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2)
+ priv->version != CPSW_VERSION_2 &&
+ priv->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
cfg.flags = 0;
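
The cpsw hunks above split the old CTRL_TS_BITS into V2 and V3 variants, with the V3 set additionally enabling TS_ANNEX_F_EN, and select between them on the hardware version read at probe time. A self-contained sketch of that selection, using an illustrative subset of the bit definitions (the real driver also folds in the TS_320/TS_13x address-match bits and wraps this in slave_read()/slave_write()):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative subset of the CPSW2_CONTROL time-sync bits above. */
#define TS_ANNEX_F_EN	(1 << 6)
#define TS_ANNEX_D_EN	(1 << 4)
#define TS_LTYPE1_EN	(1 << 2)
#define TS_TX_EN	(1 << 1)
#define TS_RX_EN	(1 << 0)

#define CTRL_V2_TS_BITS	(TS_ANNEX_D_EN | TS_LTYPE1_EN)
#define CTRL_V3_TS_BITS	(TS_ANNEX_F_EN | TS_ANNEX_D_EN | TS_LTYPE1_EN)

/* Return the time-sync control bits to program for the given IP
 * generation and the requested Tx/Rx timestamping state. */
static uint32_t cpsw_ts_ctrl_bits(bool is_v3, bool tx_en, bool rx_en)
{
	uint32_t base = is_v3 ? CTRL_V3_TS_BITS : CTRL_V2_TS_BITS;
	uint32_t ctrl = 0;

	if (tx_en)
		ctrl |= base | TS_TX_EN;
	if (rx_en)
		ctrl |= base | TS_RX_EN;

	return ctrl;
}
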
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 243513980b51..6b56f85951e5 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -236,13 +236,11 @@ static void cpts_overflow_check(struct work_struct *work)
schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
}
-#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
-
-static void cpts_clk_init(struct cpts *cpts)
+static void cpts_clk_init(struct device *dev, struct cpts *cpts)
{
- cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+ cpts->refclk = devm_clk_get(dev, "cpts");
if (IS_ERR(cpts->refclk)) {
- pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+ dev_err(dev, "Failed to get cpts refclk\n");
cpts->refclk = NULL;
return;
}
@@ -252,7 +250,6 @@ static void cpts_clk_init(struct cpts *cpts)
static void cpts_clk_release(struct cpts *cpts)
{
clk_disable(cpts->refclk);
- clk_put(cpts->refclk);
}
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
@@ -390,7 +387,7 @@ int cpts_register(struct device *dev, struct cpts *cpts,
for (i = 0; i < CPTS_MAX_EVENTS; i++)
list_add(&cpts->pool_data[i].list, &cpts->pool);
- cpts_clk_init(cpts);
+ cpts_clk_init(dev, cpts);
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
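
The cpts hunk above swaps a global clk_get() of a hard-coded connection id for devm_clk_get() against the probing device, so the clock reference is dropped automatically on unbind and clk_put() disappears from the release path. A minimal sketch of the pattern, assuming a device context in probe (the "cpts" connection id comes from the hunk; the function name is illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Acquire and enable a device-managed clock; no clk_put() is needed
 * in the error or remove paths. */
static int example_clk_init(struct device *dev, struct clk **out)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "cpts");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	*out = clk;
	return 0;
}
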
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 0cca9dec5d82..34e97eca561c 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -321,15 +321,14 @@ static int davinci_mdio_probe(struct platform_device *pdev)
struct phy_device *phy;
int ret, addr;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->bus = mdiobus_alloc();
+ data->bus = devm_mdiobus_alloc(dev);
if (!data->bus) {
dev_err(dev, "failed to alloc mii bus\n");
- ret = -ENOMEM;
- goto bail_out;
+ return -ENOMEM;
}
if (dev->of_node) {
@@ -349,12 +348,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->bus->parent = dev;
data->bus->priv = data;
- /* Select default pin state */
- pinctrl_pm_select_default_state(&pdev->dev);
-
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- data->clk = clk_get(&pdev->dev, "fck");
+ data->clk = devm_clk_get(dev, "fck");
if (IS_ERR(data->clk)) {
dev_err(dev, "failed to get device clock\n");
ret = PTR_ERR(data->clk);
@@ -367,24 +363,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
spin_lock_init(&data->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "could not find register map resource\n");
- ret = -ENOENT;
- goto bail_out;
- }
-
- res = devm_request_mem_region(dev, res->start, resource_size(res),
- dev_name(dev));
- if (!res) {
- dev_err(dev, "could not allocate register map resource\n");
- ret = -ENXIO;
- goto bail_out;
- }
-
- data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!data->regs) {
- dev_err(dev, "could not map mdio registers\n");
- ret = -ENOMEM;
+ data->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->regs)) {
+ ret = PTR_ERR(data->regs);
goto bail_out;
}
@@ -406,16 +387,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
return 0;
bail_out:
- if (data->bus)
- mdiobus_free(data->bus);
-
- if (data->clk)
- clk_put(data->clk);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(data);
-
return ret;
}
@@ -423,18 +397,12 @@ static int davinci_mdio_remove(struct platform_device *pdev)
{
struct davinci_mdio_data *data = platform_get_drvdata(pdev);
- if (data->bus) {
+ if (data->bus)
mdiobus_unregister(data->bus);
- mdiobus_free(data->bus);
- }
- if (data->clk)
- clk_put(data->clk);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(data);
-
return 0;
}
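
The davinci_mdio conversion above collapses the platform_get_resource() / devm_request_mem_region() / devm_ioremap_nocache() sequence into one devm_ioremap_resource() call and moves the remaining allocations to devm_* helpers, which is what lets most of the bail_out unwinding and the remove-path frees disappear. A minimal sketch of the resulting register-mapping step (the helper name is illustrative):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Map the first MEM resource with a managed helper.  The call checks
 * the resource, reserves the region and maps it, returning an
 * ERR_PTR() on any failure, so a single IS_ERR() check suffices. */
static void __iomem *example_map_regs(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	return devm_ioremap_resource(&pdev->dev, res);
}
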
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 8a049a2b4474..f66ddaee0c87 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
- depends on PCI
+ depends on (PCI || USE_OF)
select CRC32
select MII
---help---
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f61dc2b72bb2..981be0154be3 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -94,6 +94,10 @@ static const int multicast_filter_limit = 32;
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -116,13 +120,6 @@ static const int multicast_filter_limit = 32;
static const char version[] =
"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
-/* This driver was written to use PCI memory space. Some early versions
- of the Rhine may only work correctly with I/O space accesses. */
-#ifdef CONFIG_VIA_RHINE_MMIO
-#define USE_MMIO
-#else
-#endif
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
@@ -260,6 +257,12 @@ enum rhine_quirks {
rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
rqRhineI = 0x0100, /* See comment below */
+ rqIntPHY = 0x0200, /* Integrated PHY */
+ rqMgmt = 0x0400, /* Management adapter */
+ rqNeedEnMMIO = 0x0800, /* Whether the core needs to be
+ * switched from PIO mode to MMIO
+ * (only applies to PCI)
+ */
};
/*
* rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
@@ -279,6 +282,15 @@ static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
+/* OpenFirmware identifiers for platform-bus devices
+ * The .data field is currently only used to store quirks
+ */
+static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
+static struct of_device_id rhine_of_tbl[] = {
+ { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(of, rhine_of_tbl);
/* Offsets to the device registers. */
enum register_offsets {
@@ -338,13 +350,11 @@ enum bcr1_bits {
BCR1_MED1=0x80, /* for VT6102 */
};
-#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
0
};
-#endif
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
@@ -446,7 +456,7 @@ struct rhine_private {
unsigned char *tx_bufs;
dma_addr_t tx_bufs_dma;
- struct pci_dev *pdev;
+ int irq;
long pioaddr;
struct net_device *dev;
struct napi_struct napi;
@@ -649,20 +659,46 @@ static void rhine_chip_reset(struct net_device *dev)
"failed" : "succeeded");
}
-#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
int n;
- if (quirks & rqRhineI) {
- /* More recent docs say that this bit is reserved ... */
- n = inb(pioaddr + ConfigA) | 0x20;
- outb(n, pioaddr + ConfigA);
- } else {
- n = inb(pioaddr + ConfigD) | 0x80;
- outb(n, pioaddr + ConfigD);
+
+ if (quirks & rqNeedEnMMIO) {
+ if (quirks & rqRhineI) {
+ /* More recent docs say that this bit is reserved */
+ n = inb(pioaddr + ConfigA) | 0x20;
+ outb(n, pioaddr + ConfigA);
+ } else {
+ n = inb(pioaddr + ConfigD) | 0x80;
+ outb(n, pioaddr + ConfigD);
+ }
}
}
-#endif
+
+static inline int verify_mmio(struct device *hwdev,
+ long pioaddr,
+ void __iomem *ioaddr,
+ u32 quirks)
+{
+ if (quirks & rqNeedEnMMIO) {
+ int i = 0;
+
+ /* Check that selected MMIO registers match the PIO ones */
+ while (mmio_verify_registers[i]) {
+ int reg = mmio_verify_registers[i++];
+ unsigned char a = inb(pioaddr+reg);
+ unsigned char b = readb(ioaddr+reg);
+
+ if (a != b) {
+ dev_err(hwdev,
+ "MMIO do not match PIO [%02x] (%02x != %02x)\n",
+ reg, a, b);
+ return -EIO;
+ }
+ }
+ }
+ return 0;
+}
/*
* Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
@@ -682,14 +718,12 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
if (i > 512)
pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
-#ifdef USE_MMIO
/*
* Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
* MMIO. If reloading EEPROM was done first this could be avoided, but
* it is not known if that still works with the "win98-reboot" problem.
*/
enable_mmio(pioaddr, rp->quirks);
-#endif
/* Turn off EEPROM-controlled wake-up (magic packet) */
if (rp->quirks & rqWOL)
@@ -701,7 +735,7 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
static void rhine_poll(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- const int irq = rp->pdev->irq;
+ const int irq = rp->irq;
disable_irq(irq);
rhine_interrupt(irq, dev);
@@ -846,7 +880,8 @@ static void rhine_hw_init(struct net_device *dev, long pioaddr)
msleep(5);
/* Reload EEPROM controlled bytes cleared by soft reset */
- rhine_reload_eeprom(pioaddr, dev);
+ if (dev_is_pci(dev->dev.parent))
+ rhine_reload_eeprom(pioaddr, dev);
}
static const struct net_device_ops rhine_netdev_ops = {
@@ -867,125 +902,37 @@ static const struct net_device_ops rhine_netdev_ops = {
#endif
};
-static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rhine_init_one_common(struct device *hwdev, u32 quirks,
+ long pioaddr, void __iomem *ioaddr, int irq)
{
struct net_device *dev;
struct rhine_private *rp;
- int i, rc;
- u32 quirks;
- long pioaddr;
- long memaddr;
- void __iomem *ioaddr;
- int io_size, phy_id;
+ int i, rc, phy_id;
const char *name;
-#ifdef USE_MMIO
- int bar = 1;
-#else
- int bar = 0;
-#endif
-
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- pr_info_once("%s\n", version);
-#endif
-
- io_size = 256;
- phy_id = 0;
- quirks = 0;
- name = "Rhine";
- if (pdev->revision < VTunknown0) {
- quirks = rqRhineI;
- io_size = 128;
- }
- else if (pdev->revision >= VT6102) {
- quirks = rqWOL | rqForceReset;
- if (pdev->revision < VT6105) {
- name = "Rhine II";
- quirks |= rqStatusWBRace; /* Rhine-II exclusive */
- }
- else {
- phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
- if (pdev->revision >= VT6105_B0)
- quirks |= rq6patterns;
- if (pdev->revision < VT6105M)
- name = "Rhine III";
- else
- name = "Rhine III (Management Adapter)";
- }
- }
-
- rc = pci_enable_device(pdev);
- if (rc)
- goto err_out;
/* this should always be supported */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
if (rc) {
- dev_err(&pdev->dev,
- "32-bit PCI DMA addresses not supported by the card!?\n");
- goto err_out_pci_disable;
- }
-
- /* sanity check */
- if ((pci_resource_len(pdev, 0) < io_size) ||
- (pci_resource_len(pdev, 1) < io_size)) {
- rc = -EIO;
- dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
- goto err_out_pci_disable;
+ dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
+ goto err_out;
}
- pioaddr = pci_resource_start(pdev, 0);
- memaddr = pci_resource_start(pdev, 1);
-
- pci_set_master(pdev);
-
dev = alloc_etherdev(sizeof(struct rhine_private));
if (!dev) {
rc = -ENOMEM;
- goto err_out_pci_disable;
+ goto err_out;
}
- SET_NETDEV_DEV(dev, &pdev->dev);
+ SET_NETDEV_DEV(dev, hwdev);
rp = netdev_priv(dev);
rp->dev = dev;
rp->quirks = quirks;
rp->pioaddr = pioaddr;
- rp->pdev = pdev;
+ rp->base = ioaddr;
+ rp->irq = irq;
rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out_free_netdev;
-
- ioaddr = pci_iomap(pdev, bar, io_size);
- if (!ioaddr) {
- rc = -EIO;
- dev_err(&pdev->dev,
- "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
- pci_name(pdev), io_size, memaddr);
- goto err_out_free_res;
- }
-
-#ifdef USE_MMIO
- enable_mmio(pioaddr, quirks);
-
- /* Check that selected MMIO registers match the PIO ones */
- i = 0;
- while (mmio_verify_registers[i]) {
- int reg = mmio_verify_registers[i++];
- unsigned char a = inb(pioaddr+reg);
- unsigned char b = readb(ioaddr+reg);
- if (a != b) {
- rc = -EIO;
- dev_err(&pdev->dev,
- "MMIO do not match PIO [%02x] (%02x != %02x)\n",
- reg, a, b);
- goto err_out_unmap;
- }
- }
-#endif /* USE_MMIO */
-
- rp->base = ioaddr;
+ phy_id = rp->quirks & rqIntPHY ? 1 : 0;
u64_stats_init(&rp->tx_stats.syncp);
u64_stats_init(&rp->rx_stats.syncp);
@@ -1030,7 +977,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
- if (pdev->revision >= VT6105M)
+ if (rp->quirks & rqMgmt)
dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1038,18 +985,21 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* dev->name not defined before register_netdev()! */
rc = register_netdev(dev);
if (rc)
- goto err_out_unmap;
+ goto err_out_free_netdev;
+
+ if (rp->quirks & rqRhineI)
+ name = "Rhine";
+ else if (rp->quirks & rqStatusWBRace)
+ name = "Rhine II";
+ else if (rp->quirks & rqMgmt)
+ name = "Rhine III (Management Adapter)";
+ else
+ name = "Rhine III";
netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
- name,
-#ifdef USE_MMIO
- memaddr,
-#else
- (long)ioaddr,
-#endif
- dev->dev_addr, pdev->irq);
+ name, (long)ioaddr, dev->dev_addr, rp->irq);
- pci_set_drvdata(pdev, dev);
+ dev_set_drvdata(hwdev, dev);
{
u16 mii_cmd;
@@ -1078,41 +1028,158 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+err_out_free_netdev:
+ free_netdev(dev);
+err_out:
+ return rc;
+}
+
+static int rhine_init_one_pci(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *hwdev = &pdev->dev;
+ int rc;
+ long pioaddr, memaddr;
+ void __iomem *ioaddr;
+ int io_size = pdev->revision < VTunknown0 ? 128 : 256;
+
+/* This driver was written to use PCI memory space. Some early versions
+ * of the Rhine may only work correctly with I/O space accesses.
+ * TODO: determine for which revisions this is true and assign the flag
+ * in code as opposed to this Kconfig option (???)
+ */
+#ifdef CONFIG_VIA_RHINE_MMIO
+ u32 quirks = rqNeedEnMMIO;
+#else
+ u32 quirks = 0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ pr_info_once("%s\n", version);
+#endif
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out;
+
+ if (pdev->revision < VTunknown0) {
+ quirks |= rqRhineI;
+ } else if (pdev->revision >= VT6102) {
+ quirks |= rqWOL | rqForceReset;
+ if (pdev->revision < VT6105) {
+ quirks |= rqStatusWBRace;
+ } else {
+ quirks |= rqIntPHY;
+ if (pdev->revision >= VT6105_B0)
+ quirks |= rq6patterns;
+ if (pdev->revision >= VT6105M)
+ quirks |= rqMgmt;
+ }
+ }
+
+ /* sanity check */
+ if ((pci_resource_len(pdev, 0) < io_size) ||
+ (pci_resource_len(pdev, 1) < io_size)) {
+ rc = -EIO;
+ dev_err(hwdev, "Insufficient PCI resources, aborting\n");
+ goto err_out_pci_disable;
+ }
+
+ pioaddr = pci_resource_start(pdev, 0);
+ memaddr = pci_resource_start(pdev, 1);
+
+ pci_set_master(pdev);
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_pci_disable;
+
+ ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
+ if (!ioaddr) {
+ rc = -EIO;
+ dev_err(hwdev,
+ "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+ dev_name(hwdev), io_size, memaddr);
+ goto err_out_free_res;
+ }
+
+ enable_mmio(pioaddr, quirks);
+
+ rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
+ if (rc)
+ goto err_out_unmap;
+
+ rc = rhine_init_one_common(&pdev->dev, quirks,
+ pioaddr, ioaddr, pdev->irq);
+ if (!rc)
+ return 0;
+
err_out_unmap:
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
-err_out_free_netdev:
- free_netdev(dev);
err_out_pci_disable:
pci_disable_device(pdev);
err_out:
return rc;
}
+static int rhine_init_one_platform(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const u32 *quirks;
+ int irq;
+ struct resource *res;
+ void __iomem *ioaddr;
+
+ match = of_match_device(rhine_of_tbl, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ioaddr))
+ return PTR_ERR(ioaddr);
+
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!irq)
+ return -EINVAL;
+
+ quirks = match->data;
+ if (!quirks)
+ return -EINVAL;
+
+ return rhine_init_one_common(&pdev->dev, *quirks,
+ (long)ioaddr, ioaddr, irq);
+}
+
static int alloc_ring(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
void *ring;
dma_addr_t ring_dma;
- ring = pci_alloc_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- &ring_dma);
+ ring = dma_alloc_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ &ring_dma,
+ GFP_ATOMIC);
if (!ring) {
netdev_err(dev, "Could not allocate DMA memory\n");
return -ENOMEM;
}
if (rp->quirks & rqRhineI) {
- rp->tx_bufs = pci_alloc_consistent(rp->pdev,
- PKT_BUF_SZ * TX_RING_SIZE,
- &rp->tx_bufs_dma);
+ rp->tx_bufs = dma_alloc_coherent(hwdev,
+ PKT_BUF_SZ * TX_RING_SIZE,
+ &rp->tx_bufs_dma,
+ GFP_ATOMIC);
if (rp->tx_bufs == NULL) {
- pci_free_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- ring, ring_dma);
+ dma_free_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ ring, ring_dma);
return -ENOMEM;
}
}
@@ -1128,16 +1195,17 @@ static int alloc_ring(struct net_device* dev)
static void free_ring(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
- pci_free_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- rp->rx_ring, rp->rx_ring_dma);
+ dma_free_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ rp->rx_ring, rp->rx_ring_dma);
rp->tx_ring = NULL;
if (rp->tx_bufs)
- pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
- rp->tx_bufs, rp->tx_bufs_dma);
+ dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
+ rp->tx_bufs, rp->tx_bufs_dma);
rp->tx_bufs = NULL;
@@ -1146,6 +1214,7 @@ static void free_ring(struct net_device* dev)
static void alloc_rbufs(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
dma_addr_t next;
int i;
@@ -1174,9 +1243,9 @@ static void alloc_rbufs(struct net_device *dev)
break;
rp->rx_skbuff_dma[i] =
- pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) {
+ dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
rp->rx_skbuff_dma[i] = 0;
dev_kfree_skb(skb);
break;
@@ -1190,6 +1259,7 @@ static void alloc_rbufs(struct net_device *dev)
static void free_rbufs(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int i;
/* Free all the skbuffs in the Rx queue. */
@@ -1197,9 +1267,9 @@ static void free_rbufs(struct net_device* dev)
rp->rx_ring[i].rx_status = 0;
rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (rp->rx_skbuff[i]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->rx_skbuff_dma[i],
- rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ rp->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(rp->rx_skbuff[i]);
}
rp->rx_skbuff[i] = NULL;
@@ -1230,6 +1300,7 @@ static void alloc_tbufs(struct net_device* dev)
static void free_tbufs(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int i;
for (i = 0; i < TX_RING_SIZE; i++) {
@@ -1238,10 +1309,10 @@ static void free_tbufs(struct net_device* dev)
rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (rp->tx_skbuff[i]) {
if (rp->tx_skbuff_dma[i]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->tx_skbuff_dma[i],
rp->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
dev_kfree_skb(rp->tx_skbuff[i]);
}
@@ -1469,7 +1540,7 @@ static void init_registers(struct net_device *dev)
rhine_set_rx_mode(dev);
- if (rp->pdev->revision >= VT6105M)
+ if (rp->quirks & rqMgmt)
rhine_init_cam_filter(dev);
napi_enable(&rp->napi);
@@ -1581,16 +1652,15 @@ static int rhine_open(struct net_device *dev)
void __iomem *ioaddr = rp->base;
int rc;
- rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
- dev);
+ rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
- netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+ netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
rc = alloc_ring(dev);
if (rc) {
- free_irq(rp->pdev->irq, dev);
+ free_irq(rp->irq, dev);
return rc;
}
alloc_rbufs(dev);
@@ -1659,6 +1729,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
void __iomem *ioaddr = rp->base;
unsigned entry;
@@ -1695,9 +1766,9 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
rp->tx_bufs));
} else {
rp->tx_skbuff_dma[entry] =
- pci_map_single(rp->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
+ dma_map_single(hwdev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
dev_kfree_skb_any(skb);
rp->tx_skbuff_dma[entry] = 0;
dev->stats.tx_dropped++;
@@ -1788,6 +1859,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
static void rhine_tx(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
/* find and cleanup dirty tx descriptors */
@@ -1831,10 +1903,10 @@ static void rhine_tx(struct net_device *dev)
}
/* Free the original skb. */
if (rp->tx_skbuff_dma[entry]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->tx_skbuff_dma[entry],
rp->tx_skbuff[entry]->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
dev_consume_skb_any(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
@@ -1863,6 +1935,7 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
static int rhine_rx(struct net_device *dev, int limit)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int count;
int entry = rp->cur_rx % RX_RING_SIZE;
@@ -1924,19 +1997,19 @@ static int rhine_rx(struct net_device *dev, int limit)
if (pkt_len < rx_copybreak)
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
if (skb) {
- pci_dma_sync_single_for_cpu(rp->pdev,
- rp->rx_skbuff_dma[entry],
- rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(hwdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb,
rp->rx_skbuff[entry]->data,
pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(rp->pdev,
- rp->rx_skbuff_dma[entry],
- rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(hwdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
} else {
skb = rp->rx_skbuff[entry];
if (skb == NULL) {
@@ -1945,10 +2018,10 @@ static int rhine_rx(struct net_device *dev, int limit)
}
rp->rx_skbuff[entry] = NULL;
skb_put(skb, pkt_len);
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (unlikely(desc_length & DescTag))
@@ -1979,10 +2052,11 @@ static int rhine_rx(struct net_device *dev, int limit)
if (skb == NULL)
break; /* Better luck next round. */
rp->rx_skbuff_dma[entry] =
- pci_map_single(rp->pdev, skb->data,
+ dma_map_single(hwdev, skb->data,
rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hwdev,
+ rp->rx_skbuff_dma[entry])) {
dev_kfree_skb(skb);
rp->rx_skbuff_dma[entry] = 0;
break;
@@ -2103,7 +2177,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
- } else if (rp->pdev->revision >= VT6105M) {
+ } else if (rp->quirks & rqMgmt) {
int i = 0;
u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
netdev_for_each_mc_addr(ha, dev) {
@@ -2125,7 +2199,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
}
/* enable/disable VLAN receive filtering */
- if (rp->pdev->revision >= VT6105M) {
+ if (rp->quirks & rqMgmt) {
if (dev->flags & IFF_PROMISC)
BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
else
@@ -2136,11 +2210,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+ strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2277,7 +2351,7 @@ static int rhine_close(struct net_device *dev)
/* Stop the chip's Tx and Rx processes. */
iowrite16(CmdStop, ioaddr + ChipCmd);
- free_irq(rp->pdev->irq, dev);
+ free_irq(rp->irq, dev);
free_rbufs(dev);
free_tbufs(dev);
free_ring(dev);
@@ -2286,7 +2360,7 @@ static int rhine_close(struct net_device *dev)
}
-static void rhine_remove_one(struct pci_dev *pdev)
+static void rhine_remove_one_pci(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
@@ -2300,7 +2374,21 @@ static void rhine_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static void rhine_shutdown (struct pci_dev *pdev)
+static int rhine_remove_one_platform(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ iounmap(rp->base);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static void rhine_shutdown_pci(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
@@ -2354,8 +2442,7 @@ static void rhine_shutdown (struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rhine_private *rp = netdev_priv(dev);
if (!netif_running(dev))
@@ -2367,23 +2454,21 @@ static int rhine_suspend(struct device *device)
netif_device_detach(dev);
- rhine_shutdown(pdev);
+ if (dev_is_pci(device))
+ rhine_shutdown_pci(to_pci_dev(device));
return 0;
}
static int rhine_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rhine_private *rp = netdev_priv(dev);
if (!netif_running(dev))
return 0;
-#ifdef USE_MMIO
enable_mmio(rp->pioaddr, rp->quirks);
-#endif
rhine_power_init(dev);
free_tbufs(dev);
free_rbufs(dev);
@@ -2408,15 +2493,26 @@ static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#endif /* !CONFIG_PM_SLEEP */
-static struct pci_driver rhine_driver = {
+static struct pci_driver rhine_driver_pci = {
.name = DRV_NAME,
.id_table = rhine_pci_tbl,
- .probe = rhine_init_one,
- .remove = rhine_remove_one,
- .shutdown = rhine_shutdown,
+ .probe = rhine_init_one_pci,
+ .remove = rhine_remove_one_pci,
+ .shutdown = rhine_shutdown_pci,
.driver.pm = RHINE_PM_OPS,
};
+static struct platform_driver rhine_driver_platform = {
+ .probe = rhine_init_one_platform,
+ .remove = rhine_remove_one_platform,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = rhine_of_tbl,
+ .pm = RHINE_PM_OPS,
+ }
+};
+
static struct dmi_system_id rhine_dmi_table[] __initdata = {
{
.ident = "EPIA-M",
@@ -2437,6 +2533,8 @@ static struct dmi_system_id rhine_dmi_table[] __initdata = {
static int __init rhine_init(void)
{
+ int ret_pci, ret_platform;
+
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
pr_info("%s\n", version);
@@ -2449,13 +2547,19 @@ static int __init rhine_init(void)
else if (avoid_D3)
pr_info("avoid_D3 set\n");
- return pci_register_driver(&rhine_driver);
+ ret_pci = pci_register_driver(&rhine_driver_pci);
+ ret_platform = platform_driver_register(&rhine_driver_platform);
+ if ((ret_pci < 0) && (ret_platform < 0))
+ return ret_pci;
+
+ return 0;
}
static void __exit rhine_cleanup(void)
{
- pci_unregister_driver(&rhine_driver);
+ platform_driver_unregister(&rhine_driver_platform);
+ pci_unregister_driver(&rhine_driver_pci);
}
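
With the OF support above, via-rhine registers both a pci_driver and a platform_driver from a single module, and module init only fails when neither bus type could be registered. A stripped-down sketch of that dual-registration shape, mirroring rhine_init()/rhine_cleanup() above (the example_* names stand in for driver structures defined elsewhere):

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

extern struct pci_driver example_driver_pci;
extern struct platform_driver example_driver_platform;

static int __init example_init(void)
{
	int ret_pci = pci_register_driver(&example_driver_pci);
	int ret_platform = platform_driver_register(&example_driver_platform);

	/* Bail out only if neither bus type registered. */
	if (ret_pci < 0 && ret_platform < 0)
		return ret_pci;

	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver_platform);
	pci_unregister_driver(&example_driver_pci);
}
module_exit(example_exit);
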
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d18f711d0b0c..4b7df5a5c966 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -28,50 +28,119 @@
#include <linux/hyperv.h>
#include <linux/rndis.h>
-/* Fwd declaration */
-struct hv_netvsc_packet;
-struct ndis_tcp_ip_checksum_info;
+/* RSS related */
+#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */
+#define OID_GEN_RECEIVE_SCALE_PARAMETERS 0x00010204 /* query and set */
-/* Represent the xfer page packet which contains 1 or more netvsc packet */
-struct xferpage_packet {
- struct list_head list_ent;
- u32 status;
+#define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88
+#define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89
- /* # of netvsc packets this xfer packet contains */
- u32 count;
+#define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2
+#define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2
+
+struct ndis_obj_header {
+ u8 type;
+ u8 rev;
+ u16 size;
+} __packed;
+
+/* ndis_recv_scale_cap/cap_flag */
+#define NDIS_RSS_CAPS_MESSAGE_SIGNALED_INTERRUPTS 0x01000000
+#define NDIS_RSS_CAPS_CLASSIFICATION_AT_ISR 0x02000000
+#define NDIS_RSS_CAPS_CLASSIFICATION_AT_DPC 0x04000000
+#define NDIS_RSS_CAPS_USING_MSI_X 0x08000000
+#define NDIS_RSS_CAPS_RSS_AVAILABLE_ON_PORTS 0x10000000
+#define NDIS_RSS_CAPS_SUPPORTS_MSI_X 0x20000000
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV4 0x00000100
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6 0x00000200
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6_EX 0x00000400
+
+struct ndis_recv_scale_cap { /* NDIS_RECEIVE_SCALE_CAPABILITIES */
+ struct ndis_obj_header hdr;
+ u32 cap_flag;
+ u32 num_int_msg;
+ u32 num_recv_que;
+ u16 num_indirect_tabent;
+} __packed;
+
+
+/* ndis_recv_scale_param flags */
+#define NDIS_RSS_PARAM_FLAG_BASE_CPU_UNCHANGED 0x0001
+#define NDIS_RSS_PARAM_FLAG_HASH_INFO_UNCHANGED 0x0002
+#define NDIS_RSS_PARAM_FLAG_ITABLE_UNCHANGED 0x0004
+#define NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED 0x0008
+#define NDIS_RSS_PARAM_FLAG_DISABLE_RSS 0x0010
+
+/* Hash info bits */
+#define NDIS_HASH_FUNC_TOEPLITZ 0x00000001
+#define NDIS_HASH_IPV4 0x00000100
+#define NDIS_HASH_TCP_IPV4 0x00000200
+#define NDIS_HASH_IPV6 0x00000400
+#define NDIS_HASH_IPV6_EX 0x00000800
+#define NDIS_HASH_TCP_IPV6 0x00001000
+#define NDIS_HASH_TCP_IPV6_EX 0x00002000
+
+#define NDIS_RSS_INDIRECTION_TABLE_MAX_SIZE_REVISION_2 (128 * 4)
+#define NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 40
+
+#define ITAB_NUM 128
+#define HASH_KEYLEN NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2
+extern u8 netvsc_hash_key[];
+
+struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
+ struct ndis_obj_header hdr;
+
+ /* Qualifies the rest of the information */
+ u16 flag;
+
+ /* The base CPU number to do receive processing. not used */
+ u16 base_cpu_number;
+
+ /* This describes the hash function and type being enabled */
+ u32 hashinfo;
+
+ /* The size of indirection table array */
+ u16 indirect_tabsize;
+
+ /* The offset of the indirection table from the beginning of this
+ * structure
+ */
+ u32 indirect_taboffset;
+
+ /* The size of the hash secret key */
+ u16 hashkey_size;
+
+ /* The offset of the secret key from the beginning of this structure */
+ u32 kashkey_offset;
+
+ u32 processor_masks_offset;
+ u32 num_processor_masks;
+ u32 processor_masks_entry_size;
};
+/* Fwd declaration */
+struct ndis_tcp_ip_checksum_info;
+
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
* within the RNDIS
*/
struct hv_netvsc_packet {
/* Bookkeeping stuff */
- struct list_head list_ent;
u32 status;
struct hv_device *device;
bool is_data_pkt;
u16 vlan_tci;
- /*
- * Valid only for receives when we break a xfer page packet
- * into multiple netvsc packets
- */
- struct xferpage_packet *xfer_page_pkt;
+ u16 q_idx;
+ struct vmbus_channel *channel;
- union {
- struct {
- u64 recv_completion_tid;
- void *recv_completion_ctx;
- void (*recv_completion)(void *context);
- } recv;
- struct {
- u64 send_completion_tid;
- void *send_completion_ctx;
- void (*send_completion)(void *context);
- } send;
- } completion;
+ u64 send_completion_tid;
+ void *send_completion_ctx;
+ void (*send_completion)(void *context);
+
+ u32 send_buf_index;
/* This points to the memory after page_buf */
struct rndis_message *rndis_msg;
@@ -120,6 +189,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
int netvsc_recv_callback(struct hv_device *device_obj,
struct hv_netvsc_packet *packet,
struct ndis_tcp_ip_checksum_info *csum_info);
+void netvsc_channel_cb(void *context);
int rndis_filter_open(struct hv_device *dev);
int rndis_filter_close(struct hv_device *dev);
int rndis_filter_device_add(struct hv_device *dev,
@@ -514,14 +584,16 @@ struct nvsp_message {
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
+#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024) /* 1MB */
+#define NETVSC_INVALID_INDEX -1
-#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
-/* Preallocated receive packets */
-#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
+#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
#define NETVSC_PACKET_SIZE 2048
+#define VRSS_SEND_TAB_SIZE 16
+
/* Per netvsc channel-specific */
struct netvsc_device {
struct hv_device *dev;
@@ -532,12 +604,6 @@ struct netvsc_device {
wait_queue_head_t wait_drain;
bool start_remove;
bool destroy;
- /*
- * List of free preallocated hv_netvsc_packet to represent receive
- * packet
- */
- struct list_head recv_pkt_list;
- spinlock_t recv_pkt_list_lock;
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
@@ -546,6 +612,15 @@ struct netvsc_device {
u32 recv_section_cnt;
struct nvsp_1_receive_buffer_section *recv_section;
+ /* Send buffer allocated by us */
+ void *send_buf;
+ u32 send_buf_size;
+ u32 send_buf_gpadl_handle;
+ u32 send_section_cnt;
+ u32 send_section_size;
+ unsigned long *send_section_map;
+ int map_words;
+
/* Used for NetVSP initialization protocol */
struct completion channel_init_wait;
struct nvsp_message channel_init_pkt;
@@ -555,10 +630,20 @@ struct netvsc_device {
struct net_device *ndev;
+ struct vmbus_channel *chn_table[NR_CPUS];
+ u32 send_table[VRSS_SEND_TAB_SIZE];
+ u32 num_chn;
+ atomic_t queue_sends[NR_CPUS];
+
/* Holds rndis device info */
void *extension;
- /* The recive buffer for this device */
+
+ int ring_size;
+
+ /* The primary channel callback buffer */
unsigned char cb_buffer[NETVSC_PACKET_SIZE];
+ /* The sub channel callback buffer */
+ unsigned char *sub_cb_buf;
};
/* NdisInitialize message */
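
The new send-buffer fields above (send_buf_size, send_section_size, send_section_cnt, send_section_map, map_words) back a simple bitmap allocator: the 1MB send buffer is carved into equal host-reported sections and one bit per section marks it in use. A hedged sketch of the sizing step, matching the arithmetic netvsc_init_buf() performs further down (the function name is illustrative):

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Size the in-use bitmap for a sectioned send buffer: one bit per
 * section, rounded up to whole unsigned longs. */
static unsigned long *example_alloc_section_map(u32 buf_size, u32 section_size,
						u32 *section_cnt, int *map_words)
{
	*section_cnt = buf_size / section_size;
	*map_words = DIV_ROUND_UP(*section_cnt, BITS_PER_LONG);

	return kzalloc(*map_words * sizeof(unsigned long), GFP_KERNEL);
}
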
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index f7629ecefa84..c041f63a6d30 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
+#include <asm/sync_bitops.h>
#include "hyperv_net.h"
@@ -80,7 +81,7 @@ get_in_err:
}
-static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
+static int netvsc_destroy_buf(struct netvsc_device *net_device)
{
struct nvsp_message *revoke_packet;
int ret = 0;
@@ -146,10 +147,62 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
net_device->recv_section = NULL;
}
+ /* Deal with the send buffer we may have set up.
+ * If we got a send section size, it means we received a
+ * SendSendBufferComplete msg (ie sent a
+ * NvspMessage1TypeSendSendBuffer msg); therefore, we need
+ * to send a revoke msg here.
+ */
+ if (net_device->send_section_size) {
+ /* Send the revoke send buffer */
+ revoke_packet = &net_device->revoke_packet;
+ memset(revoke_packet, 0, sizeof(struct nvsp_message));
+
+ revoke_packet->hdr.msg_type =
+ NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
+ revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0;
+
+ ret = vmbus_sendpacket(net_device->dev->channel,
+ revoke_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)revoke_packet,
+ VM_PKT_DATA_INBAND, 0);
+ /* If we failed here, we might as well return and
+ * have a leak rather than continue and a bugchk
+ */
+ if (ret != 0) {
+ netdev_err(ndev, "unable to send "
+ "revoke send buffer to netvsp\n");
+ return ret;
+ }
+ }
+ /* Teardown the gpadl on the vsp end */
+ if (net_device->send_buf_gpadl_handle) {
+ ret = vmbus_teardown_gpadl(net_device->dev->channel,
+ net_device->send_buf_gpadl_handle);
+
+ /* If we failed here, we might as well return and have a leak
+ * rather than continue and a bugchk
+ */
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to teardown send buffer's gpadl\n");
+ return ret;
+ }
+ net_device->send_buf_gpadl_handle = 0;
+ }
+ if (net_device->send_buf) {
+ /* Free up the send buffer */
+ free_pages((unsigned long)net_device->send_buf,
+ get_order(net_device->send_buf_size));
+ net_device->send_buf = NULL;
+ }
+ kfree(net_device->send_section_map);
+
return ret;
}
-static int netvsc_init_recv_buf(struct hv_device *device)
+static int netvsc_init_buf(struct hv_device *device)
{
int ret = 0;
int t;
@@ -248,10 +301,90 @@ static int netvsc_init_recv_buf(struct hv_device *device)
goto cleanup;
}
+ /* Now setup the send buffer.
+ */
+ net_device->send_buf =
+ (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+ get_order(net_device->send_buf_size));
+ if (!net_device->send_buf) {
+ netdev_err(ndev, "unable to allocate send "
+ "buffer of size %d\n", net_device->send_buf_size);
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Establish the gpadl handle for this buffer on this
+ * channel. Note: This call uses the vmbus connection rather
+ * than the channel to establish the gpadl handle.
+ */
+ ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
+ net_device->send_buf_size,
+ &net_device->send_buf_gpadl_handle);
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to establish send buffer's gpadl\n");
+ goto cleanup;
+ }
+
+ /* Notify the NetVsp of the gpadl handle */
+ init_packet = &net_device->channel_init_pkt;
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
+ init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
+ net_device->send_buf_gpadl_handle;
+ init_packet->msg.v1_msg.send_recv_buf.id = 0;
+
+ /* Send the gpadl notification request */
+ ret = vmbus_sendpacket(device->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to send send buffer's gpadl to netvsp\n");
+ goto cleanup;
+ }
+
+ t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
+ BUG_ON(t == 0);
+
+ /* Check the response */
+ if (init_packet->msg.v1_msg.
+ send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
+ netdev_err(ndev, "Unable to complete send buffer "
+ "initialization with NetVsp - status %d\n",
+ init_packet->msg.v1_msg.
+ send_recv_buf_complete.status);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Parse the response */
+ net_device->send_section_size = init_packet->msg.
+ v1_msg.send_send_buf_complete.section_size;
+
+ /* Section count is simply the size divided by the section size.
+ */
+ net_device->send_section_cnt =
+ net_device->send_buf_size/net_device->send_section_size;
+
+ dev_info(&device->device, "Send section size: %d, Section count:%d\n",
+ net_device->send_section_size, net_device->send_section_cnt);
+
+ /* Setup state for managing the send buffer. */
+ net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
+ BITS_PER_LONG);
+
+ net_device->send_section_map =
+ kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
+ if (net_device->send_section_map == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
goto exit;
cleanup:
- netvsc_destroy_recv_buf(net_device);
+ netvsc_destroy_buf(net_device);
exit:
return ret;
@@ -369,8 +502,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
else
net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+ net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
- ret = netvsc_init_recv_buf(device);
+ ret = netvsc_init_buf(device);
cleanup:
return ret;
@@ -378,7 +512,7 @@ cleanup:
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
- netvsc_destroy_recv_buf(net_device);
+ netvsc_destroy_buf(net_device);
}
/*
@@ -387,7 +521,6 @@ static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
int netvsc_device_remove(struct hv_device *device)
{
struct netvsc_device *net_device;
- struct hv_netvsc_packet *netvsc_packet, *pos;
unsigned long flags;
net_device = hv_get_drvdata(device);
@@ -416,11 +549,8 @@ int netvsc_device_remove(struct hv_device *device)
vmbus_close(device->channel);
/* Release all resources */
- list_for_each_entry_safe(netvsc_packet, pos,
- &net_device->recv_pkt_list, list_ent) {
- list_del(&netvsc_packet->list_ent);
- kfree(netvsc_packet);
- }
+ if (net_device->sub_cb_buf)
+ vfree(net_device->sub_cb_buf);
kfree(net_device);
return 0;
@@ -444,6 +574,12 @@ static inline u32 hv_ringbuf_avail_percent(
return avail_write * 100 / ring_info->ring_datasize;
}
+static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
+ u32 index)
+{
+ sync_change_bit(index, net_device->send_section_map);
+}
+
static void netvsc_send_completion(struct netvsc_device *net_device,
struct hv_device *device,
struct vmpacket_descriptor *packet)
@@ -451,6 +587,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *nvsc_packet;
struct net_device *ndev;
+ u32 send_index;
ndev = net_device->ndev;
@@ -461,7 +598,9 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
(nvsp_packet->hdr.msg_type ==
NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
(nvsp_packet->hdr.msg_type ==
- NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
+ NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
+ (nvsp_packet->hdr.msg_type ==
+ NVSP_MSG5_TYPE_SUBCHANNEL)) {
/* Copy the response back */
memcpy(&net_device->channel_init_pkt, nvsp_packet,
sizeof(struct nvsp_message));
@@ -469,28 +608,39 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
} else if (nvsp_packet->hdr.msg_type ==
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
int num_outstanding_sends;
+ u16 q_idx = 0;
+ struct vmbus_channel *channel = device->channel;
+ int queue_sends;
/* Get the send context */
nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
packet->trans_id;
/* Notify the layer above us */
- if (nvsc_packet)
- nvsc_packet->completion.send.send_completion(
- nvsc_packet->completion.send.
- send_completion_ctx);
+ if (nvsc_packet) {
+ send_index = nvsc_packet->send_buf_index;
+ if (send_index != NETVSC_INVALID_INDEX)
+ netvsc_free_send_slot(net_device, send_index);
+ q_idx = nvsc_packet->q_idx;
+ channel = nvsc_packet->channel;
+ nvsc_packet->send_completion(nvsc_packet->
+ send_completion_ctx);
+ }
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
+ queue_sends = atomic_dec_return(&net_device->
+ queue_sends[q_idx]);
if (net_device->destroy && num_outstanding_sends == 0)
wake_up(&net_device->wait_drain);
- if (netif_queue_stopped(ndev) && !net_device->start_remove &&
- (hv_ringbuf_avail_percent(&device->channel->outbound)
- > RING_AVAIL_PERCENT_HIWATER ||
- num_outstanding_sends < 1))
- netif_wake_queue(ndev);
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
+ !net_device->start_remove &&
+ (hv_ringbuf_avail_percent(&channel->outbound) >
+ RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
+ netif_tx_wake_queue(netdev_get_tx_queue(
+ ndev, q_idx));
} else {
netdev_err(ndev, "Unknown send completion packet type- "
"%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -498,6 +648,52 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
}
+static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
+{
+ unsigned long index;
+ u32 max_words = net_device->map_words;
+ unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
+ u32 section_cnt = net_device->send_section_cnt;
+ int ret_val = NETVSC_INVALID_INDEX;
+ int i;
+ int prev_val;
+
+ for (i = 0; i < max_words; i++) {
+ if (!~(map_addr[i]))
+ continue;
+ index = ffz(map_addr[i]);
+ prev_val = sync_test_and_set_bit(index, &map_addr[i]);
+ if (prev_val)
+ continue;
+ if ((index + (i * BITS_PER_LONG)) >= section_cnt)
+ break;
+ ret_val = (index + (i * BITS_PER_LONG));
+ break;
+ }
+ return ret_val;
+}
+
+u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ unsigned int section_index,
+ struct hv_netvsc_packet *packet)
+{
+ char *start = net_device->send_buf;
+ char *dest = (start + (section_index * net_device->send_section_size));
+ int i;
+ u32 msg_size = 0;
+
+ for (i = 0; i < packet->page_buf_cnt; i++) {
+ char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
+ u32 offset = packet->page_buf[i].offset;
+ u32 len = packet->page_buf[i].len;
+
+ memcpy(dest, (src + offset), len);
+ msg_size += len;
+ dest += len;
+ }
+ return msg_size;
+}
+
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet)
{
@@ -505,7 +701,12 @@ int netvsc_send(struct hv_device *device,
int ret = 0;
struct nvsp_message sendMessage;
struct net_device *ndev;
+ struct vmbus_channel *out_channel = NULL;
u64 req_id;
+ unsigned int section_index = NETVSC_INVALID_INDEX;
+ u32 msg_size = 0;
+ struct sk_buff *skb;
+
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -521,25 +722,46 @@ int netvsc_send(struct hv_device *device,
sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
}
- /* Not using send buffer section */
+ /* Attempt to send via sendbuf */
+ if (packet->total_data_buflen < net_device->send_section_size) {
+ section_index = netvsc_get_next_send_section(net_device);
+ if (section_index != NETVSC_INVALID_INDEX) {
+ msg_size = netvsc_copy_to_send_buf(net_device,
+ section_index,
+ packet);
+ skb = (struct sk_buff *)
+ (unsigned long)packet->send_completion_tid;
+ if (skb)
+ dev_kfree_skb_any(skb);
+ packet->page_buf_cnt = 0;
+ }
+ }
+ packet->send_buf_index = section_index;
+
+
sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
- 0xFFFFFFFF;
- sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
+ section_index;
+ sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;
- if (packet->completion.send.send_completion)
+ if (packet->send_completion)
req_id = (ulong)packet;
else
req_id = 0;
+ out_channel = net_device->chn_table[packet->q_idx];
+ if (out_channel == NULL)
+ out_channel = device->channel;
+ packet->channel = out_channel;
+
if (packet->page_buf_cnt) {
- ret = vmbus_sendpacket_pagebuffer(device->channel,
+ ret = vmbus_sendpacket_pagebuffer(out_channel,
packet->page_buf,
packet->page_buf_cnt,
&sendMessage,
sizeof(struct nvsp_message),
req_id);
} else {
- ret = vmbus_sendpacket(device->channel, &sendMessage,
+ ret = vmbus_sendpacket(out_channel, &sendMessage,
sizeof(struct nvsp_message),
req_id,
VM_PKT_DATA_INBAND,
@@ -548,17 +770,24 @@ int netvsc_send(struct hv_device *device,
if (ret == 0) {
atomic_inc(&net_device->num_outstanding_sends);
- if (hv_ringbuf_avail_percent(&device->channel->outbound) <
+ atomic_inc(&net_device->queue_sends[packet->q_idx]);
+
+ if (hv_ringbuf_avail_percent(&out_channel->outbound) <
RING_AVAIL_PERCENT_LOWATER) {
- netif_stop_queue(ndev);
+ netif_tx_stop_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
+
if (atomic_read(&net_device->
- num_outstanding_sends) < 1)
- netif_wake_queue(ndev);
+ queue_sends[packet->q_idx]) < 1)
+ netif_tx_wake_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
}
} else if (ret == -EAGAIN) {
- netif_stop_queue(ndev);
- if (atomic_read(&net_device->num_outstanding_sends) < 1) {
- netif_wake_queue(ndev);
+ netif_tx_stop_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
+ if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
+ netif_tx_wake_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
ret = -ENOSPC;
}
} else {
@@ -570,6 +799,7 @@ int netvsc_send(struct hv_device *device,
}
static void netvsc_send_recv_completion(struct hv_device *device,
+ struct vmbus_channel *channel,
struct netvsc_device *net_device,
u64 transaction_id, u32 status)
{
@@ -587,7 +817,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
retry_send_cmplt:
/* Send the completion */
- ret = vmbus_sendpacket(device->channel, &recvcompMessage,
+ ret = vmbus_sendpacket(channel, &recvcompMessage,
sizeof(struct nvsp_message), transaction_id,
VM_PKT_COMP, 0);
if (ret == 0) {
@@ -613,76 +843,20 @@ retry_send_cmplt:
}
}
-/* Send a receive completion packet to RNDIS device (ie NetVsp) */
-static void netvsc_receive_completion(void *context)
-{
- struct hv_netvsc_packet *packet = context;
- struct hv_device *device = packet->device;
- struct netvsc_device *net_device;
- u64 transaction_id = 0;
- bool fsend_receive_comp = false;
- unsigned long flags;
- struct net_device *ndev;
- u32 status = NVSP_STAT_NONE;
-
- /*
- * Even though it seems logical to do a GetOutboundNetDevice() here to
- * send out receive completion, we are using GetInboundNetDevice()
- * since we may have disable outbound traffic already.
- */
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
- ndev = net_device->ndev;
-
- /* Overloading use of the lock. */
- spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
-
- if (packet->status != NVSP_STAT_SUCCESS)
- packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
-
- packet->xfer_page_pkt->count--;
-
- /*
- * Last one in the line that represent 1 xfer page packet.
- * Return the xfer page packet itself to the freelist
- */
- if (packet->xfer_page_pkt->count == 0) {
- fsend_receive_comp = true;
- transaction_id = packet->completion.recv.recv_completion_tid;
- status = packet->xfer_page_pkt->status;
- list_add_tail(&packet->xfer_page_pkt->list_ent,
- &net_device->recv_pkt_list);
-
- }
-
- /* Put the packet back */
- list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
- spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
-
- /* Send a receive completion for the xfer page packet */
- if (fsend_receive_comp)
- netvsc_send_recv_completion(device, net_device, transaction_id,
- status);
-
-}
-
static void netvsc_receive(struct netvsc_device *net_device,
+ struct vmbus_channel *channel,
struct hv_device *device,
struct vmpacket_descriptor *packet)
{
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
- struct hv_netvsc_packet *netvsc_packet = NULL;
- /* struct netvsc_driver *netvscDriver; */
- struct xferpage_packet *xferpage_packet = NULL;
+ struct hv_netvsc_packet nv_pkt;
+ struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
+ u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
- unsigned long flags;
struct net_device *ndev;
- LIST_HEAD(listHead);
-
ndev = net_device->ndev;
/*
@@ -715,77 +889,14 @@ static void netvsc_receive(struct netvsc_device *net_device,
return;
}
- /*
- * Grab free packets (range count + 1) to represent this xfer
- * page packet. +1 to represent the xfer page packet itself.
- * We grab it here so that we know exactly how many we can
- * fulfil
- */
- spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
- while (!list_empty(&net_device->recv_pkt_list)) {
- list_move_tail(net_device->recv_pkt_list.next, &listHead);
- if (++count == vmxferpage_packet->range_cnt + 1)
- break;
- }
- spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
-
- /*
- * We need at least 2 netvsc pkts (1 to represent the xfer
- * page and at least 1 for the range) i.e. we can handled
- * some of the xfer page packet ranges...
- */
- if (count < 2) {
- netdev_err(ndev, "Got only %d netvsc pkt...needed "
- "%d pkts. Dropping this xfer page packet completely!\n",
- count, vmxferpage_packet->range_cnt + 1);
-
- /* Return it to the freelist */
- spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
- for (i = count; i != 0; i--) {
- list_move_tail(listHead.next,
- &net_device->recv_pkt_list);
- }
- spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
- flags);
-
- netvsc_send_recv_completion(device, net_device,
- vmxferpage_packet->d.trans_id,
- NVSP_STAT_FAIL);
-
- return;
- }
-
- /* Remove the 1st packet to represent the xfer page packet itself */
- xferpage_packet = (struct xferpage_packet *)listHead.next;
- list_del(&xferpage_packet->list_ent);
- xferpage_packet->status = NVSP_STAT_SUCCESS;
-
- /* This is how much we can satisfy */
- xferpage_packet->count = count - 1;
-
- if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
- netdev_err(ndev, "Needed %d netvsc pkts to satisfy "
- "this xfer page...got %d\n",
- vmxferpage_packet->range_cnt, xferpage_packet->count);
- }
+ count = vmxferpage_packet->range_cnt;
+ netvsc_packet->device = device;
+ netvsc_packet->channel = channel;
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
- for (i = 0; i < (count - 1); i++) {
- netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
- list_del(&netvsc_packet->list_ent);
-
+ for (i = 0; i < count; i++) {
/* Initialize the netvsc packet */
netvsc_packet->status = NVSP_STAT_SUCCESS;
- netvsc_packet->xfer_page_pkt = xferpage_packet;
- netvsc_packet->completion.recv.recv_completion =
- netvsc_receive_completion;
- netvsc_packet->completion.recv.recv_completion_ctx =
- netvsc_packet;
- netvsc_packet->device = device;
- /* Save this so that we can send it back */
- netvsc_packet->completion.recv.recv_completion_tid =
- vmxferpage_packet->d.trans_id;
-
netvsc_packet->data = (void *)((unsigned long)net_device->
recv_buf + vmxferpage_packet->ranges[i].byte_offset);
netvsc_packet->total_data_buflen =
@@ -794,16 +905,53 @@ static void netvsc_receive(struct netvsc_device *net_device,
/* Pass it to the upper layer */
rndis_filter_receive(device, netvsc_packet);
- netvsc_receive_completion(netvsc_packet->
- completion.recv.recv_completion_ctx);
+ if (netvsc_packet->status != NVSP_STAT_SUCCESS)
+ status = NVSP_STAT_FAIL;
+ }
+
+ netvsc_send_recv_completion(device, channel, net_device,
+ vmxferpage_packet->d.trans_id, status);
+}
+
+
+static void netvsc_send_table(struct hv_device *hdev,
+ struct vmpacket_descriptor *vmpkt)
+{
+ struct netvsc_device *nvscdev;
+ struct net_device *ndev;
+ struct nvsp_message *nvmsg;
+ int i;
+ u32 count, *tab;
+
+ nvscdev = get_outbound_net_device(hdev);
+ if (!nvscdev)
+ return;
+ ndev = nvscdev->ndev;
+
+ nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
+ (vmpkt->offset8 << 3));
+
+ if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
+ return;
+
+ count = nvmsg->msg.v5_msg.send_table.count;
+ if (count != VRSS_SEND_TAB_SIZE) {
+ netdev_err(ndev, "Received wrong send-table size:%u\n", count);
+ return;
}
+ tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
+ nvmsg->msg.v5_msg.send_table.offset);
+
+ for (i = 0; i < count; i++)
+ nvscdev->send_table[i] = tab[i];
}
-static void netvsc_channel_cb(void *context)
+void netvsc_channel_cb(void *context)
{
int ret;
- struct hv_device *device = context;
+ struct vmbus_channel *channel = (struct vmbus_channel *)context;
+ struct hv_device *device;
struct netvsc_device *net_device;
u32 bytes_recvd;
u64 request_id;
@@ -812,14 +960,19 @@ static void netvsc_channel_cb(void *context)
int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
+ if (channel->primary_channel != NULL)
+ device = channel->primary_channel->device_obj;
+ else
+ device = channel->device_obj;
+
net_device = get_inbound_net_device(device);
if (!net_device)
return;
ndev = net_device->ndev;
- buffer = net_device->cb_buffer;
+ buffer = get_per_channel_state(channel);
do {
- ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
+ ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
&bytes_recvd, &request_id);
if (ret == 0) {
if (bytes_recvd > 0) {
@@ -831,8 +984,12 @@ static void netvsc_channel_cb(void *context)
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(net_device,
- device, desc);
+ netvsc_receive(net_device, channel,
+ device, desc);
+ break;
+
+ case VM_PKT_DATA_INBAND:
+ netvsc_send_table(device, desc);
break;
default:
@@ -880,11 +1037,9 @@ static void netvsc_channel_cb(void *context)
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
int ret = 0;
- int i;
int ring_size =
((struct netvsc_device_info *)additional_info)->ring_size;
struct netvsc_device *net_device;
- struct hv_netvsc_packet *packet, *pos;
struct net_device *ndev;
net_device = alloc_net_device(device);
@@ -893,6 +1048,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
goto cleanup;
}
+ net_device->ring_size = ring_size;
+
/*
* Coming into this function, struct net_device * is
* registered as the driver private data.
@@ -903,24 +1060,14 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
ndev = net_device->ndev;
/* Initialize the NetVSC channel extension */
- spin_lock_init(&net_device->recv_pkt_list_lock);
-
- INIT_LIST_HEAD(&net_device->recv_pkt_list);
-
- for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
- packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
- if (!packet)
- break;
-
- list_add_tail(&packet->list_ent,
- &net_device->recv_pkt_list);
- }
init_completion(&net_device->channel_init_wait);
+ set_per_channel_state(device->channel, net_device->cb_buffer);
+
/* Open the channel */
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
ring_size * PAGE_SIZE, NULL, 0,
- netvsc_channel_cb, device);
+ netvsc_channel_cb, device->channel);
if (ret != 0) {
netdev_err(ndev, "unable to open channel: %d\n", ret);
@@ -930,6 +1077,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
/* Channel is opened */
pr_info("hv_netvsc channel opened successfully\n");
+ net_device->chn_table[0] = device->channel;
+
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device);
if (ret != 0) {
@@ -946,16 +1095,8 @@ close:
cleanup:
- if (net_device) {
- list_for_each_entry_safe(packet, pos,
- &net_device->recv_pkt_list,
- list_ent) {
- list_del(&packet->list_ent);
- kfree(packet);
- }
-
+ if (net_device)
kfree(net_device);
- }
return ret;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7918d5132c1f..1de3ef5dd5d2 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -101,7 +101,7 @@ static int netvsc_open(struct net_device *net)
return ret;
}
- netif_start_queue(net);
+ netif_tx_start_all_queues(net);
nvdev = hv_get_drvdata(device_obj);
rdev = nvdev->extension;
@@ -149,15 +149,98 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
return ppi;
}
+union sub_key {
+ u64 k;
+ struct {
+ u8 pad[3];
+ u8 kb;
+ u32 ka;
+ };
+};
+
+/* Toeplitz hash function
+ * data: network byte order
+ * return: host byte order
+ */
+static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen)
+{
+ union sub_key subk;
+ int k_next = 4;
+ u8 dt;
+ int i, j;
+ u32 ret = 0;
+
+ subk.k = 0;
+ subk.ka = ntohl(*(u32 *)key);
+
+ for (i = 0; i < dlen; i++) {
+ subk.kb = key[k_next];
+ k_next = (k_next + 1) % klen;
+ dt = data[i];
+ for (j = 0; j < 8; j++) {
+ if (dt & 0x80)
+ ret ^= subk.ka;
+ dt <<= 1;
+ subk.k <<= 1;
+ }
+ }
+
+ return ret;
+}
+
+static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
+{
+ struct iphdr *iphdr;
+ int data_len;
+ bool ret = false;
+
+ if (eth_hdr(skb)->h_proto != htons(ETH_P_IP))
+ return false;
+
+ iphdr = ip_hdr(skb);
+
+ if (iphdr->version == 4) {
+ if (iphdr->protocol == IPPROTO_TCP)
+ data_len = 12;
+ else
+ data_len = 8;
+ *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN,
+ (u8 *)&iphdr->saddr, data_len);
+ ret = true;
+ }
+
+ return ret;
+}
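For reference, a minimal sketch (not part of the patch; the struct and helper below are hypothetical) of the bytes that netvsc_set_hash() hands to comp_hash(): hashing starts at iphdr->saddr, so data_len = 12 covers the IPv4 source address, the destination address and, assuming a TCP header directly follows the IP header (no IPv4 options), the TCP source and destination ports; for non-TCP IPv4 traffic only the 8 address bytes are hashed.

/* Illustration only: the 12 bytes hashed for a TCP/IPv4 flow, laid out
 * exactly as they appear in the packet starting at iphdr->saddr.
 */
struct flow_tuple {
	__be32 saddr;			/* IPv4 source address      */
	__be32 daddr;			/* IPv4 destination address */
	__be16 sport;			/* TCP source port          */
	__be16 dport;			/* TCP destination port     */
} __packed;

static u32 example_flow_hash(struct flow_tuple *t)
{
	/* non-TCP IPv4 traffic would pass 8 here (addresses only) */
	return comp_hash(netvsc_hash_key, HASH_KEYLEN, (u8 *)t, 12);
}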
+
+static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct hv_device *hdev = net_device_ctx->device_ctx;
+ struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
+ u32 hash;
+ u16 q_idx = 0;
+
+ if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
+ return 0;
+
+ if (netvsc_set_hash(&hash, skb))
+ q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
+ ndev->real_num_tx_queues;
+
+ return q_idx;
+}
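A compact, hypothetical walk-through of the two-step mapping above (variable names mirror the function; the hash value is made up): the flow hash picks a slot in the host-supplied indirection table, which netvsc_send_table() in netvsc.c fills in from the NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE message, and the table entry is then reduced modulo the number of real TX queues. While the table is still all zeroes, every flow therefore maps to queue 0.

/* hypothetical values, for illustration only */
u32 hash = 0x0badcafe;				/* from netvsc_set_hash()    */
u32 slot = hash % VRSS_SEND_TAB_SIZE;		/* indirection-table slot    */
u16 q_idx = nvsc_dev->send_table[slot] %	/* host-chosen channel,      */
	    ndev->real_num_tx_queues;		/* clamped to real TX queues */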
+
static void netvsc_xmit_completion(void *context)
{
struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
struct sk_buff *skb = (struct sk_buff *)
- (unsigned long)packet->completion.send.send_completion_tid;
+ (unsigned long)packet->send_completion_tid;
+ u32 index = packet->send_buf_index;
kfree(packet);
- if (skb)
+ if (skb && (index == NETVSC_INVALID_INDEX))
dev_kfree_skb_any(skb);
}
@@ -333,6 +416,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->vlan_tci = skb->vlan_tci;
+ packet->q_idx = skb_get_queue_mapping(skb);
+
packet->is_data_pkt = true;
packet->total_data_buflen = skb->len;
@@ -341,9 +426,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
(num_data_pgs * sizeof(struct hv_page_buffer)));
/* Set the completion routine */
- packet->completion.send.send_completion = netvsc_xmit_completion;
- packet->completion.send.send_completion_ctx = packet;
- packet->completion.send.send_completion_tid = (unsigned long)skb;
+ packet->send_completion = netvsc_xmit_completion;
+ packet->send_completion_ctx = packet;
+ packet->send_completion_tid = (unsigned long)skb;
isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
@@ -558,6 +643,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
packet->vlan_tci);
+ skb_record_rx_queue(skb, packet->channel->
+ offermsg.offer.sub_channel_index);
+
net->stats.rx_packets++;
net->stats.rx_bytes += packet->total_data_buflen;
@@ -606,7 +694,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
hv_set_drvdata(hdev, ndev);
device_info.ring_size = ring_size;
rndis_filter_device_add(hdev, &device_info);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
return 0;
}
@@ -652,6 +740,7 @@ static const struct net_device_ops device_ops = {
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
+ .ndo_select_queue = netvsc_select_queue,
};
/*
@@ -698,9 +787,11 @@ static int netvsc_probe(struct hv_device *dev,
struct net_device *net = NULL;
struct net_device_context *net_device_ctx;
struct netvsc_device_info device_info;
+ struct netvsc_device *nvdev;
int ret;
- net = alloc_etherdev(sizeof(struct net_device_context));
+ net = alloc_etherdev_mq(sizeof(struct net_device_context),
+ num_online_cpus());
if (!net)
return -ENOMEM;
@@ -733,6 +824,10 @@ static int netvsc_probe(struct hv_device *dev,
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+ nvdev = hv_get_drvdata(dev);
+ netif_set_real_num_tx_queues(net, nvdev->num_chn);
+ netif_set_real_num_rx_queues(net, nvdev->num_chn);
+
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 143a98caf618..99c527adae5b 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -31,7 +31,7 @@
#include "hyperv_net.h"
-#define RNDIS_EXT_LEN 100
+#define RNDIS_EXT_LEN PAGE_SIZE
struct rndis_request {
struct list_head list_ent;
struct completion wait_event;
@@ -94,6 +94,8 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
rndis_msg->ndis_msg_type = msg_type;
rndis_msg->msg_len = msg_len;
+ request->pkt.q_idx = 0;
+
/*
* Set the request id. This field is always after the rndis header for
* request/response packet types so we just used the SetRequest as a
@@ -234,7 +236,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->page_buf[0].len;
}
- packet->completion.send.send_completion = NULL;
+ packet->send_completion = NULL;
ret = netvsc_send(dev->net_dev->dev, packet);
return ret;
@@ -399,8 +401,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
pkt->total_data_buflen = rndis_pkt->data_len;
pkt->data = (void *)((unsigned long)pkt->data + data_offset);
- pkt->is_data_pkt = true;
-
vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
if (vlan) {
pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
@@ -509,6 +509,19 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
query->info_buflen = 0;
query->dev_vc_handle = 0;
+ if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
+ struct ndis_recv_scale_cap *cap;
+
+ request->request_msg.msg_len +=
+ sizeof(struct ndis_recv_scale_cap);
+ query->info_buflen = sizeof(struct ndis_recv_scale_cap);
+ cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
+ query->info_buf_offset);
+ cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
+ cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
+ cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
+ }
+
ret = rndis_filter_send_request(dev, request);
if (ret != 0)
goto cleanup;
@@ -695,6 +708,89 @@ cleanup:
return ret;
}
+u8 netvsc_hash_key[HASH_KEYLEN] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+};
+
+int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
+{
+ struct net_device *ndev = rdev->net_dev->ndev;
+ struct rndis_request *request;
+ struct rndis_set_request *set;
+ struct rndis_set_complete *set_complete;
+ u32 extlen = sizeof(struct ndis_recv_scale_param) +
+ 4*ITAB_NUM + HASH_KEYLEN;
+ struct ndis_recv_scale_param *rssp;
+ u32 *itab;
+ u8 *keyp;
+ int i, t, ret;
+
+ request = get_rndis_request(
+ rdev, RNDIS_MSG_SET,
+ RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+ if (!request)
+ return -ENOMEM;
+
+ set = &request->request_msg.msg.set_req;
+ set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
+ set->info_buflen = extlen;
+ set->info_buf_offset = sizeof(struct rndis_set_request);
+ set->dev_vc_handle = 0;
+
+ rssp = (struct ndis_recv_scale_param *)(set + 1);
+ rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
+ rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
+ rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
+ rssp->flag = 0;
+ rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
+ NDIS_HASH_TCP_IPV4;
+ rssp->indirect_tabsize = 4*ITAB_NUM;
+ rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
+ rssp->hashkey_size = HASH_KEYLEN;
+ rssp->kashkey_offset = rssp->indirect_taboffset +
+ rssp->indirect_tabsize;
+
+ /* Set indirection table entries */
+ itab = (u32 *)(rssp + 1);
+ for (i = 0; i < ITAB_NUM; i++)
+ itab[i] = i % num_queue;
+
+ /* Set hash key values */
+ keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
+ for (i = 0; i < HASH_KEYLEN; i++)
+ keyp[i] = netvsc_hash_key[i];
+
+
+ ret = rndis_filter_send_request(rdev, request);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ netdev_err(ndev, "timeout before we got a set response...\n");
+ /* can't put_rndis_request, since we may still receive a
+ * send-completion.
+ */
+ return -ETIMEDOUT;
+ } else {
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
+ }
+ }
+
+cleanup:
+ put_rndis_request(rdev, request);
+ return ret;
+}
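For reference, the info buffer assembled above packs three regions back to back; the sketch below simply restates the offsets set in this function:

/* Layout of the OID_GEN_RECEIVE_SCALE_PARAMETERS info buffer:
 *
 *   +----------------------------------+  offset 0
 *   | struct ndis_recv_scale_param     |
 *   +----------------------------------+  indirect_taboffset = sizeof(*rssp)
 *   | u32 indirection table            |  4 * ITAB_NUM bytes
 *   +----------------------------------+  kashkey_offset =
 *   | Toeplitz hash key                |      sizeof(*rssp) + 4 * ITAB_NUM
 *   +----------------------------------+  (HASH_KEYLEN bytes)
 *
 * hence extlen = sizeof(*rssp) + 4 * ITAB_NUM + HASH_KEYLEN.
 */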
+
+
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
u32 size = sizeof(u32);
@@ -886,6 +982,28 @@ static int rndis_filter_close_device(struct rndis_device *dev)
return ret;
}
+static void netvsc_sc_open(struct vmbus_channel *new_sc)
+{
+ struct netvsc_device *nvscdev;
+ u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
+ int ret;
+
+ nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
+
+ if (chn_index >= nvscdev->num_chn)
+ return;
+
+ set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) *
+ NETVSC_PACKET_SIZE);
+
+ ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
+ nvscdev->ring_size * PAGE_SIZE, NULL, 0,
+ netvsc_channel_cb, new_sc);
+
+ if (ret == 0)
+ nvscdev->chn_table[chn_index] = new_sc;
+}
+
int rndis_filter_device_add(struct hv_device *dev,
void *additional_info)
{
@@ -894,6 +1012,10 @@ int rndis_filter_device_add(struct hv_device *dev,
struct rndis_device *rndis_device;
struct netvsc_device_info *device_info = additional_info;
struct ndis_offload_params offloads;
+ struct nvsp_message *init_packet;
+ int t;
+ struct ndis_recv_scale_cap rsscap;
+ u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -913,6 +1035,7 @@ int rndis_filter_device_add(struct hv_device *dev,
/* Initialize the rndis device */
net_device = hv_get_drvdata(dev);
+ net_device->num_chn = 1;
net_device->extension = rndis_device;
rndis_device->net_dev = net_device;
@@ -952,7 +1075,6 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret)
goto err_dev_remv;
-
rndis_filter_query_device_link_status(rndis_device);
device_info->link_state = rndis_device->link_state;
@@ -961,7 +1083,66 @@ int rndis_filter_device_add(struct hv_device *dev,
rndis_device->hw_mac_adr,
device_info->link_state ? "down" : "up");
- return ret;
+ if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
+ return 0;
+
+ /* vRSS setup */
+ memset(&rsscap, 0, rsscap_size);
+ ret = rndis_filter_query_device(rndis_device,
+ OID_GEN_RECEIVE_SCALE_CAPABILITIES,
+ &rsscap, &rsscap_size);
+ if (ret || rsscap.num_recv_que < 2)
+ goto out;
+
+ net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
+ num_online_cpus() : rsscap.num_recv_que;
+ if (net_device->num_chn == 1)
+ goto out;
+
+ net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) *
+ NETVSC_PACKET_SIZE);
+ if (!net_device->sub_cb_buf) {
+ net_device->num_chn = 1;
+ dev_info(&dev->device, "No memory for subchannels.\n");
+ goto out;
+ }
+
+ vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
+
+ init_packet = &net_device->channel_init_pkt;
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
+ init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
+ init_packet->msg.v5_msg.subchn_req.num_subchannels =
+ net_device->num_chn - 1;
+ ret = vmbus_sendpacket(dev->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ goto out;
+ t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ if (init_packet->msg.v5_msg.subchn_comp.status !=
+ NVSP_STAT_SUCCESS) {
+ ret = -ENODEV;
+ goto out;
+ }
+ net_device->num_chn = 1 +
+ init_packet->msg.v5_msg.subchn_comp.num_subchannels;
+
+ vmbus_are_subchannels_present(dev->channel);
+
+ ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
+
+out:
+ if (ret)
+ net_device->num_chn = 1;
+ return 0; /* return 0 because primary channel can be used alone */
err_dev_remv:
rndis_filter_device_remove(dev);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index e36f194673a4..4517b149ed07 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -692,10 +693,7 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
if (rc < 0)
goto err_rx;
- rc = at86rf230_start(dev);
-
- return rc;
-
+ return at86rf230_start(dev);
err_rx:
at86rf230_start(dev);
err:
@@ -963,33 +961,24 @@ static irqreturn_t at86rf230_isr_level(int irq, void *data)
return at86rf230_isr(irq, data);
}
-static int at86rf230_irq_polarity(struct at86rf230_local *lp, int pol)
-{
- return at86rf230_write_subreg(lp, SR_IRQ_POLARITY, pol);
-}
-
static int at86rf230_hw_init(struct at86rf230_local *lp)
{
- struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
- int rc, irq_pol;
- u8 status;
+ int rc, irq_pol, irq_type;
+ u8 dvdd;
u8 csma_seed[2];
- rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
- if (rc)
- return rc;
-
rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
if (rc)
return rc;
+ irq_type = irq_get_trigger_type(lp->spi->irq);
/* configure irq polarity, defaults to high active */
- if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
+ if (irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
irq_pol = IRQ_ACTIVE_LOW;
else
irq_pol = IRQ_ACTIVE_HIGH;
- rc = at86rf230_irq_polarity(lp, irq_pol);
+ rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol);
if (rc)
return rc;
@@ -1017,10 +1006,10 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
/* Wait the next SLEEP cycle */
msleep(100);
- rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
+ rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd);
if (rc)
return rc;
- if (!status) {
+ if (!dvdd) {
dev_err(&lp->spi->dev, "DVDD error\n");
return -EINVAL;
}
@@ -1032,7 +1021,6 @@ static struct at86rf230_platform_data *
at86rf230_get_pdata(struct spi_device *spi)
{
struct at86rf230_platform_data *pdata;
- const char *irq_type;
if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
return spi->dev.platform_data;
@@ -1044,19 +1032,6 @@ at86rf230_get_pdata(struct spi_device *spi)
pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
- pdata->irq_type = IRQF_TRIGGER_RISING;
- of_property_read_string(spi->dev.of_node, "irq-type", &irq_type);
- if (!strcmp(irq_type, "level-high"))
- pdata->irq_type = IRQF_TRIGGER_HIGH;
- else if (!strcmp(irq_type, "level-low"))
- pdata->irq_type = IRQF_TRIGGER_LOW;
- else if (!strcmp(irq_type, "edge-rising"))
- pdata->irq_type = IRQF_TRIGGER_RISING;
- else if (!strcmp(irq_type, "edge-falling"))
- pdata->irq_type = IRQF_TRIGGER_FALLING;
- else
- dev_warn(&spi->dev, "wrong irq-type specified using edge-rising\n");
-
spi->dev.platform_data = pdata;
done:
return pdata;
@@ -1071,7 +1046,7 @@ static int at86rf230_probe(struct spi_device *spi)
u8 part = 0, version = 0, status;
irq_handler_t irq_handler;
work_func_t irq_worker;
- int rc;
+ int rc, irq_type;
const char *chip;
struct ieee802154_ops *ops = NULL;
@@ -1087,27 +1062,17 @@ static int at86rf230_probe(struct spi_device *spi)
}
if (gpio_is_valid(pdata->rstn)) {
- rc = gpio_request(pdata->rstn, "rstn");
+ rc = devm_gpio_request_one(&spi->dev, pdata->rstn,
+ GPIOF_OUT_INIT_HIGH, "rstn");
if (rc)
return rc;
}
if (gpio_is_valid(pdata->slp_tr)) {
- rc = gpio_request(pdata->slp_tr, "slp_tr");
- if (rc)
- goto err_slp_tr;
- }
-
- if (gpio_is_valid(pdata->rstn)) {
- rc = gpio_direction_output(pdata->rstn, 1);
- if (rc)
- goto err_gpio_dir;
- }
-
- if (gpio_is_valid(pdata->slp_tr)) {
- rc = gpio_direction_output(pdata->slp_tr, 0);
+ rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr,
+ GPIOF_OUT_INIT_LOW, "slp_tr");
if (rc)
- goto err_gpio_dir;
+ return rc;
}
/* Reset */
@@ -1121,13 +1086,12 @@ static int at86rf230_probe(struct spi_device *spi)
rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
if (rc < 0)
- goto err_gpio_dir;
+ return rc;
if (man_id != 0x001f) {
dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
man_id >> 8, man_id & 0xFF);
- rc = -EINVAL;
- goto err_gpio_dir;
+ return -EINVAL;
}
switch (part) {
@@ -1154,16 +1118,12 @@ static int at86rf230_probe(struct spi_device *spi)
}
dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
- if (!ops) {
- rc = -ENOTSUPP;
- goto err_gpio_dir;
- }
+ if (!ops)
+ return -ENOTSUPP;
dev = ieee802154_alloc_device(sizeof(*lp), ops);
- if (!dev) {
- rc = -ENOMEM;
- goto err_gpio_dir;
- }
+ if (!dev)
+ return -ENOMEM;
lp = dev->priv;
lp->dev = dev;
@@ -1176,7 +1136,8 @@ static int at86rf230_probe(struct spi_device *spi)
dev->extra_tx_headroom = 0;
dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
- if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ irq_type = irq_get_trigger_type(spi->irq);
+ if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
irq_worker = at86rf230_irqwork;
irq_handler = at86rf230_isr;
} else {
@@ -1202,75 +1163,65 @@ static int at86rf230_probe(struct spi_device *spi)
if (rc)
goto err_hw_init;
- rc = request_irq(spi->irq, irq_handler,
- IRQF_SHARED | pdata->irq_type,
- dev_name(&spi->dev), lp);
+ /* Read irq status register to reset irq line */
+ rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
if (rc)
goto err_hw_init;
- /* Read irq status register to reset irq line */
- rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
+ rc = devm_request_irq(&spi->dev, spi->irq, irq_handler, IRQF_SHARED,
+ dev_name(&spi->dev), lp);
if (rc)
- goto err_irq;
+ goto err_hw_init;
rc = ieee802154_register_device(lp->dev);
if (rc)
- goto err_irq;
+ goto err_hw_init;
return rc;
-err_irq:
- free_irq(spi->irq, lp);
err_hw_init:
flush_work(&lp->irqwork);
- spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
-err_gpio_dir:
- if (gpio_is_valid(pdata->slp_tr))
- gpio_free(pdata->slp_tr);
-err_slp_tr:
- if (gpio_is_valid(pdata->rstn))
- gpio_free(pdata->rstn);
return rc;
}
static int at86rf230_remove(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
- struct at86rf230_platform_data *pdata = spi->dev.platform_data;
/* mask all at86rf230 irq's */
at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
ieee802154_unregister_device(lp->dev);
-
- free_irq(spi->irq, lp);
flush_work(&lp->irqwork);
-
- if (gpio_is_valid(pdata->slp_tr))
- gpio_free(pdata->slp_tr);
- if (gpio_is_valid(pdata->rstn))
- gpio_free(pdata->rstn);
-
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
-
dev_dbg(&spi->dev, "unregistered at86rf230\n");
+
return 0;
}
-#if IS_ENABLED(CONFIG_OF)
-static struct of_device_id at86rf230_of_match[] = {
+static const struct of_device_id at86rf230_of_match[] = {
{ .compatible = "atmel,at86rf230", },
{ .compatible = "atmel,at86rf231", },
{ .compatible = "atmel,at86rf233", },
{ .compatible = "atmel,at86rf212", },
{ },
};
-#endif
+MODULE_DEVICE_TABLE(of, at86rf230_of_match);
+
+static const struct spi_device_id at86rf230_device_id[] = {
+ { .name = "at86rf230", },
+ { .name = "at86rf231", },
+ { .name = "at86rf233", },
+ { .name = "at86rf212", },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, at86rf230_device_id);
static struct spi_driver at86rf230_driver = {
+ .id_table = at86rf230_device_id,
.driver = {
.of_match_table = of_match_ptr(at86rf230_of_match),
.name = "at86rf230",
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index e641bb240362..11dbdf36d9c1 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -62,10 +62,6 @@
#include "w83977af.h"
#include "w83977af_ir.h"
-#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
-#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
-#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
-#endif
#define CONFIG_USE_W977_PNP /* Currently needed */
#define PIO_MAX_SPEED 115200
@@ -332,7 +328,7 @@ static int w83977af_probe(int iobase, int irq, int dma)
w977_write_reg(0x74, dma+1, efbase[i]);
#else
w977_write_reg(0x74, dma, efbase[i]);
-#endif /*CONFIG_ARCH_NETWINDER */
+#endif /* CONFIG_ARCH_NETWINDER */
w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
/* Set append hardware CRC, enable IR bank selection */
@@ -563,10 +559,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
__u8 set;
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
- unsigned long flags;
- __u8 hcr;
-#endif
IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
/* Save current set */
@@ -579,30 +571,13 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
/* Choose transmit DMA channel */
switch_bank(iobase, SET2);
outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
- spin_lock_irqsave(&self->lock, flags);
-
- disable_dma(self->io.dma);
- clear_dma_ff(self->io.dma);
- set_dma_mode(self->io.dma, DMA_MODE_READ);
- set_dma_addr(self->io.dma, self->tx_buff_dma);
- set_dma_count(self->io.dma, self->tx_buff.len);
-#else
irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
DMA_MODE_WRITE);
-#endif
self->io.direction = IO_XMIT;
/* Enable DMA */
switch_bank(iobase, SET0);
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
- hcr = inb(iobase+HCR);
- outb(hcr | HCR_EN_DMA, iobase+HCR);
- enable_dma(self->io.dma);
- spin_unlock_irqrestore(&self->lock, flags);
-#else
outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
-#endif
/* Restore set register */
outb(set, iobase+SSR);
@@ -711,7 +686,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
{
int iobase;
__u8 set;
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
unsigned long flags;
__u8 hcr;
#endif
@@ -736,7 +711,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
self->io.direction = IO_RECV;
self->rx_buff.data = self->rx_buff.head;
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
spin_lock_irqsave(&self->lock, flags);
disable_dma(self->io.dma);
@@ -759,7 +734,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
/* Enable DMA */
switch_bank(iobase, SET0);
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
hcr = inb(iobase+HCR);
outb(hcr | HCR_EN_DMA, iobase+HCR);
enable_dma(self->io.dma);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b0e2865a6810..f0118d1a3e46 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -30,6 +30,7 @@
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <linux/hash.h>
+#include <linux/workqueue.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
@@ -40,10 +41,18 @@ struct macvlan_port {
struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
struct list_head vlans;
struct rcu_head rcu;
+ struct sk_buff_head bc_queue;
+ struct work_struct bc_work;
bool passthru;
int count;
};
+struct macvlan_skb_cb {
+ const struct macvlan_dev *src;
+};
+
+#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
+
static void macvlan_port_destroy(struct net_device *dev);
static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
@@ -120,7 +129,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
struct net_device *dev = vlan->dev;
if (local)
- return dev_forward_skb(dev, skb);
+ return __dev_forward_skb(dev, skb);
skb->dev = dev;
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@ -128,7 +137,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
else
skb->pkt_type = PACKET_MULTICAST;
- return netif_rx(skb);
+ return 0;
}
static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@ -175,32 +184,32 @@ static void macvlan_broadcast(struct sk_buff *skb,
if (likely(nskb))
err = macvlan_broadcast_one(
nskb, vlan, eth,
- mode == MACVLAN_MODE_BRIDGE);
+ mode == MACVLAN_MODE_BRIDGE) ?:
+ netif_rx_ni(nskb);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, 1);
}
}
}
-/* called under rcu_read_lock() from netif_receive_skb */
-static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+static void macvlan_process_broadcast(struct work_struct *w)
{
- struct macvlan_port *port;
- struct sk_buff *skb = *pskb;
- const struct ethhdr *eth = eth_hdr(skb);
- const struct macvlan_dev *vlan;
- const struct macvlan_dev *src;
- struct net_device *dev;
- unsigned int len = 0;
- int ret = NET_RX_DROP;
+ struct macvlan_port *port = container_of(w, struct macvlan_port,
+ bc_work);
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+
+ skb_queue_head_init(&list);
+
+ spin_lock_bh(&port->bc_queue.lock);
+ skb_queue_splice_tail_init(&port->bc_queue, &list);
+ spin_unlock_bh(&port->bc_queue.lock);
+
+ while ((skb = __skb_dequeue(&list))) {
+ const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+ rcu_read_lock();
- port = macvlan_port_get_rcu(skb->dev);
- if (is_multicast_ether_addr(eth->h_dest)) {
- skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
- if (!skb)
- return RX_HANDLER_CONSUMED;
- eth = eth_hdr(skb);
- src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
/* frame comes from an external address */
macvlan_broadcast(skb, port, NULL,
@@ -213,20 +222,80 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
macvlan_broadcast(skb, port, src->dev,
MACVLAN_MODE_VEPA |
MACVLAN_MODE_BRIDGE);
- else if (src->mode == MACVLAN_MODE_BRIDGE)
+ else
/*
* flood only to VEPA ports, bridge ports
* already saw the frame on the way out.
*/
macvlan_broadcast(skb, port, src->dev,
MACVLAN_MODE_VEPA);
- else {
+
+ rcu_read_unlock();
+
+ kfree_skb(skb);
+ }
+}
+
+static void macvlan_broadcast_enqueue(struct macvlan_port *port,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ int err = -ENOMEM;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ goto err;
+
+ spin_lock(&port->bc_queue.lock);
+ if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
+ __skb_queue_tail(&port->bc_queue, nskb);
+ err = 0;
+ }
+ spin_unlock(&port->bc_queue.lock);
+
+ if (err)
+ goto free_nskb;
+
+ schedule_work(&port->bc_work);
+ return;
+
+free_nskb:
+ kfree_skb(nskb);
+err:
+ atomic_long_inc(&skb->dev->rx_dropped);
+}
+
+/* called under rcu_read_lock() from netif_receive_skb */
+static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+{
+ struct macvlan_port *port;
+ struct sk_buff *skb = *pskb;
+ const struct ethhdr *eth = eth_hdr(skb);
+ const struct macvlan_dev *vlan;
+ const struct macvlan_dev *src;
+ struct net_device *dev;
+ unsigned int len = 0;
+ int ret = NET_RX_DROP;
+
+ port = macvlan_port_get_rcu(skb->dev);
+ if (is_multicast_ether_addr(eth->h_dest)) {
+ skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
+ eth = eth_hdr(skb);
+ src = macvlan_hash_lookup(port, eth->h_source);
+ if (src && src->mode != MACVLAN_MODE_VEPA &&
+ src->mode != MACVLAN_MODE_BRIDGE) {
/* forward to original port. */
vlan = src;
- ret = macvlan_broadcast_one(skb, vlan, eth, 0);
+ ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
+ netif_rx(skb);
goto out;
}
+ MACVLAN_SKB_CB(skb)->src = src;
+ macvlan_broadcast_enqueue(port, skb);
+
return RX_HANDLER_PASS;
}
@@ -761,6 +830,9 @@ static int macvlan_port_create(struct net_device *dev)
for (i = 0; i < MACVLAN_HASH_SIZE; i++)
INIT_HLIST_HEAD(&port->vlan_hash[i]);
+ skb_queue_head_init(&port->bc_queue);
+ INIT_WORK(&port->bc_work, macvlan_process_broadcast);
+
err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
if (err)
kfree(port);
@@ -773,6 +845,7 @@ static void macvlan_port_destroy(struct net_device *dev)
{
struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+ cancel_work_sync(&port->bc_work);
dev->priv_flags &= ~IFF_MACVLAN_PORT;
netdev_rx_handler_unregister(dev);
kfree_rcu(port, rcu);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 643464d5a727..6c622aedbae1 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -144,41 +144,11 @@ static int at803x_resume(struct phy_device *phydev)
static int at803x_config_init(struct phy_device *phydev)
{
- int val;
int ret;
- u32 features;
-
- features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
- SUPPORTED_FIBRE | SUPPORTED_BNC;
-
- val = phy_read(phydev, MII_BMSR);
- if (val < 0)
- return val;
-
- if (val & BMSR_ANEGCAPABLE)
- features |= SUPPORTED_Autoneg;
- if (val & BMSR_100FULL)
- features |= SUPPORTED_100baseT_Full;
- if (val & BMSR_100HALF)
- features |= SUPPORTED_100baseT_Half;
- if (val & BMSR_10FULL)
- features |= SUPPORTED_10baseT_Full;
- if (val & BMSR_10HALF)
- features |= SUPPORTED_10baseT_Half;
-
- if (val & BMSR_ESTATEN) {
- val = phy_read(phydev, MII_ESTATUS);
- if (val < 0)
- return val;
-
- if (val & ESTATUS_1000_TFULL)
- features |= SUPPORTED_1000baseT_Full;
- if (val & ESTATUS_1000_THALF)
- features |= SUPPORTED_1000baseT_Half;
- }
- phydev->supported = features;
- phydev->advertising = features;
+ ret = genphy_config_init(phydev);
+ if (ret < 0)
+ return ret;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
ret = phy_write(phydev, AT803X_DEBUG_ADDR,
@@ -283,8 +253,7 @@ static int __init atheros_init(void)
static void __exit atheros_exit(void)
{
- return phy_drivers_unregister(at803x_driver,
- ARRAY_SIZE(at803x_driver));
+ phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
}
module_init(atheros_init);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 76f54b32a120..a6284964b711 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -69,6 +69,73 @@ struct mii_bus *mdiobus_alloc_size(size_t size)
}
EXPORT_SYMBOL(mdiobus_alloc_size);
+static void _devm_mdiobus_free(struct device *dev, void *res)
+{
+ mdiobus_free(*(struct mii_bus **)res);
+}
+
+static int devm_mdiobus_match(struct device *dev, void *res, void *data)
+{
+ struct mii_bus **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+/**
+ * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size()
+ * @dev: Device to allocate mii_bus for
+ * @sizeof_priv: Space to allocate for private structure.
+ *
+ * Managed mdiobus_alloc_size. mii_bus allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an mii_bus allocated with this function needs to be freed separately,
+ * devm_mdiobus_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated mii_bus on success, NULL on failure.
+ */
+struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv)
+{
+ struct mii_bus **ptr, *bus;
+
+ ptr = devres_alloc(_devm_mdiobus_free, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ /* allocate the bus; devres_add() below ties its lifetime to @dev */
+ bus = mdiobus_alloc_size(sizeof_priv);
+ if (bus) {
+ *ptr = bus;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return bus;
+}
+EXPORT_SYMBOL_GPL(devm_mdiobus_alloc_size);
+
+/**
+ * devm_mdiobus_free - Resource-managed mdiobus_free()
+ * @dev: Device this mii_bus belongs to
+ * @bus: the mii_bus associated with the device
+ *
+ * Free mii_bus allocated with devm_mdiobus_alloc_size().
+ */
+void devm_mdiobus_free(struct device *dev, struct mii_bus *bus)
+{
+ int rc;
+
+ rc = devres_release(dev, _devm_mdiobus_free,
+ devm_mdiobus_match, bus);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_mdiobus_free);
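A hypothetical usage sketch of the new helper (driver name, register accessors and priv struct are made up; platform_driver boilerplate omitted): a bus allocated with devm_mdiobus_alloc_size() is freed automatically when the driver detaches, so neither the probe error path nor .remove() needs mdiobus_free(); mdiobus_unregister() is still required.

#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

struct foo_mdio_priv {
	void __iomem *regs;			/* assumed MMIO window */
};

static int foo_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	return 0xffff;				/* stub accessor */
}

static int foo_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return 0;				/* stub accessor */
}

static int foo_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc_size(&pdev->dev,
				      sizeof(struct foo_mdio_priv));
	if (!bus)
		return -ENOMEM;

	bus->name = "foo-mdio";
	bus->read = foo_mdio_read;
	bus->write = foo_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));

	platform_set_drvdata(pdev, bus);
	return mdiobus_register(bus);		/* no mdiobus_free() on error */
}

static int foo_mdio_remove(struct platform_device *pdev)
{
	mdiobus_unregister(platform_get_drvdata(pdev));
	return 0;				/* bus memory freed by devres */
}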
+
/**
* mdiobus_release - mii_bus device release callback
* @d: the target struct device that contains the mii_bus
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index d849684231c1..bc7c7d2f75f2 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -283,6 +283,110 @@ static int ksz9021_config_init(struct phy_device *phydev)
return 0;
}
+#define MII_KSZ9031RN_MMD_CTRL_REG 0x0d
+#define MII_KSZ9031RN_MMD_REGDATA_REG 0x0e
+#define OP_DATA 1
+#define KSZ9031_PS_TO_REG 60
+
+/* Extended registers */
+#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
+#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
+#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
+#define MII_KSZ9031RN_CLK_PAD_SKEW 8
+
+static int ksz9031_extended_write(struct phy_device *phydev,
+ u8 mode, u32 dev_addr, u32 regnum, u16 val)
+{
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
+ phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
+ return phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, val);
+}
+
+static int ksz9031_extended_read(struct phy_device *phydev,
+ u8 mode, u32 dev_addr, u32 regnum)
+{
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
+ phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
+ return phy_read(phydev, MII_KSZ9031RN_MMD_REGDATA_REG);
+}
+
+static int ksz9031_of_load_skew_values(struct phy_device *phydev,
+ struct device_node *of_node,
+ u16 reg, size_t field_sz,
+ char *field[], u8 numfields)
+{
+ int val[4] = {-1, -2, -3, -4};
+ int matches = 0;
+ u16 mask;
+ u16 maxval;
+ u16 newval;
+ int i;
+
+ for (i = 0; i < numfields; i++)
+ if (!of_property_read_u32(of_node, field[i], val + i))
+ matches++;
+
+ if (!matches)
+ return 0;
+
+ if (matches < numfields)
+ newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
+ else
+ newval = 0;
+
+ maxval = (field_sz == 4) ? 0xf : 0x1f;
+ for (i = 0; i < numfields; i++)
+ if (val[i] != -(i + 1)) {
+ mask = 0xffff;
+ mask ^= maxval << (field_sz * i);
+ newval = (newval & mask) |
+ (((val[i] / KSZ9031_PS_TO_REG) & maxval)
+ << (field_sz * i));
+ }
+
+ return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
+}
+
+static int ksz9031_config_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+ char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
+ char *rx_data_skews[4] = {
+ "rxd0-skew-ps", "rxd1-skew-ps",
+ "rxd2-skew-ps", "rxd3-skew-ps"
+ };
+ char *tx_data_skews[4] = {
+ "txd0-skew-ps", "txd1-skew-ps",
+ "txd2-skew-ps", "txd3-skew-ps"
+ };
+ char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (of_node) {
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_CLK_PAD_SKEW, 5,
+ clk_skews, 2);
+
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_CONTROL_PAD_SKEW, 4,
+ control_skews, 2);
+
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_RX_DATA_PAD_SKEW, 4,
+ rx_data_skews, 4);
+
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
+ tx_data_skews, 4);
+ }
+ return 0;
+}
+
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
@@ -469,7 +573,7 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = ksz9031_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0ce606624296..466ae3e06322 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1067,7 +1067,7 @@ int genphy_soft_reset(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_soft_reset);
-static int genphy_config_init(struct phy_device *phydev)
+int genphy_config_init(struct phy_device *phydev)
{
int val;
u32 features;
@@ -1118,6 +1118,7 @@ static int gen10g_soft_reset(struct phy_device *phydev)
/* Do nothing for now */
return 0;
}
+EXPORT_SYMBOL(genphy_config_init);
static int gen10g_config_init(struct phy_device *phydev)
{
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 11f34813e23f..180c49479c42 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -249,8 +249,7 @@ static int __init smsc_init(void)
static void __exit smsc_exit(void)
{
- return phy_drivers_unregister(smsc_phy_driver,
- ARRAY_SIZE(smsc_phy_driver));
+ phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver));
}
MODULE_DESCRIPTION("SMSC PHY driver");
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 14372c65a7e8..5dc0935da99c 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -319,8 +319,7 @@ static int __init vsc82xx_init(void)
static void __exit vsc82xx_exit(void)
{
- return phy_drivers_unregister(vsc82xx_driver,
- ARRAY_SIZE(vsc82xx_driver));
+ phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver));
}
module_init(vsc82xx_init);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8a852b5f215f..b852bb368acc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1724,6 +1724,13 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
+ if (vi->any_header_sg) {
+ if (vi->mergeable_rx_bufs)
+ dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ else
+ dev->needed_headroom = sizeof(struct virtio_net_hdr);
+ }
+
/* Use single tx/rx queue pair as default */
vi->curr_queue_pairs = 1;
vi->max_queue_pairs = max_queue_pairs;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 4dbb2ed85b97..1dfee9a7fbf7 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -127,6 +127,7 @@ struct vxlan_dev {
struct list_head next; /* vxlan's per namespace list */
struct vxlan_sock *vn_sock; /* listening socket */
struct net_device *dev;
+ struct net *net; /* netns for packet i/o */
struct vxlan_rdst default_dst; /* default destination */
union vxlan_addr saddr; /* source address */
__be16 dst_port;
@@ -1203,6 +1204,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
remote_ip = &vxlan->default_dst.remote_ip;
skb_reset_mac_header(skb);
+ skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
skb->protocol = eth_type_trans(skb, vxlan->dev);
/* Ignore packet loops (and multicast echo) */
@@ -1618,7 +1620,8 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
struct dst_entry *dst, struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr, __u8 prio, __u8 ttl,
- __be16 src_port, __be16 dst_port, __be32 vni)
+ __be16 src_port, __be16 dst_port, __be32 vni,
+ bool xnet)
{
struct ipv6hdr *ip6h;
struct vxlanhdr *vxh;
@@ -1631,7 +1634,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
skb->encapsulation = 1;
}
- skb_scrub_packet(skb, false);
+ skb_scrub_packet(skb, xnet);
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ VXLAN_HLEN + sizeof(struct ipv6hdr)
@@ -1711,7 +1714,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
int vxlan_xmit_skb(struct vxlan_sock *vs,
struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, __be32 vni)
+ __be16 src_port, __be16 dst_port, __be32 vni, bool xnet)
{
struct vxlanhdr *vxh;
struct udphdr *uh;
@@ -1760,7 +1763,7 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
return err;
return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
- tos, ttl, df, false);
+ tos, ttl, df, xnet);
}
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
@@ -1853,7 +1856,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
fl4.daddr = dst->sin.sin_addr.s_addr;
fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
- rt = ip_route_output_key(dev_net(dev), &fl4);
+ rt = ip_route_output_key(vxlan->net, &fl4);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n",
&dst->sin.sin_addr.s_addr);
@@ -1874,7 +1877,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *dst_vxlan;
ip_rt_put(rt);
- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+ dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1887,7 +1890,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
fl4.saddr, dst->sin.sin_addr.s_addr,
tos, ttl, df, src_port, dst_port,
- htonl(vni << 8));
+ htonl(vni << 8),
+ !net_eq(vxlan->net, dev_net(vxlan->dev)));
if (err < 0)
goto rt_tx_error;
@@ -1927,7 +1931,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *dst_vxlan;
dst_release(ndst);
- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+ dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1938,7 +1942,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
dev, &fl6.saddr, &fl6.daddr, 0, ttl,
- src_port, dst_port, htonl(vni << 8));
+ src_port, dst_port, htonl(vni << 8),
+ !net_eq(vxlan->net, dev_net(vxlan->dev)));
#endif
}
@@ -2082,7 +2087,7 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
static int vxlan_init(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -2090,7 +2095,7 @@ static int vxlan_init(struct net_device *dev)
return -ENOMEM;
spin_lock(&vn->sock_lock);
- vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
+ vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
if (vs) {
/* If we have a socket with same port already, reuse it */
atomic_inc(&vs->refcnt);
@@ -2172,8 +2177,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs = vxlan->vn_sock;
if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
@@ -2202,7 +2207,7 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
struct net_device *lowerdev;
int max_mtu;
- lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
+ lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
if (lowerdev == NULL)
return eth_change_mtu(dev, new_mtu);
@@ -2285,7 +2290,6 @@ static void vxlan_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
@@ -2578,7 +2582,7 @@ EXPORT_SYMBOL_GPL(vxlan_sock_add);
static void vxlan_sock_work(struct work_struct *work)
{
struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
- struct net *net = dev_net(vxlan->dev);
+ struct net *net = vxlan->net;
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
__be16 port = vxlan->dst_port;
struct vxlan_sock *nvs;
@@ -2605,6 +2609,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (!data[IFLA_VXLAN_ID])
return -EINVAL;
+ vxlan->net = dev_net(dev);
+
vni = nla_get_u32(data[IFLA_VXLAN_ID]);
dst->remote_vni = vni;
@@ -2739,8 +2745,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
spin_lock(&vn->sock_lock);
if (!hlist_unhashed(&vxlan->hlist))
@@ -2905,8 +2911,33 @@ static __net_init int vxlan_init_net(struct net *net)
return 0;
}
+static void __net_exit vxlan_exit_net(struct net *net)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_dev *vxlan, *next;
+ struct net_device *dev, *aux;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == &vxlan_link_ops)
+ unregister_netdevice_queue(dev, &list);
+
+ list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
+ /* If vxlan->dev is in the same netns, it has already been added
+ * to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(vxlan->dev), net))
+ unregister_netdevice_queue(vxlan->dev, &list);
+ }
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
+ .exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 507d9a9ee69a..f92050617ae6 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1090,7 +1090,8 @@ static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
-static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ar5523 *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index a1f099628850..17d221abd58c 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -175,7 +175,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
return 0;
}
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
struct bmi_cmd cmd;
union bmi_resp resp;
@@ -184,7 +184,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
- address, *param);
+ address, param);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
@@ -193,7 +193,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
- cmd.execute.param = __cpu_to_le32(*param);
+ cmd.execute.param = __cpu_to_le32(param);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
@@ -204,10 +204,13 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
if (resplen < sizeof(resp.execute)) {
ath10k_warn("invalid execute response length (%d)\n",
resplen);
- return ret;
+ return -EIO;
}
- *param = __le32_to_cpu(resp.execute.result);
+ *result = __le32_to_cpu(resp.execute.result);
+
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 8d81ce1cec21..3a9bdf51c96a 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -217,7 +217,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
ret; \
})
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a79499c82350..1e4cad8632b5 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -840,35 +840,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_ring *src_ring;
- unsigned int nentries = attr->src_nentries;
- unsigned int ce_nbytes;
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
- dma_addr_t base_addr;
- char *ptr;
-
- nentries = roundup_pow_of_two(nentries);
-
- if (ce_state->src_ring) {
- WARN_ON(ce_state->src_ring->nentries != nentries);
- return 0;
- }
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
- ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
- ptr = kzalloc(ce_nbytes, GFP_KERNEL);
- if (ptr == NULL)
- return -ENOMEM;
+ nentries = roundup_pow_of_two(attr->src_nentries);
- ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
- src_ring = ce_state->src_ring;
-
- ptr += sizeof(struct ath10k_ce_ring);
- src_ring->nentries = nentries;
- src_ring->nentries_mask = nentries - 1;
+ memset(src_ring->per_transfer_context, 0,
+ nentries * sizeof(*src_ring->per_transfer_context));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
@@ -878,21 +860,87 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
- src_ring->per_transfer_context = (void **)ptr;
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+ src_ring->base_addr_ce_space);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+ ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot init ce src ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ memset(dest_ring->per_transfer_context, 0,
+ nentries * sizeof(*dest_ring->per_transfer_context));
+
+ dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+ dest_ring->sw_index &= dest_ring->nentries_mask;
+ dest_ring->write_index =
+ ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+ dest_ring->write_index &= dest_ring->nentries_mask;
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+ dest_ring->base_addr_ce_space);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *src_ring;
+ u32 nentries = attr->src_nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ src_ring = kzalloc(sizeof(*src_ring) +
+ (nentries *
+ sizeof(*src_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (src_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
/*
* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
src_ring->base_addr_owner_space_unaligned =
- pci_alloc_consistent(ar_pci->pdev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr);
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
if (!src_ring->base_addr_owner_space_unaligned) {
- kfree(ce_state->src_ring);
- ce_state->src_ring = NULL;
- return -ENOMEM;
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
}
src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -912,88 +960,54 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
kmalloc((nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), GFP_KERNEL);
if (!src_ring->shadow_base_unaligned) {
- pci_free_consistent(ar_pci->pdev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- src_ring->base_addr_owner_space,
- src_ring->base_addr_ce_space);
- kfree(ce_state->src_ring);
- ce_state->src_ring = NULL;
- return -ENOMEM;
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space,
+ src_ring->base_addr_ce_space);
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
}
src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
- src_ring->base_addr_ce_space);
- ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
- ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
- ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
- ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
- ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-
- ath10k_dbg(ATH10K_DBG_BOOT,
- "boot ce src ring id %d entries %d base_addr %p\n",
- ce_id, nentries, src_ring->base_addr_owner_space);
-
- return 0;
+ return src_ring;
}
-static int ath10k_ce_init_dest_ring(struct ath10k *ar,
- unsigned int ce_id,
- struct ath10k_ce_pipe *ce_state,
- const struct ce_attr *attr)
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_ring *dest_ring;
- unsigned int nentries = attr->dest_nentries;
- unsigned int ce_nbytes;
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ u32 nentries;
dma_addr_t base_addr;
- char *ptr;
- nentries = roundup_pow_of_two(nentries);
+ nentries = roundup_pow_of_two(attr->dest_nentries);
- if (ce_state->dest_ring) {
- WARN_ON(ce_state->dest_ring->nentries != nentries);
- return 0;
- }
-
- ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
- ptr = kzalloc(ce_nbytes, GFP_KERNEL);
- if (ptr == NULL)
- return -ENOMEM;
-
- ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
- dest_ring = ce_state->dest_ring;
+ dest_ring = kzalloc(sizeof(*dest_ring) +
+ (nentries *
+ sizeof(*dest_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (dest_ring == NULL)
+ return ERR_PTR(-ENOMEM);
- ptr += sizeof(struct ath10k_ce_ring);
dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1;
- dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
- dest_ring->sw_index &= dest_ring->nentries_mask;
- dest_ring->write_index =
- ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
- dest_ring->write_index &= dest_ring->nentries_mask;
-
- dest_ring->per_transfer_context = (void **)ptr;
-
/*
* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
dest_ring->base_addr_owner_space_unaligned =
- pci_alloc_consistent(ar_pci->pdev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr);
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
- kfree(ce_state->dest_ring);
- ce_state->dest_ring = NULL;
- return -ENOMEM;
+ kfree(dest_ring);
+ return ERR_PTR(-ENOMEM);
}
dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1012,39 +1026,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
- dest_ring->base_addr_ce_space);
- ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
- ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
- ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
- ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-
- ath10k_dbg(ATH10K_DBG_BOOT,
- "boot ce dest ring id %d entries %d base_addr %p\n",
- ce_id, nentries, dest_ring->base_addr_owner_space);
-
- return 0;
-}
-
-static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
- unsigned int ce_id,
- const struct ce_attr *attr)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-
- spin_lock_bh(&ar_pci->ce_lock);
-
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ctrl_addr;
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
-
- spin_unlock_bh(&ar_pci->ce_lock);
-
- return ce_state;
+ return dest_ring;
}
/*
@@ -1054,11 +1036,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
* initialization. It may be that only one side or the other is
* initialized by software/firmware.
*/
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
- unsigned int ce_id,
- const struct ce_attr *attr)
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
{
- struct ath10k_ce_pipe *ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
/*
@@ -1074,64 +1056,128 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ret = ath10k_pci_wake(ar);
if (ret)
- return NULL;
+ return ret;
- ce_state = ath10k_ce_init_state(ar, ce_id, attr);
- if (!ce_state) {
- ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
- goto out;
- }
+ spin_lock_bh(&ar_pci->ce_lock);
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+ spin_unlock_bh(&ar_pci->ce_lock);
if (attr->src_nentries) {
- ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+ ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret);
- ath10k_ce_deinit(ce_state);
- ce_state = NULL;
goto out;
}
}
if (attr->dest_nentries) {
- ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret);
- ath10k_ce_deinit(ce_state);
- ce_state = NULL;
goto out;
}
}
out:
ath10k_pci_sleep(ar);
- return ce_state;
+ return ret;
}
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
+
+ ath10k_ce_deinit_src_ring(ar, ce_id);
+ ath10k_ce_deinit_dest_ring(ar, ce_id);
+
+ ath10k_pci_sleep(ar);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ int ret;
+
+ if (attr->src_nentries) {
+ ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+ if (IS_ERR(ce_state->src_ring)) {
+ ret = PTR_ERR(ce_state->src_ring);
+ ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+ ce_id, ret);
+ ce_state->src_ring = NULL;
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+ attr);
+ if (IS_ERR(ce_state->dest_ring)) {
+ ret = PTR_ERR(ce_state->dest_ring);
+ ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+ ce_id, ret);
+ ce_state->dest_ring = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
- struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned);
- pci_free_consistent(ar_pci->pdev,
- (ce_state->src_ring->nentries *
- sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- ce_state->src_ring->base_addr_owner_space,
- ce_state->src_ring->base_addr_ce_space);
+ dma_free_coherent(ar->dev,
+ (ce_state->src_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->src_ring->base_addr_owner_space,
+ ce_state->src_ring->base_addr_ce_space);
kfree(ce_state->src_ring);
}
if (ce_state->dest_ring) {
- pci_free_consistent(ar_pci->pdev,
- (ce_state->dest_ring->nentries *
- sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- ce_state->dest_ring->base_addr_owner_space,
- ce_state->dest_ring->base_addr_ce_space);
+ dma_free_coherent(ar->dev,
+ (ce_state->dest_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->dest_ring->base_addr_owner_space,
+ ce_state->dest_ring->base_addr_ce_space);
kfree(ce_state->dest_ring);
}
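Besides splitting allocation from ring programming, the ce.c hunks above migrate the descriptor-ring buffers from the PCI-specific DMA wrappers to the generic DMA API. A minimal before/after sketch, assuming size and the device/handle variables are in scope:

    /* old: PCI-only wrapper, gfp flags fixed to GFP_ATOMIC internally */
    vaddr = pci_alloc_consistent(ar_pci->pdev, size, &dma_handle);
    pci_free_consistent(ar_pci->pdev, size, vaddr, dma_handle);

    /* new: bus-agnostic, explicit gfp flags */
    vaddr = dma_alloc_coherent(ar->dev, size, &dma_handle, GFP_KERNEL);
    dma_free_coherent(ar->dev, size, vaddr, dma_handle);

Using GFP_KERNEL here is possible because the rings are allocated in process context rather than from an atomic path.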
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 8eb7f99ed992..fd0bc3561e42 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -104,7 +104,8 @@ struct ath10k_ce_ring {
void *shadow_base_unaligned;
struct ce_desc *shadow_base;
- void **per_transfer_context;
+ /* keep last */
+ void *per_transfer_context[0];
};
struct ath10k_ce_pipe {
@@ -210,10 +211,12 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
/*==================CE Engine Initialization=======================*/
-/* Initialize an instance of a CE */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
- unsigned int ce_id,
- const struct ce_attr *attr);
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr);
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
/*==================CE Engine Shutdown=======================*/
/*
@@ -236,8 +239,6 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
unsigned int *nbytesp,
unsigned int *transfer_idp);
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
-
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
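The ce.h interface above replaces the single ath10k_ce_init()/ath10k_ce_deinit() pair with separate allocate/initialize and deinitialize/free steps, and turns per_transfer_context into a flexible array allocated together with the ring structure. A sketch of the intended life cycle from the caller's point of view; ce_count and host_ce_config[] are placeholders for the PCI layer's attribute table, and the two helpers are hypothetical:

    /* Hypothetical helper showing the split life cycle. */
    static int example_ce_setup(struct ath10k *ar, int ce_count,
                                const struct ce_attr *host_ce_config)
    {
            int i, ret;

            /* allocate host memory and DMA rings once */
            for (i = 0; i < ce_count; i++) {
                    ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config[i]);
                    if (ret)
                            return ret;
            }

            /* program ring registers; may be repeated on every power-up */
            for (i = 0; i < ce_count; i++) {
                    ret = ath10k_ce_init_pipe(ar, i, &host_ce_config[i]);
                    if (ret)
                            return ret;
            }
            return 0;
    }

    /* tear-down mirrors the two phases */
    static void example_ce_teardown(struct ath10k *ar, int ce_count)
    {
            int i;

            for (i = 0; i < ce_count; i++)
                    ath10k_ce_deinit_pipe(ar, i);
            for (i = 0; i < ce_count; i++)
                    ath10k_ce_free_pipe(ar, i);
    }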
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index ebc5fc2ede75..6abde37fb339 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -249,30 +249,40 @@ exit:
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
- u32 address = ar->hw_params.patch_load_addr;
- u32 exec_param;
+ u32 result, address = ar->hw_params.patch_load_addr;
int ret;
/* OTP is optional */
- if (!ar->otp_data || !ar->otp_len)
+ if (!ar->otp_data || !ar->otp_len) {
+ ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+ ar->otp_data, ar->otp_len);
return 0;
+ }
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+ address, ar->otp_len);
ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
if (ret) {
ath10k_err("could not write otp (%d)\n", ret);
- goto exit;
+ return ret;
}
- exec_param = 0;
- ret = ath10k_bmi_execute(ar, address, &exec_param);
+ ret = ath10k_bmi_execute(ar, address, 0, &result);
if (ret) {
ath10k_err("could not execute otp (%d)\n", ret);
- goto exit;
+ return ret;
}
-exit:
- return ret;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+ if (result != 0) {
+ ath10k_err("otp calibration failed: %d", result);
+ return -EINVAL;
+ }
+
+ return 0;
}
static int ath10k_download_fw(struct ath10k *ar)
@@ -389,8 +399,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
/* first fetch the firmware file (firmware-*.bin) */
ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
if (IS_ERR(ar->firmware)) {
- ath10k_err("Could not fetch firmware file '%s': %ld\n",
- name, PTR_ERR(ar->firmware));
+ ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
+ ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
return PTR_ERR(ar->firmware);
}
@@ -401,14 +411,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
if (len < magic_len) {
- ath10k_err("firmware image too small to contain magic: %zu\n",
- len);
+ ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
+ ar->hw_params.fw.dir, name, len);
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
- ath10k_err("Invalid firmware magic\n");
+ ath10k_err("invalid firmware magic\n");
ret = -EINVAL;
goto err;
}
@@ -430,7 +440,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
data += sizeof(*hdr);
if (len < ie_len) {
- ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
+ ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
ret = -EINVAL;
goto err;
@@ -513,8 +523,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
}
if (!ar->firmware_data || !ar->firmware_len) {
- ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
- name);
+ ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
+ ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
goto err;
}
@@ -531,7 +541,9 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
- ath10k_err("could not fetch board data (%d)\n", ret);
+ ath10k_err("could not fetch board data '%s/%s' (%d)\n",
+ ar->hw_params.fw.dir, ar->hw_params.fw.board,
+ ret);
goto err;
}
@@ -549,19 +561,21 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
int ret;
+ ar->fw_api = 2;
+ ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
- if (ret == 0) {
- ar->fw_api = 2;
- goto out;
- }
+ if (ret == 0)
+ goto success;
+
+ ar->fw_api = 1;
+ ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_1(ar);
if (ret)
return ret;
- ar->fw_api = 1;
-
-out:
+success:
ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
return 0;
@@ -572,16 +586,22 @@ static int ath10k_init_download_firmware(struct ath10k *ar)
int ret;
ret = ath10k_download_board_data(ar);
- if (ret)
+ if (ret) {
+ ath10k_err("failed to download board data: %d\n", ret);
return ret;
+ }
ret = ath10k_download_and_run_otp(ar);
- if (ret)
+ if (ret) {
+ ath10k_err("failed to run otp: %d\n", ret);
return ret;
+ }
ret = ath10k_download_fw(ar);
- if (ret)
+ if (ret) {
+ ath10k_err("failed to download firmware: %d\n", ret);
return ret;
+ }
return ret;
}
@@ -835,9 +855,12 @@ int ath10k_core_start(struct ath10k *ar)
INIT_LIST_HEAD(&ar->arvifs);
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
- ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n",
- ar->hw_params.name, ar->target_version,
- ar->hw->wiphy->fw_version, ar->fw_api,
+ ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
+ ar->hw_params.name,
+ ar->target_version,
+ ar->chip_id,
+ ar->hw->wiphy->fw_version,
+ ar->fw_api,
ar->htt.target_version_major,
ar->htt.target_version_minor);
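The firmware-fetch rework above sets ar->fw_api before each attempt so the boot debug output names the API level being tried, and falls back from the API-2 single-file image to the legacy API-1 file set. Condensed control flow for reference; the two fetch helpers are the real functions, everything else is elided:

    ar->fw_api = 2;
    ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
    if (ret) {
            ar->fw_api = 1;
            ret = ath10k_core_fetch_firmware_api_1(ar);
            if (ret)
                    return ret;     /* neither API-2 nor API-1 firmware could be fetched */
    }
    ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);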
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 0e71979d837c..2c1dfd719146 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -119,6 +119,7 @@ struct ath10k_peer_stat {
u8 peer_macaddr[ETH_ALEN];
u32 peer_rssi;
u32 peer_tx_rate;
+ u32 peer_rx_rate; /* 10x only */
};
struct ath10k_target_stats {
@@ -130,6 +131,12 @@ struct ath10k_target_stats {
u32 cycle_count;
u32 phy_err_count;
u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
/* PDEV TX stats */
s32 comp_queued;
@@ -260,6 +267,8 @@ struct ath10k_vif {
u8 fixed_rate;
u8 fixed_nss;
u8 force_sgi;
+ bool use_cts_prot;
+ int num_legacy_stations;
};
struct ath10k_vif_iter {
@@ -419,13 +428,18 @@ struct ath10k {
struct cfg80211_chan_def chandef;
int free_vdev_map;
+ bool promisc;
+ bool monitor;
int monitor_vdev_id;
- bool monitor_enabled;
- bool monitor_present;
+ bool monitor_started;
unsigned int filter_flags;
unsigned long dev_flags;
u32 dfs_block_radar_events;
+ /* protected by conf_mutex */
+ bool radar_enabled;
+ int num_started_vdevs;
+
struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 6debd281350a..1b7ff4ba122c 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
u8 *tmp = ev->data;
struct ath10k_target_stats *stats;
int num_pdev_stats, num_vdev_stats, num_peer_stats;
- struct wmi_pdev_stats *ps;
+ struct wmi_pdev_stats_10x *ps;
int i;
spin_lock_bh(&ar->data_lock);
@@ -173,7 +173,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
if (num_pdev_stats) {
- ps = (struct wmi_pdev_stats *)tmp;
+ ps = (struct wmi_pdev_stats_10x *)tmp;
stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
@@ -228,7 +228,18 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
- tmp += sizeof(struct wmi_pdev_stats);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
+ ar->fw_features)) {
+ stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
+ stats->rts_bad = __le32_to_cpu(ps->rts_bad);
+ stats->rts_good = __le32_to_cpu(ps->rts_good);
+ stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
+ stats->no_beacons = __le32_to_cpu(ps->no_beacons);
+ stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
+ tmp += sizeof(struct wmi_pdev_stats_10x);
+ } else {
+ tmp += sizeof(struct wmi_pdev_stats_old);
+ }
}
/* 0 or max vdevs */
@@ -243,22 +254,29 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
if (num_peer_stats) {
- struct wmi_peer_stats *peer_stats;
+ struct wmi_peer_stats_10x *peer_stats;
struct ath10k_peer_stat *s;
stats->peers = num_peer_stats;
for (i = 0; i < num_peer_stats; i++) {
- peer_stats = (struct wmi_peer_stats *)tmp;
+ peer_stats = (struct wmi_peer_stats_10x *)tmp;
s = &stats->peer_stat[i];
- WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
- s->peer_macaddr);
+ memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
+ ETH_ALEN);
s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
s->peer_tx_rate =
__le32_to_cpu(peer_stats->peer_tx_rate);
-
- tmp += sizeof(struct wmi_peer_stats);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
+ ar->fw_features)) {
+ s->peer_rx_rate =
+ __le32_to_cpu(peer_stats->peer_rx_rate);
+ tmp += sizeof(struct wmi_peer_stats_10x);
+
+ } else {
+ tmp += sizeof(struct wmi_peer_stats_old);
+ }
}
}
@@ -272,7 +290,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
struct ath10k *ar = file->private_data;
struct ath10k_target_stats *fw_stats;
char *buf = NULL;
- unsigned int len = 0, buf_len = 2500;
+ unsigned int len = 0, buf_len = 8000;
ssize_t ret_cnt = 0;
long left;
int i;
@@ -320,6 +338,16 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"Cycle count", fw_stats->cycle_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PHY error count", fw_stats->phy_err_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS bad count", fw_stats->rts_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS good count", fw_stats->rts_good);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "FCS bad count", fw_stats->fcs_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "No beacon count", fw_stats->no_beacons);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MIB int count", fw_stats->mib_int_count);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -411,8 +439,8 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
len += scnprintf(buf + len, buf_len - len, "\n");
- len += scnprintf(buf + len, buf_len - len, "%30s\n",
- "ath10k PEER stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
+ "ath10k PEER stats", fw_stats->peers);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
@@ -425,6 +453,9 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer TX rate",
fw_stats->peer_stat[i].peer_tx_rate);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RX rate",
+ fw_stats->peer_stat[i].peer_rx_rate);
len += scnprintf(buf + len, buf_len - len, "\n");
}
spin_unlock_bh(&ar->data_lock);
@@ -451,27 +482,37 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- const char buf[] = "To simulate firmware crash write the keyword"
- " `crash` to this file.\nThis will force firmware"
- " to report a crash to the host system.\n";
+ const char buf[] = "To simulate firmware crash write one of the"
+ " keywords to this file:\n `soft` - this will send"
+ " WMI_FORCE_FW_HANG_ASSERT to firmware if FW"
+ " supports that command.\n `hard` - this will send"
+ " to firmware command with illegal parameters"
+ " causing firmware crash.\n";
+
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
+/* Simulate firmware crash:
+ * 'soft': Send a WMI command that causes a firmware hang; the hang is
+ * recoverable by a warm firmware reset.
+ * 'hard': Force a firmware crash by setting a vdev parameter on a disallowed
+ * vdev id; this is a hard crash because it is recoverable only by a cold
+ * firmware reset.
+ */
static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- char buf[32] = {};
+ char buf[32];
int ret;
mutex_lock(&ar->conf_mutex);
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
- if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
- ret = -EINVAL;
- goto exit;
- }
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = 0;
if (ar->state != ATH10K_STATE_ON &&
ar->state != ATH10K_STATE_RESTARTED) {
@@ -479,14 +520,30 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
goto exit;
}
- ath10k_info("simulating firmware crash\n");
+ /* drop the possible '\n' from the end */
+ if (buf[count - 1] == '\n') {
+ buf[count - 1] = 0;
+ count--;
+ }
- ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
- if (ret)
- ath10k_warn("failed to force fw hang (%d)\n", ret);
+ if (!strcmp(buf, "soft")) {
+ ath10k_info("simulating soft firmware crash\n");
+ ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+ } else if (!strcmp(buf, "hard")) {
+ ath10k_info("simulating hard firmware crash\n");
+ ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
+ ar->wmi.vdev_param->rts_threshold, 0);
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath10k_warn("failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
- if (ret == 0)
- ret = count;
+ ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
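With the change above, the simulate_fw_crash debugfs file accepts the keywords `soft` and `hard` instead of the old `crash`. A hypothetical user-space helper for triggering it; the debugfs path below is an assumption and depends on the phy index and where debugfs is mounted:

    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* "soft" asserts via WMI (warm reset recovers);
             * "hard" needs a cold firmware reset to recover. */
            fputs("soft\n", f);
            fclose(f);
            return 0;
    }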
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 7f1bccd3597f..5b58dbb17416 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -157,6 +157,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
goto err_pull;
}
ep->tx_credits -= credits;
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "htc ep %d consumed %d credits (total %d)\n",
+ eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
}
@@ -185,6 +188,9 @@ err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "htc ep %d reverted %d credits back (total %d)\n",
+ eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
@@ -234,12 +240,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
if (report->eid >= ATH10K_HTC_EP_COUNT)
break;
- ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
- report->eid, report->credits);
-
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
+ ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ar);
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 654867fc1ae7..645a563e3fb9 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -21,6 +21,7 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
+#include <net/mac80211.h>
#include "htc.h"
#include "rx_desc.h"
@@ -1172,23 +1173,6 @@ struct htt_peer_unmap_event {
u16 peer_id;
};
-struct htt_rx_info {
- struct sk_buff *skb;
- enum htt_rx_mpdu_status status;
- enum htt_rx_mpdu_encrypt_type encrypt_type;
- s8 signal;
- struct {
- u8 info0;
- u32 info1;
- u32 info2;
- } rate;
-
- u32 tsf;
- bool fcs_err;
- bool amsdu_more;
- bool mic_err;
-};
-
struct ath10k_htt_txbuf {
struct htt_data_tx_desc_frag frags[2];
struct ath10k_htc_hdr htc_hdr;
@@ -1289,6 +1273,9 @@ struct ath10k_htt {
struct tasklet_struct txrx_compl_task;
struct sk_buff_head tx_compl_q;
struct sk_buff_head rx_compl_q;
+
+ /* rx_status template */
+ struct ieee80211_rx_status rx_status;
};
#define RX_HTT_HDR_STATUS_LEN 64
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index cdcbe2de95f9..f85a3cf6da31 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -297,6 +297,7 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
}
}
+/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
u8 **fw_desc, int *fw_desc_len,
struct sk_buff **head_msdu,
@@ -310,7 +311,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
if (htt->rx_confused) {
ath10k_warn("htt is confused. refusing rx\n");
- return 0;
+ return -1;
}
msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
@@ -442,6 +443,9 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
}
*tail_msdu = msdu;
+ if (*head_msdu == NULL)
+ msdu_chaining = -1;
+
/*
* Don't refill the ring yet.
*
@@ -636,6 +640,190 @@ struct amsdu_subframe_hdr {
__be16 len;
} __packed;
+static const u8 rx_legacy_rate_idx[] = {
+ 3, /* 0x00 - 11Mbps */
+ 2, /* 0x01 - 5.5Mbps */
+ 1, /* 0x02 - 2Mbps */
+ 0, /* 0x03 - 1Mbps */
+ 3, /* 0x04 - 11Mbps */
+ 2, /* 0x05 - 5.5Mbps */
+ 1, /* 0x06 - 2Mbps */
+ 0, /* 0x07 - 1Mbps */
+ 10, /* 0x08 - 48Mbps */
+ 8, /* 0x09 - 24Mbps */
+ 6, /* 0x0A - 12Mbps */
+ 4, /* 0x0B - 6Mbps */
+ 11, /* 0x0C - 54Mbps */
+ 9, /* 0x0D - 36Mbps */
+ 7, /* 0x0E - 18Mbps */
+ 5, /* 0x0F - 9Mbps */
+};
+
+static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+ enum ieee80211_band band,
+ u8 info0, u32 info1, u32 info2,
+ struct ieee80211_rx_status *status)
+{
+ u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+ u8 preamble = 0;
+
+ /* Check if valid fields */
+ if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
+ return;
+
+ preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
+
+ switch (preamble) {
+ case HTT_RX_LEGACY:
+ cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
+ rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
+ rate_idx = 0;
+
+ if (rate < 0x08 || rate > 0x0F)
+ break;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ if (cck)
+ rate &= ~BIT(3);
+ rate_idx = rx_legacy_rate_idx[rate];
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate_idx = rx_legacy_rate_idx[rate];
+ /* We use the same rate table that is registered
+ with mac80211 (ath10k_rates[]); for 5GHz the CCK
+ entries are skipped, hence the -4 offset */
+ rate_idx -= 4;
+ break;
+ default:
+ break;
+ }
+
+ status->rate_idx = rate_idx;
+ break;
+ case HTT_RX_HT:
+ case HTT_RX_HT_WITH_TXBF:
+ /* HT-SIG - Table 20-11 in info1 and info2 */
+ mcs = info1 & 0x1F;
+ nss = mcs >> 3;
+ bw = (info1 >> 7) & 1;
+ sgi = (info2 >> 7) & 1;
+
+ status->rate_idx = mcs;
+ status->flag |= RX_FLAG_HT;
+ if (sgi)
+ status->flag |= RX_FLAG_SHORT_GI;
+ if (bw)
+ status->flag |= RX_FLAG_40MHZ;
+ break;
+ case HTT_RX_VHT:
+ case HTT_RX_VHT_WITH_TXBF:
+ /* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2;
+ TODO: verify this mapping */
+ mcs = (info2 >> 4) & 0x0F;
+ nss = ((info1 >> 10) & 0x07) + 1;
+ bw = info1 & 3;
+ sgi = info2 & 1;
+
+ status->rate_idx = mcs;
+ status->vht_nss = nss;
+
+ if (sgi)
+ status->flag |= RX_FLAG_SHORT_GI;
+
+ switch (bw) {
+ /* 20MHZ */
+ case 0:
+ break;
+ /* 40MHZ */
+ case 1:
+ status->flag |= RX_FLAG_40MHZ;
+ break;
+ /* 80MHZ */
+ case 2:
+ status->vht_flag |= RX_VHT_FLAG_80MHZ;
+ }
+
+ status->flag |= RX_FLAG_VHT;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+
+ if (enctype == HTT_RX_MPDU_ENCRYPT_NONE) {
+ rx_status->flag &= ~(RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+ return;
+ }
+
+ rx_status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
+ ~IEEE80211_FCTL_PROTECTED);
+}
+
+static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_channel *ch;
+
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->scan_channel;
+ if (!ch)
+ ch = ar->rx_channel;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch)
+ return false;
+
+ status->band = ch->band;
+ status->freq = ch->center_freq;
+
+ return true;
+}
+
+static void ath10k_process_rx(struct ath10k *ar,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ *status = *rx_status;
+
+ ath10k_dbg(ATH10K_DBG_DATA,
+ "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
+ skb,
+ skb->len,
+ status->flag == 0 ? "legacy" : "",
+ status->flag & RX_FLAG_HT ? "ht" : "",
+ status->flag & RX_FLAG_VHT ? "vht" : "",
+ status->flag & RX_FLAG_40MHZ ? "40" : "",
+ status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
+ status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->vht_nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR));
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ skb->data, skb->len);
+
+ ieee80211_rx(ar->hw, skb);
+}
+
static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
@@ -643,11 +831,12 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
}
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
- struct htt_rx_info *info)
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb_in)
{
struct htt_rx_desc *rxd;
+ struct sk_buff *skb = skb_in;
struct sk_buff *first;
- struct sk_buff *skb = info->skb;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
struct ieee80211_hdr *hdr;
@@ -728,24 +917,27 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
break;
}
- info->skb = skb;
- info->encrypt_type = enctype;
+ skb_in = skb;
+ ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype);
skb = skb->next;
- info->skb->next = NULL;
+ skb_in->next = NULL;
if (skb)
- info->amsdu_more = true;
+ rx_status->flag |= RX_FLAG_AMSDU_MORE;
+ else
+ rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
- ath10k_process_rx(htt->ar, info);
+ ath10k_process_rx(htt->ar, rx_status, skb_in);
}
/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
* monitor interface active for sniffing purposes. */
}
-static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
{
- struct sk_buff *skb = info->skb;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
@@ -808,66 +1000,9 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
break;
}
- info->skb = skb;
- info->encrypt_type = enctype;
-
- ath10k_process_rx(htt->ar, info);
-}
-
-static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
- return true;
-
- return false;
-}
-
-static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
- return true;
-
- return false;
-}
-
-static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
- return true;
-
- return false;
-}
-
-static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
- return true;
+ ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype);
- return false;
+ ath10k_process_rx(htt->ar, rx_status, skb);
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@@ -952,21 +1087,73 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
return 0;
}
+static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
+ struct sk_buff *head,
+ enum htt_rx_mpdu_status status,
+ bool channel_set,
+ u32 attention)
+{
+ if (head->len == 0) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx dropping due to zero-len\n");
+ return false;
+ }
+
+ if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx dropping due to decrypt-err\n");
+ return false;
+ }
+
+ if (!channel_set) {
+ ath10k_warn("no channel configured; ignoring frame!\n");
+ return false;
+ }
+
+ /* Skip mgmt frames while we handle this in WMI */
+ if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
+ attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
+ ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+ return false;
+ }
+
+ if (status != HTT_RX_IND_MPDU_STATUS_OK &&
+ status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
+ status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
+ !htt->ar->monitor_started) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx ignoring frame w/ status %d\n",
+ status);
+ return false;
+ }
+
+ if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx CAC running\n");
+ return false;
+ }
+
+ return true;
+}
+
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
- struct htt_rx_info info;
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ struct htt_rx_desc *rxd;
+ enum htt_rx_mpdu_status status;
struct ieee80211_hdr *hdr;
int num_mpdu_ranges;
+ u32 attention;
int fw_desc_len;
u8 *fw_desc;
+ bool channel_set;
int i, j;
+ int ret;
lockdep_assert_held(&htt->rx_ring.lock);
- memset(&info, 0, sizeof(info));
-
fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
fw_desc = (u8 *)&rx->fw_desc;
@@ -974,106 +1161,90 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+ /* Fill this once, while this is per-ppdu */
+ if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
+ memset(rx_status, 0, sizeof(*rx_status));
+ rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ rx->ppdu.combined_rssi;
+ }
+
+ if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
+ /* TSF available only in 32-bit */
+ rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
+ rx_status->flag |= RX_FLAG_MACTIME_END;
+ }
+
+ channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
+
+ if (channel_set) {
+ ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
+ rx->ppdu.info0,
+ __le32_to_cpu(rx->ppdu.info1),
+ __le32_to_cpu(rx->ppdu.info2),
+ rx_status);
+ }
+
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
rx, sizeof(*rx) +
(sizeof(struct htt_rx_indication_mpdu_range) *
num_mpdu_ranges));
for (i = 0; i < num_mpdu_ranges; i++) {
- info.status = mpdu_ranges[i].mpdu_range_status;
+ status = mpdu_ranges[i].mpdu_range_status;
for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
struct sk_buff *msdu_head, *msdu_tail;
- enum htt_rx_mpdu_status status;
- int msdu_chaining;
msdu_head = NULL;
msdu_tail = NULL;
- msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
- &fw_desc,
- &fw_desc_len,
- &msdu_head,
- &msdu_tail);
-
- if (!msdu_head) {
- ath10k_warn("htt rx no data!\n");
- continue;
- }
-
- if (msdu_head->len == 0) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx dropping due to zero-len\n");
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx dropping due to decrypt-err\n");
+ ret = ath10k_htt_rx_amsdu_pop(htt,
+ &fw_desc,
+ &fw_desc_len,
+ &msdu_head,
+ &msdu_tail);
+
+ if (ret < 0) {
+ ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
+ ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
- status = info.status;
-
- /* Skip mgmt frames while we handle this in WMI */
- if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
- ath10k_htt_rx_is_mgmt(msdu_head)) {
- ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
+ rxd = container_of((void *)msdu_head->data,
+ struct htt_rx_desc,
+ msdu_payload);
+ attention = __le32_to_cpu(rxd->attention.flags);
- if (status != HTT_RX_IND_MPDU_STATUS_OK &&
- status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
- status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
- !htt->ar->monitor_enabled) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx ignoring frame w/ status %d\n",
- status);
+ if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
+ status,
+ channel_set,
+ attention)) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
- if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx CAC running\n");
+ if (ret > 0 &&
+ ath10k_unchain_msdu(msdu_head) < 0) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
- if (msdu_chaining &&
- (ath10k_unchain_msdu(msdu_head) < 0)) {
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- info.skb = msdu_head;
- info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
- info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
-
- if (info.fcs_err)
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx has FCS err\n");
-
- if (info.mic_err)
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx has MIC err\n");
-
- info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
- info.signal += rx->ppdu.combined_rssi;
+ if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ else
+ rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
- info.rate.info0 = rx->ppdu.info0;
- info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
- info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
- info.tsf = __le32_to_cpu(rx->ppdu.tsf);
+ if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+ else
+ rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
if (ath10k_htt_rx_hdr_is_amsdu(hdr))
- ath10k_htt_rx_amsdu(htt, &info);
+ ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
else
- ath10k_htt_rx_msdu(htt, &info);
+ ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
}
}
@@ -1084,11 +1255,12 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
struct htt_rx_fragment_indication *frag)
{
struct sk_buff *msdu_head, *msdu_tail;
+ enum htt_rx_mpdu_encrypt_type enctype;
struct htt_rx_desc *rxd;
enum rx_msdu_decap_format fmt;
- struct htt_rx_info info = {};
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct ieee80211_hdr *hdr;
- int msdu_chaining;
+ int ret;
bool tkip_mic_err;
bool decrypt_err;
u8 *fw_desc;
@@ -1102,19 +1274,15 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
msdu_tail = NULL;
spin_lock_bh(&htt->rx_ring.lock);
- msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
- &msdu_head, &msdu_tail);
+ ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+ &msdu_head, &msdu_tail);
spin_unlock_bh(&htt->rx_ring.lock);
ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
- if (!msdu_head) {
- ath10k_warn("htt rx frag no data\n");
- return;
- }
-
- if (msdu_chaining || msdu_head != msdu_tail) {
- ath10k_warn("aggregation with fragmentation?!\n");
+ if (ret) {
+ ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
+ ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
return;
}
@@ -1136,57 +1304,54 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
goto end;
}
- info.skb = msdu_head;
- info.status = HTT_RX_IND_MPDU_STATUS_OK;
- info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+ ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype);
+ msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
- if (tkip_mic_err) {
+ if (tkip_mic_err)
ath10k_warn("tkip mic error\n");
- info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
- }
if (decrypt_err) {
ath10k_warn("decryption err in fragmented rx\n");
- dev_kfree_skb_any(info.skb);
+ dev_kfree_skb_any(msdu_head);
goto end;
}
- if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+ if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
hdrlen = ieee80211_hdrlen(hdr->frame_control);
- paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
+ paramlen = ath10k_htt_rx_crypto_param_len(enctype);
/* It is more efficient to move the header than the payload */
- memmove((void *)info.skb->data + paramlen,
- (void *)info.skb->data,
+ memmove((void *)msdu_head->data + paramlen,
+ (void *)msdu_head->data,
hdrlen);
- skb_pull(info.skb, paramlen);
- hdr = (struct ieee80211_hdr *)info.skb->data;
+ skb_pull(msdu_head, paramlen);
+ hdr = (struct ieee80211_hdr *)msdu_head->data;
}
/* remove trailing FCS */
trim = 4;
/* remove crypto trailer */
- trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
+ trim += ath10k_htt_rx_crypto_tail_len(enctype);
/* last fragment of TKIP frags has MIC */
if (!ieee80211_has_morefrags(hdr->frame_control) &&
- info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
trim += 8;
- if (trim > info.skb->len) {
+ if (trim > msdu_head->len) {
ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
- dev_kfree_skb_any(info.skb);
+ dev_kfree_skb_any(msdu_head);
goto end;
}
- skb_trim(info.skb, info.skb->len - trim);
+ skb_trim(msdu_head, msdu_head->len - trim);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
- info.skb->data, info.skb->len);
- ath10k_process_rx(htt->ar, &info);
+ msdu_head->data, msdu_head->len);
+ ath10k_process_rx(htt->ar, rx_status, msdu_head);
end:
if (fw_desc_len > 0) {
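The htt_rx.c rework above drops struct htt_rx_info and fills a shared ieee80211_rx_status template per PPDU instead. The legacy-rate decoding is easy to miss inside the large hunk; a standalone sketch of that lookup, assuming the rx_legacy_rate_idx[] table added above and the CCK-first ordering of the driver's registered rate table:

    /* Illustrative only: HTT legacy rate code -> mac80211 rate_idx. */
    static int example_legacy_rate_to_idx(u8 rate, bool cck,
                                          enum ieee80211_band band)
    {
            if (rate < 0x08 || rate > 0x0f)
                    return -EINVAL; /* only 0x8..0xf are handled, mirroring the hunk above */

            if (band == IEEE80211_BAND_2GHZ && cck)
                    rate &= ~BIT(3); /* CCK codes fold onto the table's first entries */

            if (band == IEEE80211_BAND_5GHZ)
                    return rx_legacy_rate_idx[rate] - 4; /* no CCK rates on 5 GHz */

            return rx_legacy_rate_idx[rate];
    }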
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 35fc44e281f5..007e855f4ba9 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -28,6 +28,7 @@
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
+#define QCA988X_HW_2_0_FW_2_FILE "firmware-2.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 511a2f81e7af..0ac5437492fd 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -165,7 +165,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
first_errno = ret;
if (ret)
- ath10k_warn("could not remove peer wep key %d (%d)\n",
+ ath10k_warn("failed to remove peer wep key %d: %d\n",
i, ret);
peer->keys[i] = NULL;
@@ -213,7 +213,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
first_errno = ret;
if (ret)
- ath10k_warn("could not remove key for %pM\n", addr);
+ ath10k_warn("failed to remove key for %pM: %d\n",
+ addr, ret);
}
return first_errno;
@@ -323,14 +324,14 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
+ ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
return ret;
}
ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
+ ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
return ret;
}
@@ -351,7 +352,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_pdev_set_param(ar, param,
ATH10K_KICKOUT_THRESHOLD);
if (ret) {
- ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
+ ath10k_warn("failed to set kickout threshold on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -360,7 +361,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MIN_IDLE);
if (ret) {
- ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
+ ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -369,7 +370,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MAX_IDLE);
if (ret) {
- ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
+ ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -378,7 +379,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
if (ret) {
- ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -488,92 +489,20 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
return 0;
}
-static int ath10k_vdev_start(struct ath10k_vif *arvif)
+static bool ath10k_monitor_is_enabled(struct ath10k *ar)
{
- struct ath10k *ar = arvif->ar;
- struct cfg80211_chan_def *chandef = &ar->chandef;
- struct wmi_vdev_start_request_arg arg = {};
- int ret = 0;
-
lockdep_assert_held(&ar->conf_mutex);
- reinit_completion(&ar->vdev_setup_done);
-
- arg.vdev_id = arvif->vdev_id;
- arg.dtim_period = arvif->dtim_period;
- arg.bcn_intval = arvif->beacon_interval;
-
- arg.channel.freq = chandef->chan->center_freq;
- arg.channel.band_center_freq1 = chandef->center_freq1;
- arg.channel.mode = chan_to_phymode(chandef);
-
- arg.channel.min_power = 0;
- arg.channel.max_power = chandef->chan->max_power * 2;
- arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
- arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
-
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- arg.ssid = arvif->u.ap.ssid;
- arg.ssid_len = arvif->u.ap.ssid_len;
- arg.hidden_ssid = arvif->u.ap.hidden_ssid;
-
- /* For now allow DFS for AP mode */
- arg.channel.chan_radar =
- !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
- } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
- arg.ssid = arvif->vif->bss_conf.ssid;
- arg.ssid_len = arvif->vif->bss_conf.ssid_len;
- }
-
ath10k_dbg(ATH10K_DBG_MAC,
- "mac vdev %d start center_freq %d phymode %s\n",
- arg.vdev_id, arg.channel.freq,
- ath10k_wmi_phymode_str(arg.channel.mode));
-
- ret = ath10k_wmi_vdev_start(ar, &arg);
- if (ret) {
- ath10k_warn("WMI vdev %i start failed: ret %d\n",
- arg.vdev_id, ret);
- return ret;
- }
-
- ret = ath10k_vdev_setup_sync(ar);
- if (ret) {
- ath10k_warn("vdev %i setup failed %d\n",
- arg.vdev_id, ret);
- return ret;
- }
-
- return ret;
-}
-
-static int ath10k_vdev_stop(struct ath10k_vif *arvif)
-{
- struct ath10k *ar = arvif->ar;
- int ret;
+ "mac monitor refs: promisc %d monitor %d cac %d\n",
+ ar->promisc, ar->monitor,
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags));
- lockdep_assert_held(&ar->conf_mutex);
-
- reinit_completion(&ar->vdev_setup_done);
-
- ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
- if (ret) {
- ath10k_warn("WMI vdev %i stop failed: ret %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- ret = ath10k_vdev_setup_sync(ar);
- if (ret) {
- ath10k_warn("vdev %i setup sync failed %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- return ret;
+ return ar->promisc || ar->monitor ||
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
}
-static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
+static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
struct cfg80211_chan_def *chandef = &ar->chandef;
struct ieee80211_channel *channel = chandef->chan;
@@ -582,11 +511,6 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
lockdep_assert_held(&ar->conf_mutex);
- if (!ar->monitor_present) {
- ath10k_warn("mac montor stop -- monitor is not present\n");
- return -EINVAL;
- }
-
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = chandef->center_freq1;
@@ -604,88 +528,75 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
- ath10k_warn("Monitor vdev %i start failed: ret %d\n",
+ ath10k_warn("failed to request monitor vdev %i start: %d\n",
vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("Monitor vdev %i setup failed %d\n",
+ ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n",
vdev_id, ret);
return ret;
}
ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
- ath10k_warn("Monitor vdev %i up failed: %d\n",
+ ath10k_warn("failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
goto vdev_stop;
}
ar->monitor_vdev_id = vdev_id;
- ar->monitor_enabled = true;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
+ ar->monitor_vdev_id);
return 0;
vdev_stop:
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev %i stop failed: %d\n",
+ ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
-static int ath10k_monitor_stop(struct ath10k *ar)
+static int ath10k_monitor_vdev_stop(struct ath10k *ar)
{
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- if (!ar->monitor_present) {
- ath10k_warn("mac montor stop -- monitor is not present\n");
- return -EINVAL;
- }
-
- if (!ar->monitor_enabled) {
- ath10k_warn("mac montor stop -- monitor is not enabled\n");
- return -EINVAL;
- }
-
ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev %i down failed: %d\n",
+ ath10k_warn("failed to put down monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev %i stop failed: %d\n",
+ ath10k_warn("failed to to request monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_vdev_setup_sync(ar);
if (ret)
- ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
+ ath10k_warn("failed to synchronise monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
- ar->monitor_enabled = false;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
return ret;
}
-static int ath10k_monitor_create(struct ath10k *ar)
+static int ath10k_monitor_vdev_create(struct ath10k *ar)
{
int bit, ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- if (ar->monitor_present) {
- ath10k_warn("Monitor mode already enabled\n");
- return 0;
- }
-
bit = ffs(ar->free_vdev_map);
if (bit == 0) {
- ath10k_warn("No free VDEV slots\n");
+ ath10k_warn("failed to find free vdev id for monitor vdev\n");
return -ENOMEM;
}
@@ -696,7 +607,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
WMI_VDEV_TYPE_MONITOR,
0, ar->mac_addr);
if (ret) {
- ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
+ ath10k_warn("failed to request monitor vdev %i creation: %d\n",
ar->monitor_vdev_id, ret);
goto vdev_fail;
}
@@ -704,7 +615,6 @@ static int ath10k_monitor_create(struct ath10k *ar)
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
- ar->monitor_present = true;
return 0;
vdev_fail:
@@ -715,48 +625,123 @@ vdev_fail:
return ret;
}
-static int ath10k_monitor_destroy(struct ath10k *ar)
+static int ath10k_monitor_vdev_delete(struct ath10k *ar)
{
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- if (!ar->monitor_present)
- return 0;
-
ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
if (ret) {
- ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
+ ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
- ar->monitor_present = false;
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
return ret;
}
-static int ath10k_start_cac(struct ath10k *ar)
+static int ath10k_monitor_start(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
- set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ if (!ath10k_monitor_is_enabled(ar)) {
+ ath10k_warn("trying to start monitor with no references\n");
+ return 0;
+ }
+
+ if (ar->monitor_started) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n");
+ return 0;
+ }
- ret = ath10k_monitor_create(ar);
+ ret = ath10k_monitor_vdev_create(ar);
if (ret) {
- clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_warn("failed to create monitor vdev: %d\n", ret);
return ret;
}
- ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
+ ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
if (ret) {
+ ath10k_warn("failed to start monitor vdev: %d\n", ret);
+ ath10k_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ ar->monitor_started = true;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n");
+
+ return 0;
+}
+
+static void ath10k_monitor_stop(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath10k_monitor_is_enabled(ar)) {
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac monitor will be stopped later\n");
+ return;
+ }
+
+ if (!ar->monitor_started) {
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac monitor probably failed to start earlier\n");
+ return;
+ }
+
+ ret = ath10k_monitor_vdev_stop(ar);
+ if (ret)
+ ath10k_warn("failed to stop monitor vdev: %d\n", ret);
+
+ ret = ath10k_monitor_vdev_delete(ar);
+ if (ret)
+ ath10k_warn("failed to delete monitor vdev: %d\n", ret);
+
+ ar->monitor_started = false;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n");
+}
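The monitor vdev is now driven by three independent reasons for existing, promiscuous mode, an explicit monitor interface, and a running CAC, instead of the old monitor_present/monitor_enabled pair. Callers are expected to update their own flag first and only then ask the start/stop helpers above to act, which is exactly what the config, filter and CAC paths later in this patch do. Below is a minimal sketch of that calling convention for the promiscuous case; the helper name is invented for illustration and is not part of the patch.

/* Illustrative sketch only; mirrors the pattern used by
 * ath10k_config() and ath10k_configure_filter() in this patch.
 */
static int ath10k_monitor_recalc_sketch(struct ath10k *ar, bool promisc)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	/* update the caller-owned reference first ... */
	ar->promisc = promisc;

	/* ... then let the helpers decide whether a monitor vdev is needed */
	if (ath10k_monitor_is_enabled(ar)) {
		ret = ath10k_monitor_start(ar);
		if (ret)
			ar->promisc = false; /* roll back on failure */
		return ret;
	}

	ath10k_monitor_stop(ar);
	return 0;
}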
+
+static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param, rts_cts = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = ar->wmi.vdev_param->enable_rtscts;
+
+ if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
+ rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
+ WMI_RTSCTS_PROFILE);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ rts_cts);
+}
+
+static int ath10k_start_cac(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+ ret = ath10k_monitor_start(ar);
+ if (ret) {
+ ath10k_warn("failed to start monitor (cac): %d\n", ret);
clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
- ath10k_monitor_destroy(ar);
return ret;
}
@@ -774,58 +759,26 @@ static int ath10k_stop_cac(struct ath10k *ar)
if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
return 0;
- ath10k_monitor_stop(ar);
- ath10k_monitor_destroy(ar);
clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_monitor_stop(ar);
ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
return 0;
}
-static const char *ath10k_dfs_state(enum nl80211_dfs_state dfs_state)
-{
- switch (dfs_state) {
- case NL80211_DFS_USABLE:
- return "USABLE";
- case NL80211_DFS_UNAVAILABLE:
- return "UNAVAILABLE";
- case NL80211_DFS_AVAILABLE:
- return "AVAILABLE";
- default:
- WARN_ON(1);
- return "bug";
- }
-}
-
-static void ath10k_config_radar_detection(struct ath10k *ar)
+static void ath10k_recalc_radar_detection(struct ath10k *ar)
{
- struct ieee80211_channel *chan = ar->hw->conf.chandef.chan;
- bool radar = ar->hw->conf.radar_enabled;
- bool chan_radar = !!(chan->flags & IEEE80211_CHAN_RADAR);
- enum nl80211_dfs_state dfs_state = chan->dfs_state;
int ret;
lockdep_assert_held(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC,
- "mac radar config update: chan %dMHz radar %d chan radar %d chan state %s\n",
- chan->center_freq, radar, chan_radar,
- ath10k_dfs_state(dfs_state));
-
- /*
- * It's safe to call it even if CAC is not started.
- * This call here guarantees changing channel, etc. will stop CAC.
- */
ath10k_stop_cac(ar);
- if (!radar)
- return;
-
- if (!chan_radar)
+ if (!ar->radar_enabled)
return;
- if (dfs_state != NL80211_DFS_USABLE)
+ if (ar->num_started_vdevs > 0)
return;
ret = ath10k_start_cac(ar);
@@ -835,11 +788,106 @@ static void ath10k_config_radar_detection(struct ath10k *ar)
* radiation is not allowed, make this channel DFS_UNAVAILABLE
* by indicating that radar was detected.
*/
- ath10k_warn("failed to start CAC (%d)\n", ret);
+ ath10k_warn("failed to start CAC: %d\n", ret);
ieee80211_radar_detected(ar->hw);
}
}
+static int ath10k_vdev_start(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct cfg80211_chan_def *chandef = &ar->chandef;
+ struct wmi_vdev_start_request_arg arg = {};
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.mode = chan_to_phymode(chandef);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = chandef->chan->max_power * 2;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+ } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ arg.ssid = arvif->vif->bss_conf.ssid;
+ arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+ }
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath10k_wmi_phymode_str(arg.channel.mode));
+
+ ret = ath10k_wmi_vdev_start(ar, &arg);
+ if (ret) {
+ ath10k_warn("failed to start WMI vdev %i: %d\n",
+ arg.vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
+ arg.vdev_id, ret);
+ return ret;
+ }
+
+ ar->num_started_vdevs++;
+ ath10k_recalc_radar_detection(ar);
+
+ return ret;
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn("failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn("failed to syncronise setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ if (ar->num_started_vdevs != 0) {
+ ar->num_started_vdevs--;
+ ath10k_recalc_radar_detection(ar);
+ }
+
+ return ret;
+}
+
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info)
{
@@ -880,7 +928,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
- ath10k_warn("Failed to bring up vdev %d: %i\n",
+ ath10k_warn("failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
ath10k_vdev_stop(arvif);
return;
@@ -904,7 +952,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
if (!info->ibss_joined) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
if (ret)
- ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n",
self_peer, arvif->vdev_id, ret);
if (is_zero_ether_addr(arvif->bssid))
@@ -913,7 +961,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
arvif->bssid);
if (ret) {
- ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
arvif->bssid, arvif->vdev_id, ret);
return;
}
@@ -925,7 +973,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
if (ret) {
- ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n",
self_peer, arvif->vdev_id, ret);
return;
}
@@ -934,7 +982,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
ATH10K_DEFAULT_ATIM);
if (ret)
- ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n",
arvif->vdev_id, ret);
}
@@ -961,7 +1009,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
conf->dynamic_ps_timeout);
if (ret) {
- ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
+ ath10k_warn("failed to set inactivity time for vdev %d: %i\n",
arvif->vdev_id, ret);
return ret;
}
@@ -974,8 +1022,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
if (ret) {
- ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
- psmode, arvif->vdev_id);
+ ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
return ret;
}
@@ -1429,7 +1477,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!ap_sta) {
- ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
+ ath10k_warn("failed to find station entry for bss %pM vdev %i\n",
bss_conf->bssid, arvif->vdev_id);
rcu_read_unlock();
return;
@@ -1442,7 +1490,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
bss_conf, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d",
+ ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
rcu_read_unlock();
return;
@@ -1452,7 +1500,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d",
+ ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
return;
}
@@ -1473,7 +1521,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
if (ret) {
- ath10k_warn("VDEV: %d up failed: ret %d\n",
+ ath10k_warn("failed to set vdev %d up: %d\n",
arvif->vdev_id, ret);
return;
}
@@ -1524,7 +1572,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
}
static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta, bool reassoc)
{
struct wmi_peer_assoc_complete_arg peer_arg;
int ret = 0;
@@ -1533,34 +1581,46 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
if (ret) {
- ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
+ ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
+ peer_arg.peer_reassoc = reassoc;
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
+ ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
if (ret) {
- ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret);
+ ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
return ret;
}
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
+ ath10k_warn("failed to install peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
if (ret) {
- ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
+ ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
@@ -1575,9 +1635,19 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
ret = ath10k_clear_peer_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
+ ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -1685,19 +1755,44 @@ static int ath10k_update_channel_list(struct ath10k *ar)
return ret;
}
+static enum wmi_dfs_region
+ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_UNSET:
+ return WMI_UNINIT_DFS_DOMAIN;
+ case NL80211_DFS_FCC:
+ return WMI_FCC_DFS_DOMAIN;
+ case NL80211_DFS_ETSI:
+ return WMI_ETSI_DFS_DOMAIN;
+ case NL80211_DFS_JP:
+ return WMI_MKK4_DFS_DOMAIN;
+ }
+ return WMI_UNINIT_DFS_DOMAIN;
+}
+
static void ath10k_regd_update(struct ath10k *ar)
{
struct reg_dmn_pair_mapping *regpair;
int ret;
+ enum wmi_dfs_region wmi_dfs_reg;
+ enum nl80211_dfs_regions nl_dfs_reg;
lockdep_assert_held(&ar->conf_mutex);
ret = ath10k_update_channel_list(ar);
if (ret)
- ath10k_warn("could not update channel list (%d)\n", ret);
+ ath10k_warn("failed to update channel list: %d\n", ret);
regpair = ar->ath_common.regulatory.regpair;
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ nl_dfs_reg = ar->dfs_detector->region;
+ wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
+ } else {
+ wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
+ }
+
/* Target allows setting up per-band regdomain but ath_common provides
* a combined one only */
ret = ath10k_wmi_pdev_set_regdomain(ar,
@@ -1705,9 +1800,10 @@ static void ath10k_regd_update(struct ath10k *ar)
regpair->reg_domain, /* 2ghz */
regpair->reg_domain, /* 5ghz */
regpair->reg_2ghz_ctl,
- regpair->reg_5ghz_ctl);
+ regpair->reg_5ghz_ctl,
+ wmi_dfs_reg);
if (ret)
- ath10k_warn("could not set pdev regdomain (%d)\n", ret);
+ ath10k_warn("failed to set pdev regdomain: %d\n", ret);
}
static void ath10k_reg_notifier(struct wiphy *wiphy,
@@ -1725,7 +1821,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
request->dfs_region);
if (!result)
- ath10k_warn("dfs region 0x%X not supported, will trigger radar for every pulse\n",
+ ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n",
request->dfs_region);
}
@@ -1759,10 +1855,10 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
if (info->control.vif)
return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
- if (ar->monitor_enabled)
+ if (ar->monitor_started)
return ar->monitor_vdev_id;
- ath10k_warn("could not resolve vdev id\n");
+ ath10k_warn("failed to resolve vdev id\n");
return 0;
}
@@ -1803,7 +1899,9 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
arvif->ar->wmi.vdev_param->def_keyid,
keyidx);
if (ret) {
- ath10k_warn("could not update wep keyidx (%d)\n", ret);
+ ath10k_warn("failed to update wep key index for vdev %d: %d\n",
+ arvif->vdev_id,
+ ret);
return;
}
@@ -1879,7 +1977,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
ar->fw_features)) {
if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
ATH10K_MAX_NUM_MGMT_PENDING) {
- ath10k_warn("wmi mgmt_tx queue limit reached\n");
+ ath10k_warn("reached WMI management tranmist queue limit\n");
ret = -EBUSY;
goto exit;
}
@@ -1903,7 +2001,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
exit:
if (ret) {
- ath10k_warn("tx failed (%d). dropping packet.\n", ret);
+ ath10k_warn("failed to transmit packet, dropping: %d\n", ret);
ieee80211_free_txskb(ar->hw, skb);
}
}
@@ -1964,7 +2062,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
if (!peer) {
ret = ath10k_peer_create(ar, vdev_id, peer_addr);
if (ret)
- ath10k_warn("peer %pM on vdev %d not created (%d)\n",
+ ath10k_warn("failed to create peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
}
@@ -1984,7 +2082,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
if (!peer) {
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
if (ret)
- ath10k_warn("peer %pM on vdev %d not deleted (%d)\n",
+ ath10k_warn("failed to delete peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
}
@@ -2018,7 +2116,8 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
ret = ath10k_wmi_mgmt_tx(ar, skb);
if (ret) {
- ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+ ath10k_warn("failed to transmit management frame via WMI: %d\n",
+ ret);
ieee80211_free_txskb(ar->hw, skb);
}
}
@@ -2043,7 +2142,7 @@ void ath10k_reset_scan(unsigned long ptr)
return;
}
- ath10k_warn("scan timeout. resetting. fw issue?\n");
+ ath10k_warn("scan timed out, firmware problem?\n");
if (ar->scan.is_roc)
ieee80211_remain_on_channel_expired(ar->hw);
@@ -2079,7 +2178,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
ret = ath10k_wmi_stop_scan(ar, &arg);
if (ret) {
- ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
+ ath10k_warn("failed to stop wmi scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.in_progress = false;
ath10k_offchan_tx_purge(ar);
@@ -2099,7 +2198,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
spin_lock_bh(&ar->data_lock);
if (ar->scan.in_progress) {
- ath10k_warn("could not stop scan. its still in progress\n");
+ ath10k_warn("failed to stop scan, it's still in progress\n");
ar->scan.in_progress = false;
ath10k_offchan_tx_purge(ar);
ret = -ETIMEDOUT;
@@ -2194,7 +2293,13 @@ void ath10k_halt(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
- ath10k_stop_cac(ar);
+ if (ath10k_monitor_is_enabled(ar)) {
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ar->promisc = false;
+ ar->monitor = false;
+ ath10k_monitor_stop(ar);
+ }
+
del_timer_sync(&ar->scan.timeout);
ath10k_offchan_tx_purge(ar);
ath10k_mgmt_over_wmi_tx_purge(ar);
@@ -2226,14 +2331,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
ret = ath10k_hif_power_up(ar);
if (ret) {
- ath10k_err("could not init hif (%d)\n", ret);
+ ath10k_err("Could not init hif: %d\n", ret);
ar->state = ATH10K_STATE_OFF;
goto exit;
}
ret = ath10k_core_start(ar);
if (ret) {
- ath10k_err("could not init core (%d)\n", ret);
+ ath10k_err("Could not init core: %d\n", ret);
ath10k_hif_power_down(ar);
ar->state = ATH10K_STATE_OFF;
goto exit;
@@ -2246,13 +2351,11 @@ static int ath10k_start(struct ieee80211_hw *hw)
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
if (ret)
- ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
- ret);
+ ath10k_warn("failed to enable PMF QOS: %d\n", ret);
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
if (ret)
- ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
- ret);
+ ath10k_warn("failed to enable dynamic BW: %d\n", ret);
/*
* By default FW set ARP frames ac to voice (6). In that case ARP
@@ -2266,11 +2369,12 @@ static int ath10k_start(struct ieee80211_hw *hw)
ret = ath10k_wmi_pdev_set_param(ar,
ar->wmi.pdev_param->arp_ac_override, 0);
if (ret) {
- ath10k_warn("could not set arp ac override parameter: %d\n",
+ ath10k_warn("failed to set arp ac override parameter: %d\n",
ret);
goto exit;
}
+ ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
ret = 0;
@@ -2309,7 +2413,7 @@ static int ath10k_config_ps(struct ath10k *ar)
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath10k_mac_vif_setup_ps(arvif);
if (ret) {
- ath10k_warn("could not setup powersave (%d)\n", ret);
+ ath10k_warn("failed to setup powersave: %d\n", ret);
break;
}
}
@@ -2343,7 +2447,6 @@ static const char *chandef_get_width(enum nl80211_chan_width width)
static void ath10k_config_chan(struct ath10k *ar)
{
struct ath10k_vif *arvif;
- bool monitor_was_enabled;
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -2357,10 +2460,8 @@ static void ath10k_config_chan(struct ath10k *ar)
/* First stop monitor interface. Some FW versions crash if there's a
* lone monitor interface. */
- monitor_was_enabled = ar->monitor_enabled;
-
- if (ar->monitor_enabled)
- ath10k_monitor_stop(ar);
+ if (ar->monitor_started)
+ ath10k_monitor_vdev_stop(ar);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (!arvif->is_started)
@@ -2371,7 +2472,7 @@ static void ath10k_config_chan(struct ath10k *ar)
ret = ath10k_vdev_stop(arvif);
if (ret) {
- ath10k_warn("could not stop vdev %d (%d)\n",
+ ath10k_warn("failed to stop vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
@@ -2388,7 +2489,7 @@ static void ath10k_config_chan(struct ath10k *ar)
ret = ath10k_vdev_start(arvif);
if (ret) {
- ath10k_warn("could not start vdev %d (%d)\n",
+ ath10k_warn("failed to start vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
@@ -2399,14 +2500,14 @@ static void ath10k_config_chan(struct ath10k *ar)
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
- ath10k_warn("could not bring vdev up %d (%d)\n",
+ ath10k_warn("failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
continue;
}
}
- if (monitor_was_enabled)
- ath10k_monitor_start(ar, ar->monitor_vdev_id);
+ if (ath10k_monitor_is_enabled(ar))
+ ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
}
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2420,15 +2521,17 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ath10k_dbg(ATH10K_DBG_MAC,
- "mac config channel %d mhz flags 0x%x\n",
+ "mac config channel %dMHz flags 0x%x radar %d\n",
conf->chandef.chan->center_freq,
- conf->chandef.chan->flags);
+ conf->chandef.chan->flags,
+ conf->radar_enabled);
spin_lock_bh(&ar->data_lock);
ar->rx_channel = conf->chandef.chan;
spin_unlock_bh(&ar->data_lock);
- ath10k_config_radar_detection(ar);
+ ar->radar_enabled = conf->radar_enabled;
+ ath10k_recalc_radar_detection(ar);
if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
ar->chandef = conf->chandef;
@@ -2444,14 +2547,14 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_wmi_pdev_set_param(ar, param,
hw->conf.power_level * 2);
if (ret)
- ath10k_warn("mac failed to set 2g txpower %d (%d)\n",
+ ath10k_warn("failed to set 2g txpower %d: %d\n",
hw->conf.power_level, ret);
param = ar->wmi.pdev_param->txpower_limit5g;
ret = ath10k_wmi_pdev_set_param(ar, param,
hw->conf.power_level * 2);
if (ret)
- ath10k_warn("mac failed to set 5g txpower %d (%d)\n",
+ ath10k_warn("failed to set 5g txpower %d: %d\n",
hw->conf.power_level, ret);
}
@@ -2459,10 +2562,19 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ath10k_config_ps(ar);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- if (conf->flags & IEEE80211_CONF_MONITOR)
- ret = ath10k_monitor_create(ar);
- else
- ret = ath10k_monitor_destroy(ar);
+ if (conf->flags & IEEE80211_CONF_MONITOR && !ar->monitor) {
+ ar->monitor = true;
+ ret = ath10k_monitor_start(ar);
+ if (ret) {
+ ath10k_warn("failed to start monitor (config): %d\n",
+ ret);
+ ar->monitor = false;
+ }
+ } else if (!(conf->flags & IEEE80211_CONF_MONITOR) &&
+ ar->monitor) {
+ ar->monitor = false;
+ ath10k_monitor_stop(ar);
+ }
}
mutex_unlock(&ar->conf_mutex);
@@ -2497,12 +2609,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
INIT_LIST_HEAD(&arvif->list);
- if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
- ath10k_warn("Only one monitor interface allowed\n");
- ret = -EBUSY;
- goto err;
- }
-
bit = ffs(ar->free_vdev_map);
if (bit == 0) {
ret = -EBUSY;
@@ -2545,7 +2651,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
if (ret) {
- ath10k_warn("WMI vdev %i create failed: ret %d\n",
+ ath10k_warn("failed to create WMI vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
@@ -2557,7 +2663,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
arvif->def_wep_key_idx);
if (ret) {
- ath10k_warn("Failed to set vdev %i default keyid: %d\n",
+ ath10k_warn("failed to set vdev %i default key id: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
@@ -2567,7 +2673,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ATH10K_HW_TXRX_NATIVE_WIFI);
/* 10.X firmware does not support this VDEV parameter. Do not warn */
if (ret && ret != -EOPNOTSUPP) {
- ath10k_warn("Failed to set vdev %i TX encap: %d\n",
+ ath10k_warn("failed to set vdev %i TX encapsulation: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
@@ -2575,14 +2681,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
- ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
+ ath10k_warn("failed to create vdev %i peer for AP: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
ret = ath10k_mac_set_kickout(arvif);
if (ret) {
- ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
+ ath10k_warn("failed to set vdev %i kickout parameters: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2594,7 +2700,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
+ ath10k_warn("failed to set vdev %i RX wake policy: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2604,7 +2710,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
+ ath10k_warn("failed to set vdev %i TX wake thresh: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2614,7 +2720,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
+ ath10k_warn("failed to set vdev %i PSPOLL count: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2622,21 +2728,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
if (ret) {
- ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
if (ret) {
- ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set frag threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
- ar->monitor_present = true;
-
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -2679,7 +2782,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
if (ret)
- ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
+ ath10k_warn("failed to remove peer for AP vdev %i: %d\n",
arvif->vdev_id, ret);
kfree(arvif->u.ap.noa_data);
@@ -2690,12 +2793,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
- ath10k_warn("WMI vdev %i delete failed: %d\n",
+ ath10k_warn("failed to delete WMI vdev %i: %d\n",
arvif->vdev_id, ret);
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
- ar->monitor_present = false;
-
ath10k_peer_cleanup(ar, arvif->vdev_id);
mutex_unlock(&ar->conf_mutex);
@@ -2728,28 +2828,17 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
- /* Monitor must not be started if it wasn't created first.
- * Promiscuous mode may be started on a non-monitor interface - in
- * such case the monitor vdev is not created so starting the
- * monitor makes no sense. Since ath10k uses no special RX filters
- * (only BSS filter in STA mode) there's no need for any special
- * action here. */
- if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
- !ar->monitor_enabled && ar->monitor_present) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
- ar->monitor_vdev_id);
-
- ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
- if (ret)
- ath10k_warn("Unable to start monitor mode\n");
- } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
- ar->monitor_enabled && ar->monitor_present) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
- ar->monitor_vdev_id);
-
- ret = ath10k_monitor_stop(ar);
- if (ret)
- ath10k_warn("Unable to stop monitor mode\n");
+ if (ar->filter_flags & FIF_PROMISC_IN_BSS && !ar->promisc) {
+ ar->promisc = true;
+ ret = ath10k_monitor_start(ar);
+ if (ret) {
+ ath10k_warn("failed to start monitor (promisc): %d\n",
+ ret);
+ ar->promisc = false;
+ }
+ } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->promisc) {
+ ar->promisc = false;
+ ath10k_monitor_stop(ar);
}
mutex_unlock(&ar->conf_mutex);
@@ -2780,7 +2869,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, arvif->beacon_interval);
if (ret)
- ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
+ ath10k_warn("failed to set beacon interval for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2793,7 +2882,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
WMI_BEACON_STAGGERED_MODE);
if (ret)
- ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
+ ath10k_warn("failed to set beacon mode for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2808,7 +2897,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->dtim_period);
if (ret)
- ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
+ ath10k_warn("failed to set dtim period for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2829,7 +2918,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
- ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
+ ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n",
info->bssid, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2868,20 +2957,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ath10k_control_beaconing(arvif, info);
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- u32 cts_prot;
- if (info->use_cts_prot)
- cts_prot = 1;
- else
- cts_prot = 0;
-
+ arvif->use_cts_prot = info->use_cts_prot;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
- arvif->vdev_id, cts_prot);
+ arvif->vdev_id, info->use_cts_prot);
- vdev_param = ar->wmi.vdev_param->enable_rtscts;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- cts_prot);
+ ret = ath10k_recalc_rtscts_prot(arvif);
if (ret)
- ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
+ ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
}
@@ -2900,7 +2982,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
slottime);
if (ret)
- ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
+ ath10k_warn("failed to set erp slot for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2919,7 +3001,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
preamble);
if (ret)
- ath10k_warn("Failed to set preamble for vdev %d: %i\n",
+ ath10k_warn("failed to set preamble for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2990,7 +3072,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
ret = ath10k_start_scan(ar, &arg);
if (ret) {
- ath10k_warn("could not start hw scan (%d)\n", ret);
+ ath10k_warn("failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.in_progress = false;
spin_unlock_bh(&ar->data_lock);
@@ -3010,8 +3092,7 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ret = ath10k_abort_scan(ar);
if (ret) {
- ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n",
- ret);
+ ath10k_warn("failed to abort scan: %d\n", ret);
ieee80211_scan_completed(hw, 1 /* aborted */);
}
mutex_unlock(&ar->conf_mutex);
@@ -3089,7 +3170,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!peer) {
if (cmd == SET_KEY) {
- ath10k_warn("cannot install key for non-existent peer %pM\n",
+ ath10k_warn("failed to install key for non-existent peer %pM\n",
peer_addr);
ret = -EOPNOTSUPP;
goto exit;
@@ -3112,7 +3193,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = ath10k_install_key(arvif, key, cmd, peer_addr);
if (ret) {
- ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
+ ath10k_warn("failed to install key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
goto exit;
}
@@ -3127,7 +3208,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
peer->keys[key->keyidx] = NULL;
else if (peer == NULL)
/* impossible unless FW goes crazy */
- ath10k_warn("peer %pM disappeared!\n", peer_addr);
+ ath10k_warn("Peer %pM disappeared!\n", peer_addr);
spin_unlock_bh(&ar->data_lock);
exit:
@@ -3195,6 +3276,16 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, smps, err);
}
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+ sta->addr);
+
+ err = ath10k_station_assoc(ar, arvif, sta, true);
+ if (err)
+ ath10k_warn("failed to reassociate station: %pM\n",
+ sta->addr);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -3236,7 +3327,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
max_num_peers = TARGET_NUM_PEERS;
if (ar->num_peers >= max_num_peers) {
- ath10k_warn("Number of peers exceeded: peers number %d (max peers %d)\n",
+ ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n",
ar->num_peers, max_num_peers);
ret = -ENOBUFS;
goto exit;
@@ -3248,7 +3339,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+ ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
@@ -3260,7 +3351,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
+ ath10k_warn("failed to delete peer %pM for vdev %d: %i\n",
sta->addr, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION)
@@ -3275,9 +3366,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
sta->addr);
- ret = ath10k_station_assoc(ar, arvif, sta);
+ ret = ath10k_station_assoc(ar, arvif, sta, false);
if (ret)
- ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
+ ath10k_warn("failed to associate station %pM for vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
@@ -3291,7 +3382,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
- ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n",
+ ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
}
exit:
@@ -3339,7 +3430,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
WMI_STA_PS_PARAM_UAPSD,
arvif->u.sta.uapsd);
if (ret) {
- ath10k_warn("could not set uapsd params %d\n", ret);
+ ath10k_warn("failed to set uapsd params: %d\n", ret);
goto exit;
}
@@ -3352,7 +3443,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
WMI_STA_PS_PARAM_RX_WAKE_POLICY,
value);
if (ret)
- ath10k_warn("could not set rx wake param %d\n", ret);
+ ath10k_warn("failed to set rx wake param: %d\n", ret);
exit:
return ret;
@@ -3402,13 +3493,13 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
/* FIXME: FW accepts wmm params per hw, not per vif */
ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
if (ret) {
- ath10k_warn("could not set wmm params %d\n", ret);
+ ath10k_warn("failed to set wmm params: %d\n", ret);
goto exit;
}
ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
if (ret)
- ath10k_warn("could not set sta uapsd %d\n", ret);
+ ath10k_warn("failed to set sta uapsd: %d\n", ret);
exit:
mutex_unlock(&ar->conf_mutex);
@@ -3461,7 +3552,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
ret = ath10k_start_scan(ar, &arg);
if (ret) {
- ath10k_warn("could not start roc scan (%d)\n", ret);
+ ath10k_warn("failed to start roc scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.in_progress = false;
spin_unlock_bh(&ar->data_lock);
@@ -3470,7 +3561,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
if (ret == 0) {
- ath10k_warn("could not switch to channel for roc scan\n");
+ ath10k_warn("failed to switch to channel for roc scan\n");
ath10k_abort_scan(ar);
ret = -ETIMEDOUT;
goto exit;
@@ -3511,7 +3602,7 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
ret = ath10k_mac_set_rts(arvif, value);
if (ret) {
- ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
break;
}
@@ -3534,7 +3625,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
ret = ath10k_mac_set_rts(arvif, value);
if (ret) {
- ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
break;
}
@@ -3544,7 +3635,8 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
-static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
bool skip;
@@ -3573,7 +3665,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
}), ATH10K_FLUSH_TIMEOUT_HZ);
if (ret <= 0 || skip)
- ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
+ ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n",
skip, ar->state, ret);
skip:
@@ -3608,7 +3700,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
ret = ath10k_hif_suspend(ar);
if (ret) {
- ath10k_warn("could not suspend hif (%d)\n", ret);
+ ath10k_warn("failed to suspend hif: %d\n", ret);
goto resume;
}
@@ -3617,7 +3709,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
resume:
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret)
- ath10k_warn("could not resume target (%d)\n", ret);
+ ath10k_warn("failed to resume target: %d\n", ret);
ret = 1;
exit:
@@ -3634,14 +3726,14 @@ static int ath10k_resume(struct ieee80211_hw *hw)
ret = ath10k_hif_resume(ar);
if (ret) {
- ath10k_warn("could not resume hif (%d)\n", ret);
+ ath10k_warn("failed to resume hif: %d\n", ret);
ret = 1;
goto exit;
}
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret) {
- ath10k_warn("could not resume target (%d)\n", ret);
+ ath10k_warn("failed to resume target: %d\n", ret);
ret = 1;
goto exit;
}
@@ -3964,7 +4056,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
vdev_param, fixed_rate);
if (ret) {
- ath10k_warn("Could not set fixed_rate param 0x%02x: %d\n",
+ ath10k_warn("failed to set fixed rate param 0x%02x: %d\n",
fixed_rate, ret);
ret = -EINVAL;
goto exit;
@@ -3977,7 +4069,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
vdev_param, fixed_nss);
if (ret) {
- ath10k_warn("Could not set fixed_nss param %d: %d\n",
+ ath10k_warn("failed to set fixed nss param %d: %d\n",
fixed_nss, ret);
ret = -EINVAL;
goto exit;
@@ -3990,7 +4082,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
force_sgi);
if (ret) {
- ath10k_warn("Could not set sgi param %d: %d\n",
+ ath10k_warn("failed to set sgi param %d: %d\n",
force_sgi, ret);
ret = -EINVAL;
goto exit;
@@ -4026,7 +4118,7 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
}
if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
- ath10k_warn("Could not force SGI usage for default rate settings\n");
+ ath10k_warn("failed to force SGI usage for default rate settings\n");
return -EINVAL;
}
@@ -4072,8 +4164,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
bw = WMI_PEER_CHWIDTH_80MHZ;
break;
case IEEE80211_STA_RX_BW_160:
- ath10k_warn("mac sta rc update for %pM: invalid bw %d\n",
- sta->addr, sta->bandwidth);
+ ath10k_warn("Invalid bandwith %d in rc update for %pM\n",
+ sta->bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
}
@@ -4099,8 +4191,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
smps = WMI_PEER_SMPS_DYNAMIC;
break;
case IEEE80211_SMPS_NUM_MODES:
- ath10k_warn("mac sta rc update for %pM: invalid smps: %d\n",
- sta->addr, sta->smps_mode);
+ ath10k_warn("Invalid smps %d in sta rc update for %pM\n",
+ sta->smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -4108,15 +4200,6 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
arsta->smps = smps;
}
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
- /* FIXME: Not implemented. Probably the only way to do it would
- * be to re-assoc the peer. */
- changed &= ~IEEE80211_RC_SUPP_RATES_CHANGED;
- ath10k_dbg(ATH10K_DBG_MAC,
- "mac sta rc update for %pM: changing supported rates not implemented\n",
- sta->addr);
- }
-
arsta->changed |= changed;
spin_unlock_bh(&ar->data_lock);
@@ -4516,7 +4599,6 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
- IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_SPECTRUM_MGMT;
@@ -4570,19 +4652,19 @@ int ath10k_mac_register(struct ath10k *ar)
NL80211_DFS_UNSET);
if (!ar->dfs_detector)
- ath10k_warn("dfs pattern detector init failed\n");
+ ath10k_warn("failed to initialise DFS pattern detector\n");
}
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
- ath10k_err("Regulatory initialization failed: %i\n", ret);
+ ath10k_err("failed to initialise regulatory: %i\n", ret);
goto err_free;
}
ret = ieee80211_register_hw(ar->hw);
if (ret) {
- ath10k_err("ieee80211 registration failed: %d\n", ret);
+ ath10k_err("failed to register ieee80211: %d\n", ret);
goto err_free;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9d242d801d9d..bf1083d52e61 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -39,15 +39,27 @@ enum ath10k_pci_irq_mode {
ATH10K_PCI_IRQ_MSI = 2,
};
-static unsigned int ath10k_target_ps;
+enum ath10k_pci_reset_mode {
+ ATH10K_PCI_RESET_AUTO = 0,
+ ATH10K_PCI_RESET_WARM_ONLY = 1,
+};
+
+static unsigned int ath10k_pci_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
-module_param(ath10k_target_ps, uint, 0644);
-MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
+MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
+MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
+
+/* how long to wait for the target to initialise, in ms */
+#define ATH10K_PCI_TARGET_WAIT 3000
+
#define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
@@ -346,9 +358,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
- data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
- orig_nbytes,
- &ce_data_base);
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+ orig_nbytes,
+ &ce_data_base,
+ GFP_ATOMIC);
if (!data_buf) {
ret = -ENOMEM;
@@ -442,12 +455,12 @@ done:
__le32_to_cpu(((__le32 *)data_buf)[i]);
}
} else
- ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
- __func__, address);
+ ath10k_warn("failed to read diag value at 0x%x: %d\n",
+ address, ret);
if (data_buf)
- pci_free_consistent(ar_pci->pdev, orig_nbytes,
- data_buf, ce_data_base);
+ dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ ce_data_base);
return ret;
}
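The diagnostic read and write paths now go through the generic DMA API instead of the deprecated pci_alloc_consistent() wrappers. The essential part is pairing dma_alloc_coherent() with a dma_free_coherent() call on the same device, size and DMA handle. A stripped-down sketch of that pairing follows; the function name and the placement of the transfer are placeholders, and the device pointer and size are whatever the caller supplies (kernel context, <linux/dma-mapping.h> assumed).

static int ath10k_pci_diag_buf_sketch(struct device *dev, size_t nbytes)
{
	dma_addr_t ce_data_base;
	void *data_buf;

	/* coherent buffer the copy engine can DMA into/out of */
	data_buf = dma_alloc_coherent(dev, nbytes, &ce_data_base, GFP_ATOMIC);
	if (!data_buf)
		return -ENOMEM;

	/* ... issue the diagnostic transfer against ce_data_base ... */

	/* free with the same device, size, CPU address and DMA handle */
	dma_free_coherent(dev, nbytes, data_buf, ce_data_base);
	return 0;
}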
@@ -490,9 +503,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
- data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
- orig_nbytes,
- &ce_data_base);
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+ orig_nbytes,
+ &ce_data_base,
+ GFP_ATOMIC);
if (!data_buf) {
ret = -ENOMEM;
goto done;
@@ -588,13 +602,13 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
done:
if (data_buf) {
- pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
- ce_data_base);
+ dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ ce_data_base);
}
if (ret != 0)
- ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
- address);
+ ath10k_warn("failed to write diag value at 0x%x: %d\n",
+ address, ret);
return ret;
}
@@ -803,6 +817,9 @@ unlock:
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
+
return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
@@ -854,6 +871,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
+
if (!force) {
int resources;
/*
@@ -880,7 +899,7 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
memcpy(&ar_pci->msg_callbacks_current, callbacks,
sizeof(ar_pci->msg_callbacks_current));
@@ -938,6 +957,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
{
int ret = 0;
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
+
/* polling for received messages not supported */
*dl_is_polled = 0;
@@ -997,6 +1018,8 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
{
int ul_is_polled, dl_is_polled;
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
+
(void)ath10k_pci_hif_map_service_to_pipe(ar,
ATH10K_HTC_SVC_ID_RSVD_CTRL,
ul_pipe,
@@ -1098,6 +1121,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret, ret_early;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
+
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
@@ -1233,18 +1258,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_pipe *pipe_info;
- int pipe_num;
+ int i;
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
- if (pipe_info->ce_hdl) {
- ath10k_ce_deinit(pipe_info->ce_hdl);
- pipe_info->ce_hdl = NULL;
- pipe_info->buf_sz = 0;
- }
- }
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_deinit_pipe(ar, i);
}
static void ath10k_pci_hif_stop(struct ath10k *ar)
@@ -1252,7 +1269,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
ret = ath10k_ce_disable_interrupts(ar);
if (ret)
@@ -1697,30 +1714,49 @@ static int ath10k_pci_init_config(struct ath10k *ar)
return 0;
}
+static int ath10k_pci_alloc_ce(struct ath10k *ar)
+{
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err("failed to allocate copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void ath10k_pci_free_ce(struct ath10k *ar)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_free_pipe(ar, i);
+}
static int ath10k_pci_ce_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
- int pipe_num;
+ int pipe_num, ret;
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
+ pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
pipe_info->pipe_num = pipe_num;
pipe_info->hif_ce_state = ar;
attr = &host_ce_config_wlan[pipe_num];
- pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
- if (pipe_info->ce_hdl == NULL) {
- ath10k_err("failed to initialize CE for pipe: %d\n",
- pipe_num);
-
- /* It is safe to call it here. It checks if ce_hdl is
- * valid for each pipe */
- ath10k_pci_ce_deinit(ar);
- return -1;
+ ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
+ if (ret) {
+ ath10k_err("failed to initialize copy engine pipe %d: %d\n",
+ pipe_num, ret);
+ return ret;
}
if (pipe_num == CE_COUNT - 1) {
@@ -1741,16 +1777,15 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- u32 fw_indicator_address, fw_indicator;
+ u32 fw_indicator;
ath10k_pci_wake(ar);
- fw_indicator_address = ar_pci->fw_indicator_address;
- fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
+ fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
if (fw_indicator & FW_IND_EVENT_PENDING) {
/* ACK: clear Target-side pending event */
- ath10k_pci_write32(ar, fw_indicator_address,
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
fw_indicator & ~FW_IND_EVENT_PENDING);
if (ar_pci->started) {
@@ -1769,11 +1804,10 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
u32 val;
- ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
ret = ath10k_do_pci_wake(ar);
if (ret) {
@@ -1801,7 +1835,7 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
msleep(100);
/* clear fw indicator */
- ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
/* clear target LF timer interrupts */
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
@@ -1934,7 +1968,9 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
irq_mode = "legacy";
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
- ath10k_info("pci irq %s\n", irq_mode);
+ ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
+ irq_mode, ath10k_pci_irq_mode,
+ ath10k_pci_reset_mode);
return 0;
@@ -1956,6 +1992,8 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
+
/*
* Hardware CUS232 version 2 has some issues with cold reset and the
* preferred (and safer) way to perform a device reset is through a
@@ -1966,9 +2004,14 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
*/
ret = __ath10k_pci_hif_power_up(ar, false);
if (ret) {
- ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
+ ath10k_warn("failed to power up target using warm reset: %d\n",
ret);
+ if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
+ return ret;
+
+ ath10k_warn("trying cold reset\n");
+
ret = __ath10k_pci_hif_power_up(ar, true);
if (ret) {
ath10k_err("failed to power up target using cold reset too (%d)\n",
@@ -1984,12 +2027,14 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
+
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar);
+ ath10k_pci_ce_deinit(ar);
ath10k_pci_warm_reset(ar);
- ath10k_pci_ce_deinit(ar);
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
}
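With the copy-engine allocation split out into ath10k_pci_alloc_ce()/ath10k_pci_free_ce(), descriptor memory lives for the whole lifetime of the PCI device while the rings are only (re)initialised and quiesced around HIF power transitions. The sketch below summarises how the helpers are intended to pair up, assuming ath10k_pci_ce_init() remains on the power-up path (which this section does not show); error handling is trimmed to the minimum.

/* Illustrative lifecycle sketch using the helpers introduced above. */
static int ath10k_pci_ce_lifecycle_sketch(struct ath10k *ar)
{
	int ret;

	ret = ath10k_pci_alloc_ce(ar);	/* probe: allocate descriptors once */
	if (ret)
		return ret;

	ret = ath10k_pci_ce_init(ar);	/* hif power up: set up the rings */
	if (ret)
		goto err_free;

	ath10k_pci_ce_deinit(ar);	/* hif power down: quiesce the rings */
	ath10k_pci_free_ce(ar);		/* remove: release descriptors */
	return 0;

err_free:
	ath10k_pci_free_ce(ar);
	return ret;
}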
@@ -2137,7 +2182,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
static void ath10k_pci_early_irq_tasklet(unsigned long data)
{
struct ath10k *ar = (struct ath10k *)data;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 fw_ind;
int ret;
@@ -2148,9 +2192,9 @@ static void ath10k_pci_early_irq_tasklet(unsigned long data)
return;
}
- fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
+ fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
if (fw_ind & FW_IND_EVENT_PENDING) {
- ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
fw_ind & ~FW_IND_EVENT_PENDING);
/* Some structures are unavailable during early boot or at
@@ -2385,33 +2429,50 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int wait_limit = 300; /* 3 sec */
+ unsigned long timeout;
int ret;
+ u32 val;
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
ret = ath10k_pci_wake(ar);
if (ret) {
- ath10k_err("failed to wake up target: %d\n", ret);
+ ath10k_err("failed to wake up target for init: %d\n", ret);
return ret;
}
- while (wait_limit-- &&
- !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
- FW_IND_INITIALIZED)) {
+ timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
+
+ do {
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
+
+ /* target should never return this */
+ if (val == 0xffffffff)
+ continue;
+
+ if (val & FW_IND_INITIALIZED)
+ break;
+
if (ar_pci->num_msi_intrs == 0)
/* Fix potential race by repeating CORE_BASE writes */
- iowrite32(PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL,
- ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
+ ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK |
+ PCIE_INTR_CE_MASK_ALL);
+
mdelay(10);
- }
+ } while (time_before(jiffies, timeout));
- if (wait_limit < 0) {
- ath10k_err("target stalled\n");
- ret = -EIO;
+ if (val == 0xffffffff || !(val & FW_IND_INITIALIZED)) {
+ ath10k_err("failed to receive initialized event from target: %08x\n",
+ val);
+ ret = -ETIMEDOUT;
goto out;
}
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
+
out:
ath10k_pci_sleep(ar);
return ret;
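The rewritten wait loop above replaces a fixed 300-iteration counter with a jiffies deadline and skips the all-ones value a dead PCI read returns. A rough userspace analogue of that deadline-polling shape (read_indicator, TARGET_WAIT_MS and IND_INITIALIZED are invented for the sketch; the real code uses jiffies and mdelay):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TARGET_WAIT_MS	3000
#define IND_INITIALIZED	0x1u

/* Stand-in for the memory-mapped firmware-indicator read. */
static uint32_t read_indicator(void)
{
	static int polls;

	return ++polls < 3 ? 0 : IND_INITIALIZED;	/* "ready" on the 3rd poll */
}

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static int wait_for_init(void)
{
	uint64_t deadline = now_ms() + TARGET_WAIT_MS;
	uint32_t val;

	do {
		val = read_indicator();
		if (val == 0xffffffffu)		/* bus returned all-ones: ignore */
			continue;
		if (val & IND_INITIALIZED)
			return 0;
		/* the driver re-arms interrupts and mdelay(10)s here */
	} while (now_ms() < deadline);

	return -ETIMEDOUT;
}

int main(void)
{
	return wait_for_init() ? 1 : 0;
}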
@@ -2422,6 +2483,8 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
int i, ret;
u32 val;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
+
ret = ath10k_do_pci_wake(ar);
if (ret) {
ath10k_err("failed to wake up target: %d\n",
@@ -2453,6 +2516,9 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
}
ath10k_do_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
+
return 0;
}
@@ -2484,7 +2550,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k_pci *ar_pci;
u32 lcr_val, chip_id;
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
if (ar_pci == NULL)
@@ -2503,7 +2569,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
- if (ath10k_target_ps)
+ if (ath10k_pci_target_ps)
set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
ath10k_pci_dump_features(ar_pci);
@@ -2516,7 +2582,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
ar_pci->ar = ar;
- ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
atomic_set(&ar_pci->keep_awake_count, 0);
pci_set_drvdata(pdev, ar);
@@ -2594,16 +2659,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ath10k_do_pci_sleep(ar);
+ ret = ath10k_pci_alloc_ce(ar);
+ if (ret) {
+ ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
+ goto err_iomap;
+ }
+
ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err("failed to register driver core: %d\n", ret);
- goto err_iomap;
+ goto err_free_ce;
}
return 0;
+err_free_ce:
+ ath10k_pci_free_ce(ar);
err_iomap:
pci_iounmap(pdev, mem);
err_master:
@@ -2626,7 +2699,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
struct ath10k *ar = pci_get_drvdata(pdev);
struct ath10k_pci *ar_pci;
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
if (!ar)
return;
@@ -2639,6 +2712,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
tasklet_kill(&ar_pci->msi_fw_err);
ath10k_core_unregister(ar);
+ ath10k_pci_free_ce(ar);
pci_iounmap(pdev, ar_pci->mem);
pci_release_region(pdev, BAR_NUM);
@@ -2680,6 +2754,5 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index b43fdb4f7319..dfdebb4157aa 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -189,9 +189,6 @@ struct ath10k_pci {
struct ath10k_hif_cb msg_callbacks_current;
- /* Target address used to signal a pending firmware event */
- u32 fw_indicator_address;
-
/* Copy Engine used for Diagnostic Accesses */
struct ath10k_ce_pipe *ce_diag;
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 0541dd939ce9..82669a77e553 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -100,189 +100,6 @@ exit:
wake_up(&htt->empty_tx_wq);
}
-static const u8 rx_legacy_rate_idx[] = {
- 3, /* 0x00 - 11Mbps */
- 2, /* 0x01 - 5.5Mbps */
- 1, /* 0x02 - 2Mbps */
- 0, /* 0x03 - 1Mbps */
- 3, /* 0x04 - 11Mbps */
- 2, /* 0x05 - 5.5Mbps */
- 1, /* 0x06 - 2Mbps */
- 0, /* 0x07 - 1Mbps */
- 10, /* 0x08 - 48Mbps */
- 8, /* 0x09 - 24Mbps */
- 6, /* 0x0A - 12Mbps */
- 4, /* 0x0B - 6Mbps */
- 11, /* 0x0C - 54Mbps */
- 9, /* 0x0D - 36Mbps */
- 7, /* 0x0E - 18Mbps */
- 5, /* 0x0F - 9Mbps */
-};
-
-static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
- enum ieee80211_band band,
- struct ieee80211_rx_status *status)
-{
- u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
- u8 info0 = info->rate.info0;
- u32 info1 = info->rate.info1;
- u32 info2 = info->rate.info2;
- u8 preamble = 0;
-
- /* Check if valid fields */
- if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
- return;
-
- preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
-
- switch (preamble) {
- case HTT_RX_LEGACY:
- cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
- rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
- rate_idx = 0;
-
- if (rate < 0x08 || rate > 0x0F)
- break;
-
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- if (cck)
- rate &= ~BIT(3);
- rate_idx = rx_legacy_rate_idx[rate];
- break;
- case IEEE80211_BAND_5GHZ:
- rate_idx = rx_legacy_rate_idx[rate];
- /* We are using same rate table registering
- HW - ath10k_rates[]. In case of 5GHz skip
- CCK rates, so -4 here */
- rate_idx -= 4;
- break;
- default:
- break;
- }
-
- status->rate_idx = rate_idx;
- break;
- case HTT_RX_HT:
- case HTT_RX_HT_WITH_TXBF:
- /* HT-SIG - Table 20-11 in info1 and info2 */
- mcs = info1 & 0x1F;
- nss = mcs >> 3;
- bw = (info1 >> 7) & 1;
- sgi = (info2 >> 7) & 1;
-
- status->rate_idx = mcs;
- status->flag |= RX_FLAG_HT;
- if (sgi)
- status->flag |= RX_FLAG_SHORT_GI;
- if (bw)
- status->flag |= RX_FLAG_40MHZ;
- break;
- case HTT_RX_VHT:
- case HTT_RX_VHT_WITH_TXBF:
- /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
- TODO check this */
- mcs = (info2 >> 4) & 0x0F;
- nss = ((info1 >> 10) & 0x07) + 1;
- bw = info1 & 3;
- sgi = info2 & 1;
-
- status->rate_idx = mcs;
- status->vht_nss = nss;
-
- if (sgi)
- status->flag |= RX_FLAG_SHORT_GI;
-
- switch (bw) {
- /* 20MHZ */
- case 0:
- break;
- /* 40MHZ */
- case 1:
- status->flag |= RX_FLAG_40MHZ;
- break;
- /* 80MHZ */
- case 2:
- status->vht_flag |= RX_VHT_FLAG_80MHZ;
- }
-
- status->flag |= RX_FLAG_VHT;
- break;
- default:
- break;
- }
-}
-
-void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
-{
- struct ieee80211_rx_status *status;
- struct ieee80211_channel *ch;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
-
- status = IEEE80211_SKB_RXCB(info->skb);
- memset(status, 0, sizeof(*status));
-
- if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
- status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
- hdr->frame_control = __cpu_to_le16(
- __le16_to_cpu(hdr->frame_control) &
- ~IEEE80211_FCTL_PROTECTED);
- }
-
- if (info->mic_err)
- status->flag |= RX_FLAG_MMIC_ERROR;
-
- if (info->fcs_err)
- status->flag |= RX_FLAG_FAILED_FCS_CRC;
-
- if (info->amsdu_more)
- status->flag |= RX_FLAG_AMSDU_MORE;
-
- status->signal = info->signal;
-
- spin_lock_bh(&ar->data_lock);
- ch = ar->scan_channel;
- if (!ch)
- ch = ar->rx_channel;
- spin_unlock_bh(&ar->data_lock);
-
- if (!ch) {
- ath10k_warn("no channel configured; ignoring frame!\n");
- dev_kfree_skb_any(info->skb);
- return;
- }
-
- process_rx_rates(ar, info, ch->band, status);
- status->band = ch->band;
- status->freq = ch->center_freq;
-
- if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
- /* TSF available only in 32-bit */
- status->mactime = info->tsf & 0xffffffff;
- status->flag |= RX_FLAG_MACTIME_END;
- }
-
- ath10k_dbg(ATH10K_DBG_DATA,
- "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
- info->skb,
- info->skb->len,
- status->flag == 0 ? "legacy" : "",
- status->flag & RX_FLAG_HT ? "ht" : "",
- status->flag & RX_FLAG_VHT ? "vht" : "",
- status->flag & RX_FLAG_40MHZ ? "40" : "",
- status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
- status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
- status->rate_idx,
- status->vht_nss,
- status->freq,
- status->band, status->flag, info->fcs_err);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
- info->skb->data, info->skb->len);
-
- ieee80211_rx(ar->hw, info->skb);
-}
-
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr)
{
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index 356dc9c04c9e..aee3e20058f8 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -21,7 +21,6 @@
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
-void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cb1f7b5bcf4c..fe4d5f1c672f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1362,13 +1362,10 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
struct sk_buff *bcn;
int ret, vdev_id = 0;
- ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
-
ev = (struct wmi_host_swba_event *)skb->data;
map = __le32_to_cpu(ev->vdev_map);
- ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
- "-vdev map 0x%x\n",
+ ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
ev->vdev_map);
for (; map; map >>= 1, vdev_id++) {
@@ -1385,12 +1382,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
bcn_info = &ev->bcn_info[i];
ath10k_dbg(ATH10K_DBG_MGMT,
- "-bcn_info[%d]:\n"
- "--tim_len %d\n"
- "--tim_mcast %d\n"
- "--tim_changed %d\n"
- "--tim_num_ps_pending %d\n"
- "--tim_bitmap 0x%08x%08x%08x%08x\n",
+ "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
i,
__le32_to_cpu(bcn_info->tim_info.tim_len),
__le32_to_cpu(bcn_info->tim_info.tim_mcast),
@@ -2393,8 +2385,9 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
return 0;
}
-int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
- u16 rd5g, u16 ctl2g, u16 ctl5g)
+static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+ u16 rd2g, u16 rd5g, u16 ctl2g,
+ u16 ctl5g)
{
struct wmi_pdev_set_regdomain_cmd *cmd;
struct sk_buff *skb;
@@ -2418,6 +2411,46 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
+static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+ u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_pdev_set_regdomain_cmd_10x *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+ cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+ cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+ cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+ cmd->dfs_domain = __cpu_to_le32(dfs_reg);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_regdomain_cmdid);
+}
+
+int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+ u16 rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+ ctl2g, ctl5g, dfs_reg);
+ else
+ return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+ ctl2g, ctl5g);
+}
+
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *arg)
{
@@ -3456,8 +3489,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi peer assoc vdev %d addr %pM\n",
- arg->vdev_id, arg->addr);
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 4fcc96aa9513..ae838221af65 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -198,16 +198,6 @@ struct wmi_mac_addr {
} __packed;
} __packed;
-/* macro to convert MAC address from WMI word format to char array */
-#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
- (c_macaddr)[0] = ((pwmi_mac_addr)->word0) & 0xff; \
- (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
- (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
- (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
- (c_macaddr)[4] = ((pwmi_mac_addr)->word1) & 0xff; \
- (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
- } while (0)
-
struct wmi_cmd_map {
u32 init_cmdid;
u32 start_scan_cmdid;
@@ -2185,6 +2175,31 @@ struct wmi_pdev_set_regdomain_cmd {
__le32 conformance_test_limit_5G;
} __packed;
+enum wmi_dfs_region {
+ /* Uninitialized dfs domain */
+ WMI_UNINIT_DFS_DOMAIN = 0,
+
+ /* FCC3 dfs domain */
+ WMI_FCC_DFS_DOMAIN = 1,
+
+ /* ETSI dfs domain */
+ WMI_ETSI_DFS_DOMAIN = 2,
+
+ /* Japan dfs domain */
+ WMI_MKK4_DFS_DOMAIN = 3,
+};
+
+struct wmi_pdev_set_regdomain_cmd_10x {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+ __le32 reg_domain_5G;
+ __le32 conformance_test_limit_2G;
+ __le32 conformance_test_limit_5G;
+
+ /* dfs domain from wmi_dfs_region */
+ __le32 dfs_domain;
+} __packed;
+
/* Command to set/unset chip in quiet mode */
struct wmi_pdev_set_quiet_cmd {
/* period in TUs */
@@ -2210,6 +2225,19 @@ enum ath10k_protmode {
ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
};
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES,
+ WMI_RTSCTS_ACROSS_SW_RETRIES
+};
+
+#define WMI_RTSCTS_ENABLED 1
+#define WMI_RTSCTS_SET_MASK 0x0f
+#define WMI_RTSCTS_SET_LSB 0
+
+#define WMI_RTSCTS_PROFILE_MASK 0xf0
+#define WMI_RTSCTS_PROFILE_LSB 4
+
enum wmi_beacon_gen_mode {
WMI_BEACON_STAGGERED_MODE = 0,
WMI_BEACON_BURST_MODE = 1
@@ -2682,6 +2710,9 @@ struct wal_dbg_tx_stats {
/* wal pdev resets */
__le32 pdev_resets;
+ /* frames dropped due to non-availability of stateless TIDs */
+ __le32 stateless_tid_alloc_failure;
+
__le32 phy_underrun;
/* MPDU is more than txop limit */
@@ -2738,13 +2769,21 @@ enum wmi_stats_id {
WMI_REQUEST_AP_STAT = 0x02
};
+struct wlan_inst_rssi_args {
+ __le16 cfg_retry_count;
+ __le16 retry_count;
+};
+
struct wmi_request_stats_cmd {
__le32 stats_id;
- /*
- * Space to add parameters like
- * peer mac addr
- */
+ __le32 vdev_id;
+
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+
+ /* Instantaneous RSSI arguments */
+ struct wlan_inst_rssi_args inst_rssi_args;
} __packed;
/* Suspend option */
@@ -2795,7 +2834,7 @@ struct wmi_stats_event {
* PDEV statistics
* TODO: add all PDEV stats here
*/
-struct wmi_pdev_stats {
+struct wmi_pdev_stats_old {
__le32 chan_nf; /* Channel noise floor */
__le32 tx_frame_count; /* TX frame count */
__le32 rx_frame_count; /* RX frame count */
@@ -2806,6 +2845,23 @@ struct wmi_pdev_stats {
struct wal_dbg_stats wal; /* WAL dbg stats */
} __packed;
+struct wmi_pdev_stats_10x {
+ __le32 chan_nf; /* Channel noise floor */
+ __le32 tx_frame_count; /* TX frame count */
+ __le32 rx_frame_count; /* RX frame count */
+ __le32 rx_clear_count; /* rx clear count */
+ __le32 cycle_count; /* cycle count */
+ __le32 phy_err_count; /* Phy error count */
+ __le32 chan_tx_pwr; /* channel tx power */
+ struct wal_dbg_stats wal; /* WAL dbg stats */
+ __le32 ack_rx_bad;
+ __le32 rts_bad;
+ __le32 rts_good;
+ __le32 fcs_bad;
+ __le32 no_beacons;
+ __le32 mib_int_count;
+} __packed;
+
/*
* VDEV statistics
* TODO: add all VDEV stats here
@@ -2818,10 +2874,17 @@ struct wmi_vdev_stats {
* peer statistics.
* TODO: add more stats
*/
-struct wmi_peer_stats {
+struct wmi_peer_stats_old {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_rssi;
+ __le32 peer_tx_rate;
+} __packed;
+
+struct wmi_peer_stats_10x {
struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi;
__le32 peer_tx_rate;
+ __le32 peer_rx_rate;
} __packed;
struct wmi_vdev_create_cmd {
@@ -4202,7 +4265,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
- u16 rd5g, u16 ctl2g, u16 ctl5g);
+ u16 rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg);
int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
int ath10k_wmi_cmd_init(struct ath10k *ar);
int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 1a2973b7acf2..0fce1c76638e 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -3709,8 +3709,8 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
AR5K_TPC);
} else {
- ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX |
- AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
+ ath5k_hw_reg_write(ah, AR5K_TUNE_MAX_TXPOWER,
+ AR5K_PHY_TXPOWER_RATE_MAX);
}
return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index e39e5860a2e9..9c125ff083f7 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -1,11 +1,19 @@
config ATH6KL
tristate "Atheros mobile chipsets support"
+ depends on CFG80211
+ ---help---
+ This module adds core support for wireless adapters based on
+ Atheros AR6003 and AR6004 chipsets. You still need separate
+ bus drivers for USB and SDIO to be able to use real devices.
+
+ If you choose to build it as a module, it will be called
+ ath6kl_core. Please note that AR6002 and AR6001 are not
+ supported by this driver.
config ATH6KL_SDIO
tristate "Atheros ath6kl SDIO support"
depends on ATH6KL
depends on MMC
- depends on CFG80211
---help---
This module adds support for wireless adapters based on
Atheros AR6003 and AR6004 chipsets running over SDIO. If you
@@ -17,25 +25,31 @@ config ATH6KL_USB
tristate "Atheros ath6kl USB support"
depends on ATH6KL
depends on USB
- depends on CFG80211
---help---
This module adds support for wireless adapters based on
- Atheros AR6004 chipset running over USB. This is still under
- implementation and it isn't functional. If you choose to
- build it as a module, it will be called ath6kl_usb.
+ Atheros AR6004 chipset and chipsets based on it running over
+ USB. If you choose to build it as a module, it will be
+ called ath6kl_usb.
config ATH6KL_DEBUG
bool "Atheros ath6kl debugging"
depends on ATH6KL
---help---
- Enables debug support
+ Enables ath6kl debug support, including debug messages
+ enabled with debug_mask module parameter and debugfs
+ interface.
+
+ If unsure, say Y to make it easier to debug problems.
config ATH6KL_TRACING
bool "Atheros ath6kl tracing support"
depends on ATH6KL
depends on EVENT_TRACING
---help---
- Select this to ath6kl use tracing infrastructure.
+ Select this to let ath6kl use the tracing infrastructure, which
+ can be used for example with trace-cmd. All debug messages and
+ commands are delivered through individually enablable trace
+ points.
If unsure, say Y to make it easier to debug problems.
@@ -47,3 +61,5 @@ config ATH6KL_REGDOMAIN
Enabling this makes it possible to change the regdomain in
the firmware. This can be only enabled if regulatory requirements
are taken into account.
+
+ If unsure, say N.
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index c2c6f4604958..09285084bcd3 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -724,8 +724,9 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"added bss %pM to cfg80211\n", bssid);
kfree(ie);
- } else
+ } else {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
+ }
return bss;
}
@@ -970,7 +971,6 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
ssid_list[i].flag,
ssid_list[i].ssid.ssid_len,
ssid_list[i].ssid.ssid);
-
}
/* Make sure no old entries are left behind */
@@ -1897,7 +1897,6 @@ static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
/* Configure the patterns that we received from the user. */
for (i = 0; i < wow->n_patterns; i++) {
-
/*
* Convert given nl80211 specific mask value to equivalent
* driver specific mask value and send it to the chip along
@@ -2850,8 +2849,9 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (p.prwise_crypto_type == 0) {
p.prwise_crypto_type = NONE_CRYPT;
ath6kl_set_cipher(vif, 0, true);
- } else if (info->crypto.n_ciphers_pairwise == 1)
+ } else if (info->crypto.n_ciphers_pairwise == 1) {
ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
+ }
switch (info->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -2897,7 +2897,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
}
if (info->inactivity_timeout) {
-
inactivity_timeout = info->inactivity_timeout;
if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 4b46adbe8c92..b0b652042760 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -45,9 +45,9 @@ module_param(testmode, uint, 0644);
module_param(recovery_enable, uint, 0644);
module_param(heart_beat_poll, uint, 0644);
MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
-MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic" \
- "polling. This also specifies the polling interval in" \
- "msecs. Set reocvery_enable for this to be effective");
+MODULE_PARM_DESC(heart_beat_poll,
+ "Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");
+
void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index dbfd17d0a5fa..55c4064dd506 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -172,7 +172,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_enable_reg)
{
-
ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
if (irq_proc_reg != NULL) {
@@ -219,7 +218,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
"GMBOX lookahead alias 1: 0x%x\n",
irq_proc_reg->rx_gmbox_lkahd_alias[1]);
}
-
}
if (irq_enable_reg != NULL) {
@@ -1396,7 +1394,6 @@ static ssize_t ath6kl_create_qos_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
-
struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif;
char buf[200];
@@ -1575,7 +1572,6 @@ static ssize_t ath6kl_delete_qos_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
-
struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif;
char buf[100];
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index ca9ba005f287..e194c10d9f00 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -97,8 +97,8 @@ static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_en_reg)
{
-
}
+
static inline void dump_cred_dist_stats(struct htc_target *target)
{
}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index fea7709b5dda..18c070850a09 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -37,7 +37,6 @@ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
buf = req->virt_dma_buf;
for (i = 0; i < req->scat_entries; i++) {
-
if (from_dma)
memcpy(req->scat_list[i].buf, buf,
req->scat_list[i].len);
@@ -116,7 +115,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
le32_to_cpu(regdump_val[i + 2]),
le32_to_cpu(regdump_val[i + 3]));
}
-
}
static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
@@ -701,5 +699,4 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
fail_setup:
return status;
-
}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 61f6b21fb0ae..dc6bd8cd9b83 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -197,9 +197,9 @@ struct hif_scatter_req {
/* bounce buffer for upper layers to copy to/from */
u8 *virt_dma_buf;
- struct hif_scatter_item scat_list[1];
-
u32 scat_q_depth;
+
+ struct hif_scatter_item scat_list[0];
};
struct ath6kl_irq_proc_registers {
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 65e5b719093d..e481f14b9878 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -112,9 +112,9 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
if (cur_ep_dist->endpoint == ENDPOINT_0)
continue;
- if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
- else {
+ } else {
/*
* For the remaining data endpoints, we assume that
* each cred_per_msg are the same. We use a simple
@@ -129,7 +129,6 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
count = (count * 3) >> 2;
count = max(count, cur_ep_dist->cred_per_msg);
cur_ep_dist->cred_norm = count;
-
}
ath6kl_dbg(ATH6KL_DBG_CREDIT,
@@ -549,7 +548,6 @@ static int htc_check_credits(struct htc_target *target,
enum htc_endpoint_id eid, unsigned int len,
int *req_cred)
{
-
*req_cred = (len > target->tgt_cred_sz) ?
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
@@ -608,7 +606,6 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
unsigned int len;
while (true) {
-
flags = 0;
if (list_empty(&endpoint->txq))
@@ -889,7 +886,6 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) {
-
if (list_empty(&endpoint->txq))
break;
@@ -1190,7 +1186,6 @@ static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
list_add_tail(&packet->list, &container);
htc_tx_complete(endpoint, &container);
}
-
}
static void ath6kl_htc_flush_txep_all(struct htc_target *target)
@@ -1394,7 +1389,6 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
ep_cb = ep->ep_cb;
for (j = 0; j < n_msg; j++) {
-
/*
* Reset flag, any packets allocated using the
* rx_alloc() API cannot be recycled on
@@ -1424,9 +1418,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
}
}
- if (list_empty(&ep->rx_bufq))
+ if (list_empty(&ep->rx_bufq)) {
packet = NULL;
- else {
+ } else {
packet = list_first_entry(&ep->rx_bufq,
struct htc_packet, list);
list_del(&packet->list);
@@ -1487,7 +1481,6 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
spin_lock_bh(&target->rx_lock);
for (i = 0; i < msg; i++) {
-
htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
if (htc_hdr->eid >= ENDPOINT_MAX) {
@@ -1708,7 +1701,6 @@ static int htc_parse_trailer(struct htc_target *target,
lk_ahd = (struct htc_lookahead_report *) record_buf;
if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
next_lk_ahds) {
-
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
lk_ahd->pre_valid, lk_ahd->post_valid);
@@ -1755,7 +1747,6 @@ static int htc_parse_trailer(struct htc_target *target,
}
return 0;
-
}
static int htc_proc_trailer(struct htc_target *target,
@@ -1776,7 +1767,6 @@ static int htc_proc_trailer(struct htc_target *target,
status = 0;
while (len > 0) {
-
if (len < sizeof(struct htc_record_hdr)) {
status = -ENOMEM;
break;
@@ -2098,7 +2088,6 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
}
if (!fetched_pkts) {
-
packet = list_first_entry(rx_pktq, struct htc_packet,
list);
@@ -2173,7 +2162,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
look_aheads[0] = msg_look_ahead;
while (true) {
-
/*
* First lookahead sets the expected endpoint IDs for all
* packets in a bundle.
@@ -2825,8 +2813,9 @@ static int ath6kl_htc_reset(struct htc_target *target)
packet->buf = packet->buf_start;
packet->endpoint = ENDPOINT_0;
list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
- } else
+ } else {
list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+ }
}
return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 67aa924ed8b3..756fe52a12c8 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -137,7 +137,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
credits_required = 0;
} else {
-
if (ep->cred_dist.credits < credits_required)
break;
@@ -169,7 +168,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
/* queue this packet into the caller's queue */
list_add_tail(&packet->list, queue);
}
-
}
static void get_htc_packet(struct htc_target *target,
@@ -279,7 +277,6 @@ static int htc_issue_packets(struct htc_target *target,
list_add(&packet->list, pkt_queue);
break;
}
-
}
if (status != 0) {
@@ -385,7 +382,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
*/
list_for_each_entry_safe(packet, tmp_pkt,
txq, list) {
-
ath6kl_dbg(ATH6KL_DBG_HTC,
"%s: Indicat overflowed TX pkts: %p\n",
__func__, packet);
@@ -403,7 +399,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
list_move_tail(&packet->list,
&send_queue);
}
-
}
if (list_empty(&send_queue)) {
@@ -454,7 +449,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
* enough transmit resources.
*/
while (true) {
-
if (get_queue_depth(&ep->txq) == 0)
break;
@@ -495,8 +489,8 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
}
spin_lock_bh(&target->tx_lock);
-
}
+
/* done with this endpoint, we can clear the count */
ep->tx_proc_cnt = 0;
spin_unlock_bh(&target->tx_lock);
@@ -1106,7 +1100,6 @@ free_skb:
dev_kfree_skb(skb);
return status;
-
}
static void htc_flush_rx_queue(struct htc_target *target,
@@ -1258,7 +1251,6 @@ static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
tx_alloc = 0;
} else {
-
tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
if (tx_alloc == 0) {
status = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 4f316bdcbab5..d5ef211f261c 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1192,7 +1192,6 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
if (board_ext_address &&
ar->fw_board_len == (board_data_size + board_ext_data_size)) {
-
/* write extended board data */
ath6kl_dbg(ATH6KL_DBG_BOOT,
"writing extended board data to 0x%x (%d B)\n",
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 5839fc23bdc7..d56554674da4 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -571,7 +571,6 @@ void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
{
-
struct ath6kl *ar = vif->ar;
vif->profile.ch = cpu_to_le16(channel);
@@ -600,7 +599,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
{
-
struct ath6kl_vif *vif;
int res = 0;
@@ -692,9 +690,9 @@ void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
cfg80211_michael_mic_failure(vif->ndev, sta->mac,
NL80211_KEYTYPE_PAIRWISE, keyid,
tsc, GFP_KERNEL);
- } else
+ } else {
ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
-
+ }
}
static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
@@ -1093,8 +1091,9 @@ static int ath6kl_open(struct net_device *dev)
if (test_bit(CONNECTED, &vif->flags)) {
netif_carrier_on(dev);
netif_wake_queue(dev);
- } else
+ } else {
netif_carrier_off(dev);
+ }
return 0;
}
@@ -1146,7 +1145,6 @@ static int ath6kl_set_features(struct net_device *dev,
dev->features = features | NETIF_F_RXCSUM;
return err;
}
-
}
return err;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 7126bdd4236c..339d89f14d32 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -348,7 +348,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
int i, scat_req_sz, scat_list_sz, size;
u8 *virt_buf;
- scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
+ scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
scat_req_sz = sizeof(*s_req) + scat_list_sz;
if (!virt_scat)
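This size calculation pairs with the hif.h hunk earlier in the series that turned scat_list from a one-element array into a zero-length trailing array, so the "- 1" correction is no longer needed. A tiny sketch of the allocation pattern with an invented struct (using the C99 flexible-array spelling rather than the kernel's [0] form):

#include <stdio.h>
#include <stdlib.h>

struct scat_item {
	void *buf;
	int len;
};

struct scat_req {
	int entries;
	struct scat_item list[];	/* trailing flexible array member */
};

static struct scat_req *scat_req_alloc(int n)
{
	/* header plus n items; no "- 1" correction needed with list[] */
	struct scat_req *req = calloc(1, sizeof(*req) + n * sizeof(struct scat_item));

	if (req)
		req->entries = n;
	return req;
}

int main(void)
{
	struct scat_req *req = scat_req_alloc(8);

	printf("allocated %d scatter entries\n", req ? req->entries : 0);
	free(req);
	return 0;
}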
@@ -425,8 +425,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
memcpy(tbuf, buf, len);
bounced = true;
- } else
+ } else {
tbuf = buf;
+ }
ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
if ((request & HIF_READ) && bounced)
@@ -441,9 +442,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
struct bus_request *req)
{
- if (req->scat_req)
+ if (req->scat_req) {
ath6kl_sdio_scat_rw(ar_sdio, req);
- else {
+ } else {
void *context;
int status;
@@ -656,7 +657,6 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
list_add_tail(&s_req->list, &ar_sdio->scat_req);
spin_unlock_bh(&ar_sdio->scat_lock);
-
}
/* scatter gather read write request */
@@ -674,9 +674,9 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
"hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries);
- if (request & HIF_SYNCHRONOUS)
+ if (request & HIF_SYNCHRONOUS) {
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
- else {
+ } else {
spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
spin_unlock_bh(&ar_sdio->wr_async_lock);
@@ -856,7 +856,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
(!ar->suspend_mode && wow)) {
-
ret = ath6kl_set_sdio_pm_caps(ar);
if (ret)
goto cut_pwr;
@@ -878,7 +877,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
!ar->suspend_mode || try_deepsleep) {
-
flags = sdio_get_host_pm_caps(func);
if (!(flags & MMC_PM_KEEP_POWER))
goto cut_pwr;
@@ -1061,7 +1059,6 @@ static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
-
/*
* Hit the credit counter with a 4-byte access, the first byte
* read will hit the counter and cause a decrement, while the
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index a580a629a0da..d5eeeae7711b 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -289,7 +289,7 @@ struct host_interest {
u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
/* test applications flags */
- u32 hi_test_apps_related ; /* 0xdc */
+ u32 hi_test_apps_related; /* 0xdc */
/* location of test script */
u32 hi_ota_testscript; /* 0xe0 */
/* location of CAL data */
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index ebb24045a8ae..40432fe7a5d2 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -125,8 +125,9 @@ static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
spin_unlock_bh(&conn->psq_lock);
return false;
- } else if (!conn->apsd_info)
+ } else if (!conn->apsd_info) {
return false;
+ }
if (test_bit(WMM_ENABLED, &vif->flags)) {
ether_type = be16_to_cpu(datap->h_proto);
@@ -316,8 +317,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
cookie = NULL;
ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
skb, skb->len);
- } else
+ } else {
cookie = ath6kl_alloc_cookie(ar);
+ }
if (cookie == NULL) {
spin_unlock_bh(&ar->lock);
@@ -359,7 +361,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
struct ath6kl_vif *vif = netdev_priv(dev);
u32 map_no = 0;
u16 htc_tag = ATH6KL_DATA_PKT_TAG;
- u8 ac = 99 ; /* initialize to unmapped ac */
+ u8 ac = 99; /* initialize to unmapped ac */
bool chk_adhoc_ps_mapping = false;
int ret;
struct wmi_tx_meta_v2 meta_v2;
@@ -449,8 +451,9 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
if (ret)
goto fail_tx;
}
- } else
+ } else {
goto fail_tx;
+ }
spin_lock_bh(&ar->lock);
@@ -702,7 +705,6 @@ void ath6kl_tx_complete(struct htc_target *target,
/* reap completed packets */
while (!list_empty(packet_queue)) {
-
packet = list_first_entry(packet_queue, struct htc_packet,
list);
list_del(&packet->list);
@@ -1089,8 +1091,9 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
else
skb_queue_tail(&rxtid->q, node->skb);
node->skb = NULL;
- } else
+ } else {
stats->num_hole++;
+ }
rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
@@ -1211,7 +1214,7 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
return is_queued;
spin_lock_bh(&rxtid->lock);
- for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
+ for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
if (rxtid->hold_q[idx].skb) {
/*
* There is a frame in the queue and no
@@ -1265,7 +1268,6 @@ static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
is_apsdq_empty_at_start = is_apsdq_empty;
while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
-
spin_lock_bh(&conn->psq_lock);
skb = skb_dequeue(&conn->apsdq);
is_apsdq_empty = skb_queue_empty(&conn->apsdq);
@@ -1606,16 +1608,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
if (!conn)
return;
aggr_conn = conn->aggr_conn;
- } else
+ } else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
is_amsdu, skb)) {
/* aggregation code will handle the skb */
return;
}
- } else if (!is_broadcast_ether_addr(datap->h_dest))
+ } else if (!is_broadcast_ether_addr(datap->h_dest)) {
vif->net_stats.multicast++;
+ }
ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
@@ -1710,8 +1714,9 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
if (sta)
aggr_conn = sta->aggr_conn;
- } else
+ } else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
if (!aggr_conn)
return;
@@ -1766,7 +1771,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
skb_queue_head_init(&rxtid->q);
spin_lock_init(&rxtid->lock);
}
-
}
struct aggr_info *aggr_init(struct ath6kl_vif *vif)
@@ -1806,8 +1810,9 @@ void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
if (sta)
aggr_conn = sta->aggr_conn;
- } else
+ } else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
if (!aggr_conn)
return;
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 56c3fd5cef65..3afc5a463d06 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -236,7 +236,6 @@ static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
break;
kfree(urb_context);
}
-
}
static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
@@ -245,7 +244,6 @@ static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
-
}
static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 8b4ce28e3ce8..0c0e1e36e40f 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -289,8 +289,9 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
sizeof(struct ath6kl_llc_snap_hdr),
layer2_priority);
- } else
+ } else {
usr_pri = layer2_priority & 0x7;
+ }
/*
* Queue the EAPOL frames in the same WMM_AC_VO queue
@@ -359,8 +360,9 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
sizeof(u32));
skb_pull(skb, hdr_size);
- } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA))
+ } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) {
skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
+ }
datap = skb->data;
llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
@@ -936,7 +938,6 @@ ath6kl_regd_find_country_by_rd(u16 regdmn)
static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
{
-
struct ath6kl_wmi_regdomain *ev;
struct country_code_to_enum_rd *country = NULL;
struct reg_dmn_pair_mapping *regpair = NULL;
@@ -946,10 +947,9 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
ev = (struct ath6kl_wmi_regdomain *) datap;
reg_code = le32_to_cpu(ev->reg_code);
- if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG)
+ if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) {
country = ath6kl_regd_find_country((u16) reg_code);
- else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
-
+ } else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
regpair = ath6kl_get_regpair((u16) reg_code);
country = ath6kl_regd_find_country_by_rd((u16) reg_code);
if (regpair)
@@ -1499,7 +1499,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
(reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
-
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -1530,7 +1529,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
* for delete qos stream from AP
*/
else if (reply->cac_indication == CAC_INDICATION_DELETE) {
-
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -2479,7 +2477,6 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
goto free_data_skb;
for (index = 0; index < num_pri_streams; index++) {
-
if (WARN_ON(!data_sync_bufs[index].skb))
goto free_data_skb;
@@ -2704,7 +2701,6 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
for (i = 0; i < WMM_NUM_AC; i++) {
if (stream_exist & (1 << i)) {
-
/*
* FIXME: Is this lock & unlock inside
* for loop correct? may need rework.
@@ -2870,8 +2866,9 @@ int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
cmd->asleep = cpu_to_le32(1);
- } else
+ } else {
cmd->awake = cpu_to_le32(1);
+ }
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
WMI_SET_HOST_SLEEP_MODE_CMDID,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index b5f226503baf..1f05ecd97c91 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -898,7 +898,6 @@ struct wmi_start_scan_cmd {
* flags here
*/
enum wmi_scan_ctrl_flags_bits {
-
/* set if can scan in the connect cmd */
CONNECT_SCAN_CTRL_FLAGS = 0x01,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 0a6163e9248c..c38399bc9aa9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -410,7 +410,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f76139bbb74f..2c42ff05efa3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -592,7 +592,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009fc0, 0x803e4788},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 0ac8be96097f..2154efcd3900 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -231,7 +231,7 @@ static const u32 ar9331_1p2_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009fc0, 0x803e4788},
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index a01f0edb6518..b995ffe88b33 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -318,7 +318,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},
@@ -348,9 +348,9 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x210d0401},
- {0x0000a3a0, 0xab9a7144},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
{0x0000a3a4, 0x00000000},
{0x0000a3a8, 0xaaaaaaaa},
{0x0000a3ac, 0x3c466478},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index 3c9113d9b1bc..8e5c3b9786e3 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -257,9 +257,9 @@ static const u32 qca953x_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x1f020503},
- {0x0000a39c, 0x29180c03},
- {0x0000a3a0, 0x9a8b6844},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
{0x0000a3a4, 0x000000ff},
{0x0000a3a8, 0x6a6a6a6a},
{0x0000a3ac, 0x6a6a6a6a},
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index e6aec2c0207f..a5ca65240af3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -90,7 +90,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3ba03dde4215..33a2ae77b595 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -114,6 +114,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_TXFIFO_DEPTH 8
#define ATH_TX_ERROR 0x01
+/* Stop tx traffic 1ms before the GO goes away */
+#define ATH_P2P_PS_STOP_TIME 1000
+
#define IEEE80211_SEQ_SEQ_SHIFT 4
#define IEEE80211_SEQ_MAX 4096
#define IEEE80211_WEP_IVLEN 3
@@ -366,11 +369,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
/********/
struct ath_vif {
+ struct ieee80211_vif *vif;
struct ath_node mcast_node;
int av_bslot;
bool primary_sta_vif;
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
struct ath_buf *av_bcbuf;
+
+ /* P2P Client */
+ struct ieee80211_noa_data noa;
};
struct ath9k_vif_iter_data {
@@ -463,6 +470,8 @@ int ath_update_survey_stats(struct ath_softc *sc);
void ath_update_survey_nf(struct ath_softc *sc, int channel);
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
void ath_ps_full_sleep(unsigned long data);
+void ath9k_p2p_ps_timer(void *priv);
+void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif);
/**********/
/* BTCOEX */
@@ -713,6 +722,9 @@ struct ath_softc {
struct completion paprd_complete;
wait_queue_head_t tx_wait;
+ struct ath_gen_timer *p2p_ps_timer;
+ struct ath_vif *p2p_ps_vif;
+
unsigned long driver_data;
u8 gtt_cnt;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 36ae6490e554..4243509616bd 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -589,6 +589,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (ret)
goto err_btcoex;
+ sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer,
+ NULL, sc, AR_FIRST_NDP_TIMER);
+
ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_init_misc(sc);
ath_fill_led_pin(sc);
@@ -644,13 +647,13 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
static const struct ieee80211_iface_limit if_limits[] = {
{ .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_WDS) },
{ .max = 8, .types =
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
- BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_AP) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) },
};
@@ -855,6 +858,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
{
int i = 0;
+ if (sc->p2p_ps_timer)
+ ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer);
+
ath9k_deinit_btcoex(sc);
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index d69853b848ce..8d7b9b66fefa 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -261,6 +261,8 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
sc->gtt_cnt = 0;
ieee80211_wake_queues(sc->hw);
+ ath9k_p2p_ps_timer(sc);
+
return true;
}
@@ -1119,6 +1121,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_assign_slot(sc, vif);
+ avp->vif = vif;
+
an->sc = sc;
an->sta = NULL;
an->vif = vif;
@@ -1163,6 +1167,29 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
return 0;
}
+static void
+ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ s32 tsf, target_tsf;
+
+ if (!avp || !avp->noa.has_next_tsf)
+ return;
+
+ ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer);
+
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+
+ target_tsf = avp->noa.next_tsf;
+ if (!avp->noa.absent)
+ target_tsf -= ATH_P2P_PS_STOP_TIME;
+
+ if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME)
+ target_tsf = tsf + ATH_P2P_PS_STOP_TIME;
+
+ ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000);
+}
+
static void ath9k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
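ath9k_update_p2p_ps_timer() above arms a one-shot generic timer at the next NoA boundary, backing off by ATH_P2P_PS_STOP_TIME before an absence period and clamping targets that are already too close to the current TSF. A minimal arithmetic sketch of that clamping, with start_timer() standing in for the hardware timer call:

#include <stdint.h>
#include <stdio.h>

#define P2P_PS_STOP_TIME	1000	/* microseconds of headroom, as in the patch */

static void start_timer(uint32_t target_tsf)
{
	printf("arm one-shot timer at TSF %u\n", (unsigned)target_tsf);
}

/* Pick a trigger point ahead of the NoA boundary, never in the past. */
static void arm_p2p_timer(uint32_t tsf, uint32_t next_tsf, int absent)
{
	int32_t target = (int32_t)next_tsf;

	if (!absent)
		target -= P2P_PS_STOP_TIME;		/* stop tx a bit early */

	if (target - (int32_t)tsf < P2P_PS_STOP_TIME)
		target = (int32_t)tsf + P2P_PS_STOP_TIME;	/* clamp into the future */

	start_timer((uint32_t)target);
}

int main(void)
{
	arm_p2p_timer(100000, 150000, 0);	/* plenty of headroom */
	arm_p2p_timer(100000, 100200, 0);	/* too close: clamped forward */
	return 0;
}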
@@ -1174,6 +1201,13 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
+ spin_lock_bh(&sc->sc_pcu_lock);
+ if (avp == sc->p2p_ps_vif) {
+ sc->p2p_ps_vif = NULL;
+ ath9k_update_p2p_ps_timer(sc, NULL);
+ }
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
sc->nvifs--;
sc->tx99_vif = NULL;
@@ -1636,6 +1670,72 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
ath9k_set_assoc_state(sc, vif);
}
+void ath9k_p2p_ps_timer(void *priv)
+{
+ struct ath_softc *sc = priv;
+ struct ath_vif *avp = sc->p2p_ps_vif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct ath_node *an;
+ u32 tsf;
+
+ if (!avp)
+ return;
+
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+ if (!avp->noa.absent)
+ tsf += ATH_P2P_PS_STOP_TIME;
+
+ if (!avp->noa.has_next_tsf ||
+ avp->noa.next_tsf - tsf > BIT(31))
+ ieee80211_update_p2p_noa(&avp->noa, tsf);
+
+ ath9k_update_p2p_ps_timer(sc, avp);
+
+ rcu_read_lock();
+
+ vif = avp->vif;
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta)
+ goto out;
+
+ an = (void *) sta->drv_priv;
+ if (an->sleeping == !!avp->noa.absent)
+ goto out;
+
+ an->sleeping = avp->noa.absent;
+ if (an->sleeping)
+ ath_tx_aggr_sleep(sta, sc, an);
+ else
+ ath_tx_aggr_wakeup(sc, an);
+
+out:
+ rcu_read_unlock();
+}
+
+void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ unsigned long flags;
+ u32 tsf;
+
+ if (!sc->p2p_ps_timer)
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p)
+ return;
+
+ sc->p2p_ps_vif = avp;
+
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if (!(sc->ps_flags & PS_BEACON_SYNC)) {
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+ ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf);
+ ath9k_update_p2p_ps_timer(sc, avp);
+ }
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+}
+
static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1710,6 +1810,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & BSS_CHANGED_P2P_PS) {
+ spin_lock_bh(&sc->sc_pcu_lock);
+ ath9k_update_p2p_ps(sc, vif);
+ spin_unlock_bh(&sc->sc_pcu_lock);
+ }
+
if (changed & CHECK_ANI)
ath_check_ani(sc);
@@ -1883,7 +1989,8 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc)
return !!npend;
}
-static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 19df969ec909..441c71448e4c 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -539,6 +539,9 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
ath_dbg(common, PS,
"Reconfigure beacon timers based on synchronized timestamp\n");
ath9k_set_beacon(sc);
+
+ if (sc->p2p_ps_vif)
+ ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
}
if (ath_beacon_dtim_pending_cab(skb)) {
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 4c8cdb097b65..f8ded84b7be8 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1707,7 +1707,9 @@ found:
return 0;
}
-static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void carl9170_op_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ar9170 *ar = hw->priv;
unsigned int vid;
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 5824cd41e4ba..73593aa3cd98 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -338,7 +338,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
}
if (isr)
- wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+ wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
wil->isr_misc = 0;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 95f4efe9ef37..1b265fd19de2 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -363,8 +363,8 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
wil_err(wil, "Firmware not ready\n");
return -ETIME;
} else {
- wil_dbg_misc(wil, "FW ready after %d ms\n",
- jiffies_to_msecs(to-left));
+ wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
+ jiffies_to_msecs(to-left), wil->hw_version);
}
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index f1e1bb338d68..066088418307 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -74,8 +74,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
if (rc)
goto release_irq;
- wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
-
return 0;
release_irq:
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index d04629fe053f..ec29954bd44d 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -91,6 +91,22 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
spin_lock(&r->reorder_lock);
+ /* Due to a race between WMI events (where BACK establishment is
+ * reported) and data Rx, a few packets may be passed up before the
+ * reorder buffer is allocated. Catch up by pretending the SSN is
+ * what we see in the first Rx packet.
+ */
+ if (r->first_time) {
+ r->first_time = false;
+ if (seq != r->head_seq_num) {
+ wil_err(wil, "Error: 1-st frame with wrong sequence"
+ " %d, should be %d. Fixing...\n", seq,
+ r->head_seq_num);
+ r->head_seq_num = seq;
+ r->ssn = seq;
+ }
+ }
+
/* frame with out of date sequence number */
if (seq_less(seq, r->head_seq_num)) {
dev_kfree_skb(skb);
@@ -162,6 +178,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
r->head_seq_num = ssn;
r->buf_size = size;
r->stored_mpdu_num = 0;
+ r->first_time = true;
return r;
}
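
The first_time flag added above implements a one-shot catch-up: if the first data frame arrives before the BACK parameters were applied, the expected head of the reorder window is simply moved to that frame's sequence number. Below is a minimal standalone sketch of that rule, using a reduced stand-in for the driver's struct wil_tid_ampdu_rx that keeps only the fields the rule touches; it is illustrative, not driver code.

#include <stdbool.h>
#include <stdint.h>

/* Reduced stand-in for the reorder state; only the fields the rule touches. */
struct reorder_state {
	uint16_t head_seq_num;	/* next sequence number we expect */
	uint16_t ssn;		/* starting sequence number of the window */
	bool first_time;	/* no data frame seen since allocation */
};

/* One-shot resync on the first frame received after buffer allocation. */
static void catch_up_first_frame(struct reorder_state *r, uint16_t seq)
{
	if (!r->first_time)
		return;
	r->first_time = false;
	if (seq != r->head_seq_num) {
		/* The peer started the block-ack session at a different SSN;
		 * adopt it instead of treating everything as out-of-window. */
		r->head_seq_num = seq;
		r->ssn = seq;
	}
}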
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 2a2dec75f026..d3b8659ca32e 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -35,7 +35,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MEM_SIZE (2*1024*1024UL)
#define WIL6210_RX_RING_SIZE (128)
-#define WIL6210_TX_RING_SIZE (128)
+#define WIL6210_TX_RING_SIZE (512)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (8) /* HW limit */
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
@@ -301,6 +301,7 @@ struct wil_tid_ampdu_rx {
u16 buf_size;
u16 timeout;
u8 dialog_token;
+ bool first_time; /* is this the first time the buffer is used? */
};
struct wil6210_stats {
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 2ba56eef0c45..e9a11cb3428a 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -192,7 +192,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
might_sleep();
if (!test_bit(wil_status_fwready, &wil->status)) {
- wil_err(wil, "FW not ready\n");
+ wil_err(wil, "WMI: cannot send command while FW not ready\n");
return -EAGAIN;
}
@@ -276,8 +276,8 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
wil->fw_version = le32_to_cpu(evt->sw_version);
wil->n_mids = evt->numof_additional_mids;
- wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
- evt->mac, wil->n_mids);
+ wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+ evt->mac, wil->n_mids);
if (!is_valid_ether_addr(ndev->dev_addr)) {
memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
@@ -290,7 +290,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
int len)
{
- wil_dbg_wmi(wil, "WMI: FW ready\n");
+ wil_dbg_wmi(wil, "WMI: got FW ready event\n");
set_bit(wil_status_fwready, &wil->status);
/* reuse wmi_ready for the firmware ready indication */
@@ -348,7 +348,7 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
{
if (wil->scan_request) {
struct wmi_scan_complete_event *data = d;
- bool aborted = (data->status != 0);
+ bool aborted = (data->status != WMI_SCAN_SUCCESS);
wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
cfg80211_scan_done(wil->scan_request, aborted);
@@ -802,6 +802,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
.network_type = wmi_nettype,
.disable_sec_offload = 1,
.channel = chan - 1,
+ .pcp_max_assoc_sta = WIL6210_MAX_CID,
};
struct {
struct wil6210_mbox_hdr_wmi wmi;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 50b8528394f4..17334c852866 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -28,7 +28,7 @@
#define __WILOCITY_WMI_H__
/* General */
-
+#define WILOCITY_MAX_ASSOC_STA (8)
#define WMI_MAC_LEN (6)
#define WMI_PROX_RANGE_NUM (3)
@@ -219,15 +219,6 @@ struct wmi_disconnect_sta_cmd {
__le16 disconnect_reason;
} __packed;
-/*
- * WMI_RECONNECT_CMDID
- */
-struct wmi_reconnect_cmd {
- u8 channel; /* hint */
- u8 reserved;
- u8 bssid[WMI_MAC_LEN]; /* mandatory if set */
-} __packed;
-
/*
* WMI_SET_PMK_CMDID
@@ -296,11 +287,13 @@ enum wmi_scan_type {
WMI_LONG_SCAN = 0,
WMI_SHORT_SCAN = 1,
WMI_PBC_SCAN = 2,
+ WMI_ACTIVE_SCAN = 3,
+ WMI_DIRECT_SCAN = 4,
};
struct wmi_start_scan_cmd {
- u8 reserved[8];
-
+ u8 direct_scan_mac_addr[6];
+ u8 reserved[2];
__le32 home_dwell_time; /* Max duration in the home channel(ms) */
__le32 force_scan_interval; /* Time interval between scans (ms)*/
u8 scan_type; /* wmi_scan_type */
@@ -332,6 +325,7 @@ struct wmi_probed_ssid_cmd {
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
+
/*
* WMI_SET_APPIE_CMDID
* Add Application specified IE to a management frame
@@ -427,7 +421,7 @@ struct wmi_bcon_ctrl_cmd {
__le16 frag_num;
__le64 ss_mask;
u8 network_type;
- u8 reserved;
+ u8 pcp_max_assoc_sta;
u8 disable_sec_offload;
u8 disable_sec;
} __packed;
@@ -450,7 +444,7 @@ enum wmi_port_role {
struct wmi_port_allocate_cmd {
u8 mac[WMI_MAC_LEN];
u8 port_role;
- u8 midid;
+ u8 mid;
} __packed;
/*
@@ -467,6 +461,7 @@ struct wmi_delete_port_cmd {
enum wmi_discovery_mode {
WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
WMI_DISCOVERY_MODE_OFFLOAD = 1,
+ WMI_DISCOVERY_MODE_PEER2PEER = 2,
};
struct wmi_p2p_cfg_cmd {
@@ -493,7 +488,8 @@ struct wmi_power_mgmt_cfg_cmd {
*/
struct wmi_pcp_start_cmd {
__le16 bcon_interval;
- u8 reserved0[10];
+ u8 pcp_max_assoc_sta;
+ u8 reserved0[9];
u8 network_type;
u8 channel;
u8 disable_sec_offload;
@@ -857,6 +853,7 @@ enum wmi_event_id {
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+ WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
@@ -1040,16 +1037,23 @@ enum wmi_disconnect_reason {
struct wmi_disconnect_event {
__le16 protocol_reason_status; /* reason code, see 802.11 spec. */
u8 bssid[WMI_MAC_LEN]; /* set if known */
- u8 disconnect_reason; /* see wmi_disconnect_reason_e */
- u8 assoc_resp_len;
- u8 assoc_info[0];
+ u8 disconnect_reason; /* see wmi_disconnect_reason */
+ u8 assoc_resp_len; /* not in use */
+ u8 assoc_info[0]; /* not in use */
} __packed;
/*
* WMI_SCAN_COMPLETE_EVENTID
*/
+enum scan_status {
+ WMI_SCAN_SUCCESS = 0,
+ WMI_SCAN_FAILED = 1,
+ WMI_SCAN_ABORTED = 2,
+ WMI_SCAN_REJECTED = 3,
+};
+
struct wmi_scan_complete_event {
- __le32 status;
+ __le32 status; /* scan_status */
} __packed;
/*
@@ -1256,6 +1260,14 @@ struct wmi_rx_mgmt_info {
u8 channel; /* From Radio MNGR */
} __packed;
+
+/*
+ * WMI_TX_MGMT_PACKET_EVENTID
+ */
+struct wmi_tx_mgmt_packet_event {
+ u8 payload[0];
+} __packed;
+
struct wmi_rx_mgmt_packet_event {
struct wmi_rx_mgmt_info info;
u8 payload[0];
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 54376fddfaf9..4113b6934764 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -915,10 +915,6 @@ struct b43_wl {
char rng_name[30 + 1];
#endif /* CONFIG_B43_HWRNG */
- /* List of all wireless devices on this chip */
- struct list_head devlist;
- u8 nr_devs;
-
bool radiotap_enabled;
bool radio_enabled;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 69fc3d65531a..32e08d35c06e 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1195,8 +1195,13 @@ static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
B43_BCMA_CLKCTLST_PHY_PLL_REQ;
u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST |
B43_BCMA_CLKCTLST_PHY_PLL_ST;
+ u32 flags;
+
+ flags = B43_BCMA_IOCTL_PHY_CLKEN;
+ if (gmode)
+ flags |= B43_BCMA_IOCTL_GMODE;
+ b43_device_enable(dev, flags);
- b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
b43_bcma_phy_reset(dev);
bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
@@ -3735,40 +3740,35 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
{
struct b43_wldev *up_dev = NULL;
struct b43_wldev *down_dev;
- struct b43_wldev *d;
int err;
bool uninitialized_var(gmode);
int prev_status;
/* Find a device and PHY which supports the band. */
- list_for_each_entry(d, &wl->devlist, list) {
- switch (chan->band) {
- case IEEE80211_BAND_5GHZ:
- if (d->phy.supports_5ghz) {
- up_dev = d;
- gmode = false;
- }
- break;
- case IEEE80211_BAND_2GHZ:
- if (d->phy.supports_2ghz) {
- up_dev = d;
- gmode = true;
- }
- break;
- default:
- B43_WARN_ON(1);
- return -EINVAL;
+ switch (chan->band) {
+ case IEEE80211_BAND_5GHZ:
+ if (wl->current_dev->phy.supports_5ghz) {
+ up_dev = wl->current_dev;
+ gmode = false;
}
- if (up_dev)
- break;
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (wl->current_dev->phy.supports_2ghz) {
+ up_dev = wl->current_dev;
+ gmode = true;
+ }
+ break;
+ default:
+ B43_WARN_ON(1);
+ return -EINVAL;
}
+
if (!up_dev) {
b43err(wl, "Could not find a device for %s-GHz band operation\n",
band_to_string(chan->band));
return -ENODEV;
}
- if ((up_dev == wl->current_dev) &&
- (!!wl->current_dev->phy.gmode == !!gmode)) {
+ if (!!wl->current_dev->phy.gmode == !!gmode) {
/* This device is already running. */
return 0;
}
@@ -5178,7 +5178,6 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
}
dev->phy.gmode = have_2ghz_phy;
- dev->phy.radio_on = true;
b43_wireless_core_reset(dev, dev->phy.gmode);
err = b43_phy_versioning(dev);
@@ -5270,7 +5269,6 @@ static void b43_one_core_detach(struct b43_bus_dev *dev)
b43_debugfs_remove_device(wldev);
b43_wireless_core_detach(wldev);
list_del(&wldev->list);
- wl->nr_devs--;
b43_bus_set_wldev(dev, NULL);
kfree(wldev);
}
@@ -5295,8 +5293,6 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
if (err)
goto err_kfree_wldev;
- list_add(&wldev->list, &wl->devlist);
- wl->nr_devs++;
b43_bus_set_wldev(dev, wldev);
b43_debugfs_add_device(wldev);
@@ -5386,7 +5382,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
wl->hw = hw;
mutex_init(&wl->mutex);
spin_lock_init(&wl->hardirq_lock);
- INIT_LIST_HEAD(&wl->devlist);
INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
INIT_WORK(&wl->tx_work, b43_tx_work);
@@ -5486,39 +5481,42 @@ int b43_ssb_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
struct b43_bus_dev *dev;
struct b43_wl *wl;
int err;
- int first = 0;
dev = b43_bus_dev_ssb_init(sdev);
if (!dev)
return -ENOMEM;
wl = ssb_get_devtypedata(sdev);
- if (!wl) {
- /* Probing the first core. Must setup common struct b43_wl */
- first = 1;
- b43_sprom_fixup(sdev->bus);
- wl = b43_wireless_init(dev);
- if (IS_ERR(wl)) {
- err = PTR_ERR(wl);
- goto out;
- }
- ssb_set_devtypedata(sdev, wl);
- B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+ if (wl) {
+ b43err(NULL, "Dual-core devices are not supported\n");
+ err = -ENOTSUPP;
+ goto err_ssb_kfree_dev;
+ }
+
+ b43_sprom_fixup(sdev->bus);
+
+ wl = b43_wireless_init(dev);
+ if (IS_ERR(wl)) {
+ err = PTR_ERR(wl);
+ goto err_ssb_kfree_dev;
}
+ ssb_set_devtypedata(sdev, wl);
+ B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+
err = b43_one_core_attach(dev, wl);
if (err)
- goto err_wireless_exit;
+ goto err_ssb_wireless_exit;
/* setup and start work to load firmware */
INIT_WORK(&wl->firmware_load, b43_request_firmware);
schedule_work(&wl->firmware_load);
- out:
return err;
- err_wireless_exit:
- if (first)
- b43_wireless_exit(dev, wl);
+err_ssb_wireless_exit:
+ b43_wireless_exit(dev, wl);
+err_ssb_kfree_dev:
+ kfree(dev);
return err;
}
@@ -5546,13 +5544,8 @@ static void b43_ssb_remove(struct ssb_device *sdev)
/* Unregister HW RNG driver */
b43_rng_exit(wl);
- if (list_empty(&wl->devlist)) {
- b43_leds_unregister(wl);
- /* Last core on the chip unregistered.
- * We can destroy common struct b43_wl.
- */
- b43_wireless_exit(dev, wl);
- }
+ b43_leds_unregister(wl);
+ b43_wireless_exit(dev, wl);
}
static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index dbaa51890198..3e45989f418d 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -96,7 +96,7 @@ int b43_phy_init(struct b43_wldev *dev)
phy->channel = ops->get_default_chan(dev);
- ops->software_rfkill(dev, false);
+ b43_software_rfkill(dev, false);
err = ops->init(dev);
if (err) {
b43err(dev->wl, "PHY init failed\n");
@@ -116,7 +116,7 @@ err_phy_exit:
if (ops->exit)
ops->exit(dev);
err_block_rf:
- ops->software_rfkill(dev, true);
+ b43_software_rfkill(dev, true);
return err;
}
@@ -125,7 +125,7 @@ void b43_phy_exit(struct b43_wldev *dev)
{
const struct b43_phy_operations *ops = dev->phy.ops;
- ops->software_rfkill(dev, true);
+ b43_software_rfkill(dev, true);
if (ops->exit)
ops->exit(dev);
}
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 12f467b8d564..8f5c14bc10e6 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1587,6 +1587,7 @@ static void b43_phy_initb5(struct b43_wldev *dev)
b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
static void b43_phy_initb6(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@@ -1670,7 +1671,7 @@ static void b43_phy_initb6(struct b43_wldev *dev)
b43_radio_write16(dev, 0x50, 0x20);
}
if (phy->radio_rev <= 2) {
- b43_radio_write16(dev, 0x7C, 0x20);
+ b43_radio_write16(dev, 0x50, 0x20);
b43_radio_write16(dev, 0x5A, 0x70);
b43_radio_write16(dev, 0x5B, 0x7B);
b43_radio_write16(dev, 0x5C, 0xB0);
@@ -1686,9 +1687,8 @@ static void b43_phy_initb6(struct b43_wldev *dev)
b43_phy_write(dev, 0x2A, 0x8AC0);
b43_phy_write(dev, 0x0038, 0x0668);
b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
- if (phy->radio_rev <= 5) {
+ if (phy->radio_rev == 4 || phy->radio_rev == 5)
b43_phy_maskset(dev, 0x5D, 0xFF80, 0x0003);
- }
if (phy->radio_rev <= 2)
b43_radio_write16(dev, 0x005D, 0x000D);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 24ccbe96e0c8..41dab89a2942 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -257,6 +257,72 @@ static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
}
}
+static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
+ enum n_intc_override intc_override,
+ u16 value, u8 core_sel)
+{
+ u16 reg, tmp, tmp2, val;
+ int core;
+
+ for (core = 0; core < 2; core++) {
+ if ((core_sel == 1 && core != 0) ||
+ (core_sel == 2 && core != 1))
+ continue;
+
+ reg = (core == 0) ? B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+
+ switch (intc_override) {
+ case N_INTC_OVERRIDE_OFF:
+ b43_phy_write(dev, reg, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ break;
+ case N_INTC_OVERRIDE_TRSW:
+ b43_phy_maskset(dev, reg, ~0xC0, value << 6);
+ b43_phy_set(dev, reg, 0x400);
+
+ b43_phy_mask(dev, 0x2ff, ~0xC000 & 0xFFFF);
+ b43_phy_set(dev, 0x2ff, 0x2000);
+ b43_phy_set(dev, 0x2ff, 0x0001);
+ break;
+ case N_INTC_OVERRIDE_PA:
+ tmp = 0x0030;
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ val = value << 5;
+ else
+ val = value << 4;
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ b43_phy_set(dev, reg, 0x1000);
+ break;
+ case N_INTC_OVERRIDE_EXT_LNA_PU:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0001;
+ tmp2 = 0x0004;
+ val = value;
+ } else {
+ tmp = 0x0004;
+ tmp2 = 0x0001;
+ val = value << 2;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ b43_phy_mask(dev, reg, ~tmp2);
+ break;
+ case N_INTC_OVERRIDE_EXT_LNA_GAIN:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0002;
+ tmp2 = 0x0008;
+ val = value << 1;
+ } else {
+ tmp = 0x0008;
+ tmp2 = 0x0002;
+ val = value << 3;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ b43_phy_mask(dev, reg, ~tmp2);
+ break;
+ }
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
enum n_intc_override intc_override,
@@ -265,6 +331,12 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
u8 i, j;
u16 reg, tmp, val;
+ if (dev->phy.rev >= 7) {
+ b43_nphy_rf_ctl_intc_override_rev7(dev, intc_override, value,
+ core);
+ return;
+ }
+
B43_WARN_ON(dev->phy.rev < 3);
for (i = 0; i < 2; i++) {
@@ -419,7 +491,8 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
static const u16 clip[] = { 0xFFFF, 0xFFFF };
if (nphy->deaf_count++ == 0) {
nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
- b43_nphy_classifier(dev, 0x7, 0);
+ b43_nphy_classifier(dev, 0x7,
+ B43_NPHY_CLASSCTL_WAITEDEN);
b43_nphy_read_clip_detection(dev, nphy->clip_state);
b43_nphy_write_clip_detection(dev, clip);
}
@@ -734,9 +807,16 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
u16 bias, cbias;
u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
+ bool is_pkg_fab_smic;
B43_WARN_ON(dev->phy.rev < 3);
+ is_pkg_fab_smic =
+ ((dev->dev->chip_id == BCMA_CHIP_ID_BCM43224 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM43225 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM43421) &&
+ dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
+
b43_chantab_radio_2056_upload(dev, e);
b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
@@ -744,7 +824,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
- if (dev->dev->chip_id == 0x4716) {
+ if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
} else {
@@ -752,6 +833,13 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
}
}
+ if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
+ }
if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
@@ -767,7 +855,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev,
offset | B2056_TX_PADG_IDAC, 0xcc);
- if (dev->dev->chip_id == 0x4716) {
+ if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
bias = 0x40;
cbias = 0x45;
pag_boost = 0x5;
@@ -776,6 +865,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
} else {
bias = 0x25;
cbias = 0x20;
+ if (is_pkg_fab_smic) {
+ bias = 0x2a;
+ cbias = 0x38;
+ }
pag_boost = 0x4;
pgag_boost = 0x03;
mixg_boost = 0x65;
@@ -844,6 +937,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
mixa_boost = 0xF;
}
+ cbias = is_pkg_fab_smic ? 0x35 : 0x30;
+
for (i = 0; i < 2; i++) {
offset = i ? B2056_TX1 : B2056_TX0;
@@ -862,11 +957,11 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev,
offset | B2056_TX_PADA_CASCBIAS, 0x03);
b43_radio_write(dev,
- offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
+ offset | B2056_TX_INTPAA_IAUX_STAT, 0x30);
b43_radio_write(dev,
- offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
+ offset | B2056_TX_INTPAA_IMAIN_STAT, 0x30);
b43_radio_write(dev,
- offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
+ offset | B2056_TX_INTPAA_CASCBIAS, cbias);
}
}
@@ -1164,23 +1259,20 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
u16 seq_mode;
u32 tmp;
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, true);
+ b43_nphy_stay_in_carrier_search(dev, true);
if ((nphy->bb_mult_save & 0x80000000) == 0) {
tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
}
+ /* TODO: add modify_bbmult argument */
if (!dev->phy.is_40mhz)
tmp = 0x6464;
else
tmp = 0x4747;
b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, false);
-
b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
if (loops != 0xFFFF)
@@ -1213,6 +1305,8 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
b43err(dev->wl, "run samples timeout\n");
b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+
+ b43_nphy_stay_in_carrier_search(dev, false);
}
/**************************************************
@@ -1588,8 +1682,8 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
struct b43_phy_n *nphy = dev->phy.n;
u16 saved_regs_phy_rfctl[2];
- u16 saved_regs_phy[13];
- u16 regs_to_store[] = {
+ u16 saved_regs_phy[22];
+ u16 regs_to_store_rev3[] = {
B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
@@ -1598,6 +1692,20 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
};
+ u16 regs_to_store_rev7[] = {
+ B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
+ B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
+ B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
+ 0x342, 0x343, 0x346, 0x347,
+ 0x2ff,
+ B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1,
+ B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
+ 0x340, 0x341, 0x344, 0x345,
+ B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
+ };
+ u16 *regs_to_store;
+ int regs_amount;
u16 class;
@@ -1617,6 +1725,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
u8 rx_core_state;
int core, i, j, vcm;
+ if (dev->phy.rev >= 7) {
+ regs_to_store = regs_to_store_rev7;
+ regs_amount = ARRAY_SIZE(regs_to_store_rev7);
+ } else {
+ regs_to_store = regs_to_store_rev3;
+ regs_amount = ARRAY_SIZE(regs_to_store_rev3);
+ }
+ BUG_ON(regs_amount > ARRAY_SIZE(saved_regs_phy));
+
class = b43_nphy_classifier(dev, 0, 0);
b43_nphy_classifier(dev, 7, 4);
b43_nphy_read_clip_detection(dev, clip_state);
@@ -1624,22 +1741,29 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
- for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+ for (i = 0; i < regs_amount; i++)
saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]);
b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_OFF, 0, 7);
b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 1, 7);
- b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+
+ if (dev->phy.rev >= 7) {
+ /* TODO */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ } else {
+ }
} else {
- b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+ } else {
+ b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+ }
}
rx_core_state = b43_nphy_get_rx_core_state(dev);
@@ -1654,8 +1778,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
/* Grab RSSI results for every possible VCM */
for (vcm = 0; vcm < 8; vcm++) {
- b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
- vcm << 2);
+ if (dev->phy.rev >= 7)
+ ;
+ else
+ b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
+ 0xE3, vcm << 2);
b43_nphy_poll_rssi(dev, N_RSSI_NB, results[vcm], 8);
}
@@ -1682,8 +1809,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
}
/* Select the best VCM */
- b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
- vcm_final << 2);
+ if (dev->phy.rev >= 7)
+ ;
+ else
+ b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
+ 0xE3, vcm_final << 2);
for (i = 0; i < 4; i++) {
if (core != i / 2)
@@ -1736,9 +1866,9 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX);
- b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
- for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+ for (i = 0; i < regs_amount; i++)
b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
/* Store for future configuration */
@@ -2494,8 +2624,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
struct ssb_sprom *sprom = dev->dev->bus_sprom;
/* TX to RX */
- u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
- u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ u8 tx2rx_events[7] = { 0x4, 0x3, 0x5, 0x2, 0x1, 0x8, 0x1F };
+ u8 tx2rx_delays[7] = { 8, 4, 4, 4, 4, 6, 1 };
/* RX to TX */
u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
0x1F };
@@ -2503,6 +2633,23 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
+ u16 vmids[5][4] = {
+ { 0xa2, 0xb4, 0xb4, 0x89, }, /* 0 */
+ { 0xb4, 0xb4, 0xb4, 0x24, }, /* 1 */
+ { 0xa2, 0xb4, 0xb4, 0x74, }, /* 2 */
+ { 0xa2, 0xb4, 0xb4, 0x270, }, /* 3 */
+ { 0xa2, 0xb4, 0xb4, 0x00, }, /* 4 and 5 */
+ };
+ u16 gains[5][4] = {
+ { 0x02, 0x02, 0x02, 0x00, }, /* 0 */
+ { 0x02, 0x02, 0x02, 0x02, }, /* 1 */
+ { 0x02, 0x02, 0x02, 0x04, }, /* 2 */
+ { 0x02, 0x02, 0x02, 0x00, }, /* 3 */
+ { 0x02, 0x02, 0x02, 0x00, }, /* 4 and 5 */
+ };
+ u16 *vmid, *gain;
+
+ u8 pdet_range;
u16 tmp16;
u32 tmp32;
@@ -2561,7 +2708,71 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
- /* TODO */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ pdet_range = sprom->fem.ghz2.pdet_range;
+ else
+ pdet_range = sprom->fem.ghz5.pdet_range;
+ vmid = vmids[min_t(u16, pdet_range, 4)];
+ gain = gains[min_t(u16, pdet_range, 4)];
+ switch (pdet_range) {
+ case 3:
+ if (!(dev->phy.rev >= 4 &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ break;
+ /* FALL THROUGH */
+ case 0:
+ case 1:
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+ break;
+ case 2:
+ if (dev->phy.rev >= 6) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ vmid[3] = 0x94;
+ else
+ vmid[3] = 0x8e;
+ gain[3] = 3;
+ } else if (dev->phy.rev == 5) {
+ vmid[3] = 0x84;
+ gain[3] = 2;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+ break;
+ case 4:
+ case 5:
+ if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
+ if (pdet_range == 4) {
+ vmid[3] = 0x8e;
+ tmp16 = 0x96;
+ gain[3] = 0x2;
+ } else {
+ vmid[3] = 0x89;
+ tmp16 = 0x89;
+ gain[3] = 0;
+ }
+ } else {
+ if (pdet_range == 4) {
+ vmid[3] = 0x89;
+ tmp16 = 0x8b;
+ gain[3] = 0x2;
+ } else {
+ vmid[3] = 0x74;
+ tmp16 = 0x70;
+ gain[3] = 0;
+ }
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+ vmid[3] = tmp16;
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+ break;
+ }
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
@@ -2600,7 +2811,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
/* Dropped probably-always-true condition */
b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH0, 0x03eb);
b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH1, 0x03eb);
- b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
+ b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH0, 0x0341);
b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH0, 0x042b);
b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH1, 0x042b);
@@ -3211,6 +3422,20 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
u8 idx, delta;
u8 i, stf_mode;
+ /* Array adj_pwr_tbl corresponds to the hardware table. It consists of
+ * 21 groups, each containing 4 entries.
+ *
+ * The first group holds the CCK entries.
+ * Each of the remaining groups has one entry per Tx mode (SISO, CDD,
+ * STBC, SDM).
+ *
+ * Group 0 is for CCK
+ * Groups 1..4 use BPSK (one group per coding rate)
+ * Groups 5..8 use QPSK (one group per coding rate)
+ * Groups 9..12 use 16-QAM (one group per coding rate)
+ * Groups 13..16 use 64-QAM (one group per coding rate)
+ * Groups 17..20 are unknown
+ */
+
for (i = 0; i < 4; i++)
nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];
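
As a side note on the layout documented in the comment above: with 21 groups of 4 entries stored group-major (which matches the 84-entry bulk writes further down in this hunk series), an entry can be located with a trivial index calculation. The enum and helper below are an illustrative sketch under that assumption, not part of the driver.

/* One entry per Tx mode within each group, as the layout comment states. */
enum tx_mode { TX_SISO, TX_CDD, TX_STBC, TX_SDM, TX_MODE_NUM };

#define ADJ_PWR_GROUPS	21	/* group 0 = CCK, groups 1..20 = OFDM rate groups */

/* Flat index into a group-major table of ADJ_PWR_GROUPS * TX_MODE_NUM entries. */
static inline int adj_pwr_index(int group, enum tx_mode mode)
{
	return group * TX_MODE_NUM + mode;
}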
@@ -3409,10 +3634,8 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
}
b43_nphy_tx_prepare_adjusted_power_table(dev);
- /*
b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, nphy->adj_pwr_tbl);
b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, nphy->adj_pwr_tbl);
- */
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, false);
@@ -5124,7 +5347,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
if (phy->rev >= 3 && phy->rev <= 6)
- b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
+ b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0032);
b43_nphy_tx_lp_fbw(dev);
if (phy->rev >= 3)
b43_nphy_spur_workaround(dev);
@@ -5441,8 +5664,11 @@ static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
{
/* Register 1 is a 32-bit register. */
B43_WARN_ON(reg == 1);
- /* N-PHY needs 0x100 for read access */
- reg |= 0x100;
+
+ if (dev->phy.rev >= 7)
+ reg |= 0x200; /* Radio 0x2057 */
+ else
+ reg |= 0x100;
b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index b4fd9345d673..a07e4cacab77 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -48,7 +48,7 @@ struct b2056_inittabs_pts {
unsigned int rx_length;
};
-static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -232,7 +232,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -380,7 +380,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
[B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -530,7 +530,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -714,7 +714,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -862,7 +862,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
[B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1012,7 +1012,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1196,7 +1196,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1352,7 +1352,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1502,7 +1502,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1686,7 +1686,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1842,7 +1842,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1992,7 +1992,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2176,7 +2176,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2332,7 +2332,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2482,7 +2482,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2666,7 +2666,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2822,7 +2822,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2972,24 +2972,69 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-#define INITTABSPTS(prefix) \
- .syn = prefix##_syn, \
- .syn_length = ARRAY_SIZE(prefix##_syn), \
- .tx = prefix##_tx, \
- .tx_length = ARRAY_SIZE(prefix##_tx), \
- .rx = prefix##_rx, \
- .rx_length = ARRAY_SIZE(prefix##_rx)
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_syn[] = {
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
+ [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+ [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+};
-static const struct b2056_inittabs_pts b2056_inittabs[] = {
- [3] = { INITTABSPTS(b2056_inittab_rev3) },
- [4] = { INITTABSPTS(b2056_inittab_rev4) },
- [5] = { INITTABSPTS(b2056_inittab_rev5) },
- [6] = { INITTABSPTS(b2056_inittab_rev6) },
- [7] = { INITTABSPTS(b2056_inittab_rev7) },
- [8] = { INITTABSPTS(b2056_inittab_rev8) },
- [9] = { INITTABSPTS(b2056_inittab_rev7) },
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_tx[] = {
+ [B2056_TX_PA_SPARE2] = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
+ [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_TX_TXSPARE1] = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_rx[] = {
+ [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
+ [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
+ [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_RXSPARE3] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
};
+#define INITTABSPTS(prefix) \
+ static const struct b2056_inittabs_pts prefix = { \
+ .syn = prefix##_syn, \
+ .syn_length = ARRAY_SIZE(prefix##_syn), \
+ .tx = prefix##_tx, \
+ .tx_length = ARRAY_SIZE(prefix##_tx), \
+ .rx = prefix##_rx, \
+ .rx_length = ARRAY_SIZE(prefix##_rx), \
+ }
+
+INITTABSPTS(b2056_inittab_phy_rev3);
+INITTABSPTS(b2056_inittab_phy_rev4);
+INITTABSPTS(b2056_inittab_radio_rev5);
+INITTABSPTS(b2056_inittab_radio_rev6);
+INITTABSPTS(b2056_inittab_radio_rev7_9);
+INITTABSPTS(b2056_inittab_radio_rev8);
+INITTABSPTS(b2056_inittab_radio_rev11);
+
#define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
@@ -3041,7 +3086,7 @@ static const struct b2056_inittabs_pts b2056_inittabs[] = {
.phy_regs.phy_bw6 = r5
/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -4036,7 +4081,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev4[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -5031,7 +5076,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev5[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -6026,7 +6071,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev6[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -7021,7 +7066,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev7_9[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -8016,7 +8061,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[]
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev8[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -9011,6 +9056,236 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] =
},
};
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev11[] = {
+ {
+ .freq = 5180,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+ },
+ {
+ .freq = 5200,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+ },
+ {
+ .freq = 5220,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+ },
+ {
+ .freq = 5745,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+ },
+ {
+ .freq = 5765,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+ },
+ {
+ .freq = 5785,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+ },
+ {
+ .freq = 5805,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+ },
+ {
+ .freq = 5825,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x69, 0x00),
+ PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+ },
+ {
+ .freq = 2412,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+ },
+ {
+ .freq = 2417,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+ },
+ {
+ .freq = 2422,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+ },
+ {
+ .freq = 2427,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+ },
+ {
+ .freq = 2432,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+ },
+ {
+ .freq = 2437,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+ },
+ {
+ .freq = 2442,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+ },
+ {
+ .freq = 2447,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+ },
+ {
+ .freq = 2452,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+ },
+ {
+ .freq = 2457,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+ },
+ {
+ .freq = 2462,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+ },
+ {
+ .freq = 2467,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x22, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+ },
+ {
+ .freq = 2472,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+ },
+ {
+ .freq = 2484,
+ RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+ },
+};
+
+static const struct b2056_inittabs_pts
+*b43_nphy_get_inittabs_rev3(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ switch (dev->phy.rev) {
+ case 3:
+ return &b2056_inittab_phy_rev3;
+ case 4:
+ return &b2056_inittab_phy_rev4;
+ default:
+ switch (phy->radio_rev) {
+ case 5:
+ return &b2056_inittab_radio_rev5;
+ case 6:
+ return &b2056_inittab_radio_rev6;
+ case 7:
+ case 9:
+ return &b2056_inittab_radio_rev7_9;
+ case 8:
+ return &b2056_inittab_radio_rev8;
+ case 11:
+ return &b2056_inittab_radio_rev11;
+ }
+ }
+
+ return NULL;
+}
+
static void b2056_upload_inittab(struct b43_wldev *dev, bool ghz5,
bool ignore_uploadflag, u16 routing,
const struct b2056_inittab_entry *e,
@@ -9037,11 +9312,11 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
{
const struct b2056_inittabs_pts *pts;
- if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+ pts = b43_nphy_get_inittabs_rev3(dev);
+ if (!pts) {
B43_WARN_ON(1);
return;
}
- pts = &b2056_inittabs[dev->phy.rev];
b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
B2056_SYN, pts->syn, pts->syn_length);
@@ -9060,11 +9335,12 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
const struct b2056_inittabs_pts *pts;
const struct b2056_inittab_entry *e;
- if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+ pts = b43_nphy_get_inittabs_rev3(dev);
+ if (!pts) {
B43_WARN_ON(1);
return;
}
- pts = &b2056_inittabs[dev->phy.rev];
+
e = &pts->syn[B2056_SYN_PLL_CP2];
b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
@@ -9073,38 +9349,46 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
const struct b43_nphy_channeltab_entry_rev3 *
b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
{
+ struct b43_phy *phy = &dev->phy;
const struct b43_nphy_channeltab_entry_rev3 *e;
unsigned int length, i;
- switch (dev->phy.rev) {
+ switch (phy->rev) {
case 3:
- e = b43_nphy_channeltab_rev3;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev3);
+ e = b43_nphy_channeltab_phy_rev3;
+ length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev3);
break;
case 4:
- e = b43_nphy_channeltab_rev4;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev4);
- break;
- case 5:
- e = b43_nphy_channeltab_rev5;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev5);
- break;
- case 6:
- e = b43_nphy_channeltab_rev6;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev6);
- break;
- case 7:
- case 9:
- e = b43_nphy_channeltab_rev7_9;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev7_9);
- break;
- case 8:
- e = b43_nphy_channeltab_rev8;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev8);
+ e = b43_nphy_channeltab_phy_rev4;
+ length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev4);
break;
default:
- B43_WARN_ON(1);
- return NULL;
+ switch (phy->radio_rev) {
+ case 5:
+ e = b43_nphy_channeltab_radio_rev5;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev5);
+ break;
+ case 6:
+ e = b43_nphy_channeltab_radio_rev6;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev6);
+ break;
+ case 7:
+ case 9:
+ e = b43_nphy_channeltab_radio_rev7_9;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev7_9);
+ break;
+ case 8:
+ e = b43_nphy_channeltab_radio_rev8;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev8);
+ break;
+ case 11:
+ e = b43_nphy_channeltab_radio_rev11;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev11);
+ break;
+ default:
+ B43_WARN_ON(1);
+ return NULL;
+ }
}
for (i = 0; i < length; i++, e++) {
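For illustration only: a minimal, self-contained C sketch of the two-level dispatch the new lookup uses, selecting a channel table by PHY revision first and falling back to the radio revision. The table names and contents below are invented placeholders, not data from the driver.

#include <stdio.h>
#include <stddef.h>

struct chantab_entry { unsigned int freq; };

static const struct chantab_entry tab_phy_rev3[]   = { { 2412 }, { 2417 } };
static const struct chantab_entry tab_radio_rev5[] = { { 2412 } };

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Same shape as b43_nphy_get_chantabent_rev3(): PHY rev 3/4 pick a
 * PHY-specific table, anything newer is keyed on the radio revision. */
static const struct chantab_entry *
get_chantab(unsigned int phy_rev, unsigned int radio_rev, size_t *len)
{
	switch (phy_rev) {
	case 3:
		*len = ARRAY_SIZE(tab_phy_rev3);
		return tab_phy_rev3;
	default:
		switch (radio_rev) {
		case 5:
			*len = ARRAY_SIZE(tab_radio_rev5);
			return tab_radio_rev5;
		default:
			*len = 0;
			return NULL;	/* caller warns, like B43_WARN_ON(1) */
		}
	}
}

int main(void)
{
	size_t len;
	const struct chantab_entry *e = get_chantab(7, 5, &len);

	printf("entries: %zu, first freq: %u\n", len, e ? e->freq : 0);
	return 0;
}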
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 94c755fdda14..50d03ffeac8c 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1627,74 +1627,7 @@ static const u32 b43_ntab_tdtrn_r3[] = {
0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
};
-static const u32 b43_ntab_noisevar0_r3[] = {
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-};
-
-static const u32 b43_ntab_noisevar1_r3[] = {
+static const u32 b43_ntab_noisevar_r3[] = {
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
@@ -3114,8 +3047,7 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
- ntab_upload(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
- ntab_upload(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
+ ntab_upload(dev, B43_NTAB_NOISEVAR_R3, b43_ntab_noisevar_r3);
ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 9ff33adcff89..3a58aee4c4cf 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -143,8 +143,7 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */
#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */
#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */
-#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */
-#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
+#define B43_NTAB_NOISEVAR_R3 B43_NTAB32(16, 0) /* noise variance */
#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */
#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index 9b1a038be08b..c218c08fb2f5 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -441,7 +441,7 @@ static void b43_wa_altagc(struct b43_wldev *dev)
static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */
{
- b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0xC480);
+ b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0x7654);
}
static void b43_wa_cpll_nonpilot(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 8c5fa4e58139..43c71bfaa474 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -897,7 +897,8 @@ static bool brcms_tx_flush_completed(struct brcms_info *wl)
return result;
}
-static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct brcms_info *wl = hw->priv;
int ret;
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 103f7bce8932..cd0cad7f7759 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -936,7 +936,8 @@ static int __cw1200_flush(struct cw1200_common *priv, bool drop)
return ret;
}
-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct cw1200_common *priv = hw->priv;
diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h
index 35babb62cc6a..b7e386b7662b 100644
--- a/drivers/net/wireless/cw1200/sta.h
+++ b/drivers/net/wireless/cw1200/sta.h
@@ -40,7 +40,8 @@ int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list);
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index d37a6fd90d40..b598e2803500 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -573,7 +573,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
rx_status.flag |= RX_FLAG_SHORTPRE;
if ((unlikely(rx_stats->phy_count > 20))) {
- D_DROP("dsp size out of range [0,20]: %d/n",
+ D_DROP("dsp size out of range [0,20]: %d\n",
rx_stats->phy_count);
return;
}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 888ad5c74639..c159c05db6ef 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -670,7 +670,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
}
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
- D_DROP("dsp size out of range [0,20]: %d/n",
+ D_DROP("dsp size out of range [0,20]: %d\n",
phy_res->cfg_phy_cnt);
return;
}
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 4f42174d9994..ecc674627e6e 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4755,7 +4755,8 @@ out:
}
EXPORT_SYMBOL(il_mac_change_interface);
-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct il_priv *il = hw->priv;
unsigned long timeout = jiffies + msecs_to_jiffies(500);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index dfb13c70efe8..ea5c0f863c4e 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1723,7 +1723,8 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
int il_alloc_txq_mem(struct il_priv *il);
void il_free_txq_mem(struct il_priv *il);
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 576f7ee38ca5..d169228f59e7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -180,7 +180,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
goto done;
}
IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans);
+ iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
ieee80211_wake_queues(priv->hw);
mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index dd55c9cf7ba8..29af7b51e370 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1091,7 +1091,8 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
-static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1119,7 +1120,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
}
}
IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans);
+ iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 6a6df71af1d7..6a00353768f3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -2053,6 +2053,17 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
return false;
}
+static void iwl_napi_add(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
+}
+
static const struct iwl_op_mode_ops iwl_dvm_ops = {
.start = iwl_op_mode_dvm_start,
.stop = iwl_op_mode_dvm_stop,
@@ -2065,6 +2076,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
.cmd_queue_full = iwl_cmd_queue_full,
.nic_config = iwl_nic_config,
.wimax_active = iwl_wimax_active,
+ .napi_add = iwl_napi_add,
};
/*****************************************************************************
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 854ba84ccb73..c3817fae16c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -62,6 +62,7 @@ static const struct iwl_base_params iwl1000_base_params = {
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 128,
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl1000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 3e63323637f3..21e5d0843a62 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -75,6 +75,7 @@ static const struct iwl_base_params iwl2000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
@@ -88,6 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl2000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 6674f2c4541c..332bbede39e5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -61,6 +61,7 @@ static const struct iwl_base_params iwl5000_base_params = {
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 512,
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl5000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8048de90233f..8f2c3c8c6b84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -85,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_base_params iwl6050_base_params = {
@@ -97,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -109,6 +111,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl6000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 4c2d4ef28b22..f73de239cdc1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -71,12 +71,12 @@
#define IWL3160_UCODE_API_MAX 9
/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 8
-#define IWL3160_UCODE_API_OK 8
+#define IWL7260_UCODE_API_OK 9
+#define IWL3160_UCODE_API_OK 9
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 7
-#define IWL3160_UCODE_API_MIN 7
+#define IWL7260_UCODE_API_MIN 8
+#define IWL3160_UCODE_API_MIN 8
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@@ -107,6 +107,7 @@ static const struct iwl_base_params iwl7000_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
+ .apmg_wake_up_wa = true,
};
static const struct iwl_ht_params iwl7000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 3f17dc3f2c8a..7ce82d9c7222 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -146,6 +146,9 @@ static inline u8 num_of_ant(u8 mask)
* @wd_timeout: TX queues watchdog timeout
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadow register support
+ * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
+ * is in flight. This is due to a HW bug in 7260, 3160 and 7265.
+ * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
*/
struct iwl_base_params {
int eeprom_size;
@@ -160,6 +163,8 @@ struct iwl_base_params {
u32 max_event_log_size;
const bool shadow_reg_enable;
const bool pcie_l1_allowed;
+ const bool apmg_wake_up_wa;
+ const bool scd_chain_ext_wa;
};
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index d14f19339d61..f5927d0cf9b6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -77,26 +77,21 @@
* @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
* @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
* offload profile config command.
- * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
- * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
* (rather than two) IPv6 addresses
- * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
* @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
* from the probe request template.
- * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
- * connection when going back to D0
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
- * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
- * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
- * containing CAM (Continuous Active Mode) indication.
+ * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in different bindings.
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
+ * P2P client interfaces simultaneously if they are in the same bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -104,22 +99,15 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
- IWL_UCODE_TLV_FLAGS_NEWBT_COEX = BIT(5),
- IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT = BIT(6),
IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
- IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
- IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
- IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
- IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API = BIT(14),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
- IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
- IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
- IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
+ IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
- IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
+ IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
@@ -183,6 +171,7 @@ enum iwl_ucode_sec {
#define IWL_UCODE_SECTION_MAX 12
#define IWL_API_ARRAY_SIZE 1
#define IWL_CAPABILITIES_ARRAY_SIZE 1
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
struct iwl_ucode_capabilities {
u32 max_probe_length;
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 6be30c698506..4049c0d626ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -134,12 +134,13 @@ static const u8 iwl_nvm_channels_family_8000[] = {
149, 153, 157, 161, 165, 169, 173, 177, 181
};
-#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
+#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000)
-#define NUM_2GHZ_CHANNELS 14
-#define FIRST_2GHZ_HT_MINUS 5
-#define LAST_2GHZ_HT_PLUS 9
-#define LAST_5GHZ_HT 161
+#define NUM_2GHZ_CHANNELS 14
+#define NUM_2GHZ_CHANNELS_FAMILY_8000 13
+#define FIRST_2GHZ_HT_MINUS 5
+#define LAST_2GHZ_HT_PLUS 9
+#define LAST_5GHZ_HT 161
#define DEFAULT_MAX_TX_POWER 16
@@ -202,21 +203,23 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct ieee80211_channel *channel;
u16 ch_flags;
bool is_5ghz;
- int num_of_ch;
+ int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;
if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
num_of_ch = IWL_NUM_CHANNELS;
nvm_chan = &iwl_nvm_channels[0];
+ num_2ghz_channels = NUM_2GHZ_CHANNELS;
} else {
num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
nvm_chan = &iwl_nvm_channels_family_8000[0];
+ num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
- if (ch_idx >= NUM_2GHZ_CHANNELS &&
+ if (ch_idx >= num_2ghz_channels &&
!data->sku_cap_band_52GHz_enable)
ch_flags &= ~NVM_CHANNEL_VALID;
@@ -225,7 +228,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
nvm_chan[ch_idx],
ch_flags,
- (ch_idx >= NUM_2GHZ_CHANNELS) ?
+ (ch_idx >= num_2ghz_channels) ?
"5.2" : "2.4");
continue;
}
@@ -234,7 +237,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
- channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+ channel->band = (ch_idx < num_2ghz_channels) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
@@ -242,7 +245,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* TODO: Need to be dependent to the NVM */
channel->flags = IEEE80211_CHAN_NO_HT40;
- if (ch_idx < NUM_2GHZ_CHANNELS &&
+ if (ch_idx < num_2ghz_channels &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -250,7 +253,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
} else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
- if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+ if ((ch_idx - num_2ghz_channels) % 2 == 0)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
else
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
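As a rough standalone sketch of the behaviour change above: the band of an NVM channel is decided purely by its index against a per-family 2.4 GHz count (14 on older parts, 13 on the 8000 family). The helper below is invented for illustration and only mirrors that index comparison.

#include <stdio.h>
#include <stdbool.h>

static const char *band_of(int ch_idx, bool family_8000)
{
	/* mirrors NUM_2GHZ_CHANNELS vs NUM_2GHZ_CHANNELS_FAMILY_8000 */
	int num_2ghz = family_8000 ? 13 : 14;

	return ch_idx < num_2ghz ? "2.4 GHz" : "5.2 GHz";
}

int main(void)
{
	/* index 13 is still a 2.4 GHz entry only on pre-8000 parts */
	printf("pre-8000, idx 13: %s\n", band_of(13, false));
	printf("8000,     idx 13: %s\n", band_of(13, true));
	return 0;
}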
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index ea29504ac617..99785c892f96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -63,6 +63,7 @@
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__
+#include <linux/netdevice.h>
#include <linux/debugfs.h>
struct iwl_op_mode;
@@ -112,8 +113,11 @@ struct iwl_cfg;
* @stop: stop the op_mode. Must free all the memory allocated.
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
- * HCMD this Rx responds to.
- * This callback may sleep, it is called from a threaded IRQ handler.
+ * HCMD this Rx responds to. Can't sleep.
+ * @napi_add: NAPI initialisation. The transport is fully responsible for NAPI,
+ * but the higher layers need to know about it (in particular mac80211 to
+ * be able to call the right NAPI RX functions); this function is needed
+ * to eventually call netif_napi_add() with higher layer involvement.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
@@ -143,6 +147,11 @@ struct iwl_op_mode_ops {
void (*stop)(struct iwl_op_mode *op_mode);
int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+ void (*napi_add)(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -180,7 +189,6 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
- might_sleep();
return op_mode->ops->rx(op_mode, rxb, cmd);
}
@@ -249,4 +257,15 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
return op_mode->ops->exit_d0i3(op_mode);
}
+static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ if (!op_mode->ops->napi_add)
+ return;
+ op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
+}
+
#endif /* __iwl_op_mode_h__ */
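The iwl_op_mode_napi_add() wrapper above is an instance of the optional-callback pattern: the ops member may be NULL and the inline helper silently does nothing in that case. A compilable toy model of that pattern, with invented names, is sketched below.

#include <stdio.h>
#include <stddef.h>

struct demo_ops {
	void (*napi_add)(int weight);	/* optional hook, may be NULL */
};

static void demo_napi_add(const struct demo_ops *ops, int weight)
{
	if (!ops->napi_add)
		return;			/* op mode opted out: no-op */
	ops->napi_add(weight);
}

static void dvm_style_napi_add(int weight)
{
	printf("napi_add called, weight=%d\n", weight);
}

int main(void)
{
	struct demo_ops with    = { .napi_add = dvm_style_napi_add };
	struct demo_ops without = { .napi_add = NULL };

	demo_napi_add(&with, 64);	/* prints */
	demo_napi_add(&without, 64);	/* silently skipped */
	return 0;
}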
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 5f657c501406..779311080a9e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -348,4 +348,12 @@ enum secure_load_status_reg {
#define LMPM_SECURE_TIME_OUT (100)
+/* Rx FIFO */
+#define RXF_SIZE_ADDR (0xa00c88)
+#define RXF_SIZE_BYTE_CND_POS (7)
+#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS)
+
+#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10)
+#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c)
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8cdb0dd618a6..22fd94ec8048 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -437,8 +437,7 @@ struct iwl_trans;
* this one. The op_mode must not configure the HCMD queue. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
- * @wait_tx_queue_empty: wait until all tx queues are empty
- * May sleep
+ * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
* @write8: write a u8 to a register at offset ofs from the BAR
@@ -490,7 +489,7 @@ struct iwl_trans_ops {
void (*txq_disable)(struct iwl_trans *trans, int queue);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
- int (*wait_tx_queue_empty)(struct iwl_trans *trans);
+ int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -759,12 +758,13 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
}
-static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
+static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
+ u32 txq_bm)
{
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
- return trans->ops->wait_tx_queue_empty(trans);
+ return trans->ops->wait_tx_queue_empty(trans, txq_bm);
}
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
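A speculative, standalone sketch of how a queue bitmask like the new txq_bm argument is typically consumed: only queues whose bit is set are waited on, so passing 0xffffffff (as the DVM callers above now do) keeps the old wait-for-everything behaviour. The queue count and emptiness check are simulated, not taken from the transport.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NUM_QUEUES 20	/* placeholder, same value as IWL_MVM_MAX_QUEUES */

static bool queue_is_empty(int q)
{
	return q != 3;	/* pretend queue 3 still has frames queued */
}

static int wait_tx_queues_empty(uint32_t txq_bm)
{
	for (int q = 0; q < NUM_QUEUES; q++) {
		if (!(txq_bm & (1u << q)))
			continue;	/* caller did not ask about this queue */
		if (!queue_is_empty(q)) {
			printf("queue %d not empty\n", q);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	wait_tx_queues_empty(0xffffffff);	/* all queues, like the DVM callers */
	wait_tx_queues_empty(1u << 0);		/* a single queue: succeeds */
	return 0;
}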
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index fa858d548d13..8f4b03dbaf3f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -104,11 +104,8 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
#define BT_ANTENNA_COUPLING_THRESHOLD (30)
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
+static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
{
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- return 0;
-
return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(struct iwl_bt_coex_prio_tbl_cmd),
&iwl_bt_prio_tbl);
@@ -573,8 +570,9 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
int ret;
u32 flags;
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- return 0;
+ ret = iwl_send_bt_prio_tbl(mvm);
+ if (ret)
+ return ret;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
@@ -582,10 +580,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
cmd.data[0] = bt_cmd;
bt_cmd->max_kill = 5;
- bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
- bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
- bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
- bt_cmd->bt4_tx_rx_max_freq0 = 15,
+ bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+ bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+ bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+ bt_cmd->bt4_tx_rx_max_freq0 = 15;
+ bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
+ bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
@@ -1215,6 +1215,17 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
}
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ enum ieee80211_band band)
+{
+ u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+
+ if (band != IEEE80211_BAND_2GHZ)
+ return false;
+
+ return bt_activity >= BT_LOW_TRAFFIC;
+}
+
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac)
{
@@ -1249,9 +1260,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- return;
-
iwl_mvm_bt_coex_notif_handle(mvm);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index e56f5a0edf85..7694472a303e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -744,10 +744,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
int err;
u32 size;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
- cmd.data[0] = &query_cmd;
- cmd.len[0] = sizeof(query_cmd);
- }
+ cmd.data[0] = &query_cmd;
+ cmd.len[0] = sizeof(query_cmd);
err = iwl_mvm_send_cmd(mvm, &cmd);
if (err)
@@ -758,10 +756,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
err = -EINVAL;
} else {
err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
- /* new API returns next, not last-used seqno */
- if (mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
- err = (u16) (err - 0x10);
+ /* firmware returns next, not last-used seqno */
+ err = (u16) (err - 0x10);
}
iwl_free_resp(&cmd);
@@ -785,10 +781,6 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->seqno_valid = false;
- if (!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
- return;
-
if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
sizeof(query_cmd), &query_cmd))
IWL_ERR(mvm, "failed to set non-QoS seqno\n");
@@ -1082,6 +1074,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (iwl_mvm_is_d0i3_supported(mvm)) {
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ return 0;
+ }
+
return __iwl_mvm_suspend(hw, wowlan, false);
}
@@ -1277,7 +1278,7 @@ static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
}
static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
- struct iwl_wowlan_status_v6 *status)
+ struct iwl_wowlan_status *status)
{
union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
@@ -1294,7 +1295,7 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
}
struct iwl_mvm_d3_gtk_iter_data {
- struct iwl_wowlan_status_v6 *status;
+ struct iwl_wowlan_status *status;
void *last_gtk;
u32 cipher;
bool find_phase, unhandled_cipher;
@@ -1370,7 +1371,7 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_wowlan_status_v6 *status)
+ struct iwl_wowlan_status *status)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_d3_gtk_iter_data gtkdata = {
@@ -1468,7 +1469,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
.flags = CMD_SYNC | CMD_WANT_SKB,
};
struct iwl_wowlan_status_data status;
- struct iwl_wowlan_status_v6 *status_v6;
+ struct iwl_wowlan_status *fw_status;
int ret, len, status_size, i;
bool keep;
struct ieee80211_sta *ap_sta;
@@ -1505,10 +1506,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (!cmd.resp_pkt)
goto out_unlock;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
- status_size = sizeof(struct iwl_wowlan_status_v6);
- else
- status_size = sizeof(struct iwl_wowlan_status_v4);
+ status_size = sizeof(*fw_status);
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < status_size) {
@@ -1516,35 +1514,18 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
goto out_free_resp;
}
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
- status_v6 = (void *)cmd.resp_pkt->data;
-
- status.pattern_number = le16_to_cpu(status_v6->pattern_number);
- for (i = 0; i < 8; i++)
- status.qos_seq_ctr[i] =
- le16_to_cpu(status_v6->qos_seq_ctr[i]);
- status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
- status.wake_packet_length =
- le32_to_cpu(status_v6->wake_packet_length);
- status.wake_packet_bufsize =
- le32_to_cpu(status_v6->wake_packet_bufsize);
- status.wake_packet = status_v6->wake_packet;
- } else {
- struct iwl_wowlan_status_v4 *status_v4;
- status_v6 = NULL;
- status_v4 = (void *)cmd.resp_pkt->data;
-
- status.pattern_number = le16_to_cpu(status_v4->pattern_number);
- for (i = 0; i < 8; i++)
- status.qos_seq_ctr[i] =
- le16_to_cpu(status_v4->qos_seq_ctr[i]);
- status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
- status.wake_packet_length =
- le32_to_cpu(status_v4->wake_packet_length);
- status.wake_packet_bufsize =
- le32_to_cpu(status_v4->wake_packet_bufsize);
- status.wake_packet = status_v4->wake_packet;
- }
+ fw_status = (void *)cmd.resp_pkt->data;
+
+ status.pattern_number = le16_to_cpu(fw_status->pattern_number);
+ for (i = 0; i < 8; i++)
+ status.qos_seq_ctr[i] =
+ le16_to_cpu(fw_status->qos_seq_ctr[i]);
+ status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
+ status.wake_packet_length =
+ le32_to_cpu(fw_status->wake_packet_length);
+ status.wake_packet_bufsize =
+ le32_to_cpu(fw_status->wake_packet_bufsize);
+ status.wake_packet = fw_status->wake_packet;
if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
@@ -1571,7 +1552,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
- keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
+ keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
iwl_free_resp(&cmd);
return keep;
@@ -1674,6 +1655,19 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ if (iwl_mvm_is_d0i3_supported(mvm)) {
+ bool exit_now;
+
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+ &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ if (exit_now)
+ _iwl_mvm_exit_d0i3(mvm);
+ return 0;
+ }
+
return __iwl_mvm_resume(mvm, false);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 9b59e1d7ae71..6047cfdafb95 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -103,10 +103,6 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
dbgfs_pm->tx_data_timeout = val;
break;
- case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
- IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
- dbgfs_pm->disable_power_off = val;
- break;
case MVM_DEBUGFS_PM_LPRX_ENA:
IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
dbgfs_pm->lprx_ena = val;
@@ -154,12 +150,6 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
if (sscanf(buf + 16, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
- } else if (!strncmp("disable_power_off=", buf, 18) &&
- !(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
- if (sscanf(buf + 18, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
} else if (!strncmp("lprx=", buf, 5)) {
if (sscanf(buf + 5, "%d", &val) != 1)
return -EINVAL;
@@ -592,8 +582,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
}
- if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
- iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
(vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 1b52deea6081..f462c9baa2b5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -136,9 +136,6 @@ static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
file->private_data = mvm->fw_error_dump;
mvm->fw_error_dump = NULL;
- kfree(mvm->fw_error_sram);
- mvm->fw_error_sram = NULL;
- mvm->fw_error_sram_len = 0;
ret = 0;
out:
@@ -1004,6 +1001,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
PRINT_MVM_REF(IWL_MVM_REF_USER);
+ PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -1108,9 +1106,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
- .open = iwl_dbgfs_fw_error_dump_open,
- .read = iwl_dbgfs_fw_error_dump_read,
- .release = iwl_dbgfs_fw_error_dump_release,
+ .open = iwl_dbgfs_fw_error_dump_open,
+ .read = iwl_dbgfs_fw_error_dump_read,
+ .release = iwl_dbgfs_fw_error_dump_release,
};
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
@@ -1138,9 +1136,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
- MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
- S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
+ S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index 21877e5966a8..5fe82c29c8ad 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -141,7 +141,8 @@ enum iwl_bt_coex_lut_type {
BT_COEX_TX_DIS_LUT,
BT_COEX_MAX_LUT,
-};
+ BT_COEX_INVALID_LUT = 0xff,
+}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
#define BT_COEX_LUT_SIZE (12)
#define BT_COEX_CORUN_LUT_SIZE (32)
@@ -154,19 +155,23 @@ enum iwl_bt_coex_lut_type {
* @flags:&enum iwl_bt_coex_flags
* @max_kill:
* @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @bt4_antenna_isolation:
- * @bt4_antenna_isolation_thr:
- * @bt4_tx_tx_delta_freq_thr:
- * @bt4_tx_rx_max_freq0:
- * @bt_prio_boost:
+ * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ * should be set by default
+ * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ * should be set by default
+ * @bt4_antenna_isolation: antenna isolation
+ * @bt4_antenna_isolation_thr: antenna threshold value
+ * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
+ * @bt4_tx_rx_max_freq0: TxRx max frequency
+ * @bt_prio_boost: BT priority boost registers
* @wifi_tx_prio_boost: SW boost of wifi tx priority
* @wifi_rx_prio_boost: SW boost of wifi rx priority
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @decision_lut:
- * @bt4_multiprio_lut:
- * @bt4_corun_lut20:
- * @bt4_corun_lut40:
+ * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
+ * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
+ * @decision_lut: PTA decision LUT, per Prio-Ch
+ * @bt4_multiprio_lut: multi priority LUT configuration
+ * @bt4_corun_lut20: co-running 20 MHz LUT configuration
+ * @bt4_corun_lut40: co-running 40 MHz LUT configuration
* @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
*
* The structure is used for the BT_COEX command.
@@ -175,7 +180,8 @@ struct iwl_bt_coex_cmd {
__le32 flags;
u8 max_kill;
u8 bt_reduced_tx_power;
- u8 reserved[2];
+ u8 override_primary_lut;
+ u8 override_secondary_lut;
u8 bt4_antenna_isolation;
u8 bt4_antenna_isolation_thr;
@@ -194,7 +200,7 @@ struct iwl_bt_coex_cmd {
__le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
__le32 valid_bit_msk;
-} __packed; /* BT_COEX_CMD_API_S_VER_3 */
+} __packed; /* BT_COEX_CMD_API_S_VER_5 */
/**
* struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
@@ -282,7 +288,7 @@ enum iwl_bt_activity_grading {
BT_ON_NO_CONNECTION = 1,
BT_LOW_TRAFFIC = 2,
BT_HIGH_TRAFFIC = 3,
-};
+}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
/**
* struct iwl_bt_coex_profile_notif - notification about BT coex
@@ -310,7 +316,7 @@ struct iwl_bt_coex_profile_notif {
__le32 primary_ch_lut;
__le32 secondary_ch_lut;
__le32 bt_activity_grading;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
enum iwl_bt_coex_prio_table_event {
BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 10fcc1a79ebd..13696fe419b7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -345,21 +345,6 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
-struct iwl_wowlan_status_v4 {
- __le64 replay_ctr;
- __le16 pattern_number;
- __le16 non_qos_seq_ctr;
- __le16 qos_seq_ctr[8];
- __le32 wakeup_reasons;
- __le32 rekey_status;
- __le32 num_of_gtk_rekeys;
- __le32 transmitted_ndps;
- __le32 received_beacons;
- __le32 wake_packet_length;
- __le32 wake_packet_bufsize;
- u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
-
struct iwl_wowlan_gtk_status {
u8 key_index;
u8 reserved[3];
@@ -368,7 +353,7 @@ struct iwl_wowlan_gtk_status {
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
} __packed;
-struct iwl_wowlan_status_v6 {
+struct iwl_wowlan_status {
struct iwl_wowlan_gtk_status gtk;
__le64 replay_ctr;
__le16 pattern_number;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 39148b5bb332..8bb5b94bf963 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -334,7 +334,7 @@ enum {
*/
struct iwl_lq_cmd {
u8 sta_id;
- u8 reserved1;
+ u8 reduced_tpc;
u16 control;
/* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
u8 flags;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 9426905de6b2..6174c027ff59 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -169,8 +169,12 @@ enum iwl_scan_type {
SCAN_TYPE_DISCOVERY_FORCED = 6,
}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
-/* Maximal number of channels to scan */
-#define MAX_NUM_SCAN_CHANNELS 0x24
+/**
+ * Maximal number of channels to scan.
+ * It should be equal to:
+ * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
+ */
+#define MAX_NUM_SCAN_CHANNELS 50
/**
* struct iwl_scan_cmd - scan request command
@@ -534,13 +538,16 @@ struct iwl_scan_offload_schedule {
*
* IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
* IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
- * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
- * on A band.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
+ * beacon period. Finding channel activity in this mode is not guaranteed.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
+ * Assuming a beacon period of 100ms, finding channel activity is guaranteed.
*/
enum iwl_scan_offload_flags {
IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
- IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3),
+ IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE = BIT(5),
+ IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
};
/**
@@ -563,17 +570,24 @@ enum iwl_scan_offload_compleate_status {
IWL_SCAN_OFFLOAD_ABORTED = 2,
};
+enum iwl_scan_ebs_status {
+ IWL_SCAN_EBS_SUCCESS,
+ IWL_SCAN_EBS_FAILED,
+ IWL_SCAN_EBS_CHAN_NOT_FOUND,
+};
+
/**
* iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
* @last_schedule_line: last schedule line executed (fast or regular)
* @last_schedule_iteration: last scan iteration executed before scan abort
* @status: enum iwl_scan_offload_compleate_status
+ * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
*/
struct iwl_scan_offload_complete {
u8 last_schedule_line;
u8 last_schedule_iteration;
u8 status;
- u8 reserved;
+ u8 ebs_status;
} __packed;
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index d63647867262..39cebee8016f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -255,22 +255,19 @@ struct iwl_mvm_keyinfo {
} __packed;
/**
- * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
+ * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
* @add_modify: 1: modify existing, 0: add new station
- * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
- * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
- * sent
+ * @awake_acs:
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @mac_id_n_color: the Mac context this station belongs to
* @addr[ETH_ALEN]: station's MAC address
* @sta_id: index of station in uCode's station table
* @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
* alone. 1 - modify, 0 - don't change.
- * @key: look at %iwl_mvm_keyinfo
* @station_flags: look at %iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed
- * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
- * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
* Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
* add_immediate_ba_ssn.
@@ -294,40 +291,7 @@ struct iwl_mvm_keyinfo {
* ADD_STA sets up the table entry for one station, either creating a new
* entry, or modifying a pre-existing one.
*/
-struct iwl_mvm_add_sta_cmd_v5 {
- u8 add_modify;
- u8 unicast_tx_key_id;
- u8 multicast_tx_key_id;
- u8 reserved1;
- __le32 mac_id_n_color;
- u8 addr[ETH_ALEN];
- __le16 reserved2;
- u8 sta_id;
- u8 modify_mask;
- __le16 reserved3;
- struct iwl_mvm_keyinfo key;
- __le32 station_flags;
- __le32 station_flags_msk;
- __le16 tid_disable_tx;
- __le16 reserved4;
- u8 add_immediate_ba_tid;
- u8 remove_immediate_ba_tid;
- __le16 add_immediate_ba_ssn;
- __le16 sleep_tx_count;
- __le16 sleep_state_flags;
- __le16 assoc_id;
- __le16 beamform_flags;
- __le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_5 */
-
-/**
- * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
- * VER_7 of this command is quite similar to VER_5 except
- * exclusion of all fields related to the security key installation.
- * It only differs from VER_6 by the "awake_acs" field that is
- * reserved and ignored in VER_6.
- */
-struct iwl_mvm_add_sta_cmd_v7 {
+struct iwl_mvm_add_sta_cmd {
u8 add_modify;
u8 awake_acs;
__le16 tid_disable_tx;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 8e122f3a7a74..6cc5f52b807f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -482,7 +482,8 @@ struct iwl_mvm_tx_resp {
u8 pa_integ_res_b[3];
u8 pa_integ_res_c[3];
__le16 measurement_req_id;
- __le16 reserved;
+ u8 reduced_tpc;
+ u8 reserved;
__le32 tfd_info;
__le16 seq_ctl;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 6e75b52588de..309a9b9a94fe 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -71,6 +71,7 @@
#include "fw-api-power.h"
#include "fw-api-d3.h"
#include "fw-api-coex.h"
+#include "fw-api-scan.h"
/* maximal number of Tx queues in any platform */
#define IWL_MVM_MAX_QUEUES 20
@@ -604,52 +605,7 @@ enum {
TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
}; /* MAC_EVENT_ACTION_API_E_VER_2 */
-
-/**
- * struct iwl_time_event_cmd_api_v1 - configuring Time Events
- * with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
- * with version 2. determined by IWL_UCODE_TLV_FLAGS)
- * ( TIME_EVENT_CMD = 0x29 )
- * @id_and_color: ID and color of the relevant MAC
- * @action: action to perform, one of FW_CTXT_ACTION_*
- * @id: this field has two meanings, depending on the action:
- * If the action is ADD, then it means the type of event to add.
- * For all other actions it is the unique event ID assigned when the
- * event was added by the FW.
- * @apply_time: When to start the Time Event (in GP2)
- * @max_delay: maximum delay to event's start (apply time), in TU
- * @depends_on: the unique ID of the event we depend on (if any)
- * @interval: interval between repetitions, in TU
- * @interval_reciprocal: 2^32 / interval
- * @duration: duration of event in TU
- * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
- * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
- * and TE_V1_EVENT_SOCIOPATHIC
- * @is_present: 0 or 1, are we present or absent during the Time Event
- * @max_frags: maximal number of fragments the Time Event can be divided to
- * @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
- */
-struct iwl_time_event_cmd_v1 {
- /* COMMON_INDEX_HDR_API_S_VER_1 */
- __le32 id_and_color;
- __le32 action;
- __le32 id;
- /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
- __le32 apply_time;
- __le32 max_delay;
- __le32 dep_policy;
- __le32 depends_on;
- __le32 is_present;
- __le32 max_frags;
- __le32 interval;
- __le32 interval_reciprocal;
- __le32 duration;
- __le32 repeat;
- __le32 notify;
-} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
-
-
-/* Time event - defines for command API v2 */
+/* Time event - defines for command API */
/*
* @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
@@ -680,7 +636,7 @@ enum {
#define TE_V2_PLACEMENT_POS 12
#define TE_V2_ABSENCE_POS 15
-/* Time event policy values (for time event cmd api v2)
+/* Time event policy values
* A notification (both event and fragment) includes a status indicating whether
* the FW was able to schedule the event or not. For fragment start/end
* notification the status is always success. There is no start/end fragment
@@ -727,7 +683,7 @@ enum {
};
/**
- * struct iwl_time_event_cmd_api_v2 - configuring Time Events
+ * struct iwl_time_event_cmd - configuring Time Events
* with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
* with version 1. determined by IWL_UCODE_TLV_FLAGS)
* ( TIME_EVENT_CMD = 0x29 )
@@ -750,7 +706,7 @@ enum {
* TE_EVENT_SOCIOPATHIC
* using TE_ABSENCE and using TE_NOTIF_*
*/
-struct iwl_time_event_cmd_v2 {
+struct iwl_time_event_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h b/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
index 58c8941c0d95..f381908be7e5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
@@ -71,10 +71,12 @@
* enum iwl_fw_error_dump_type - types of data in the dump file
* @IWL_FW_ERROR_DUMP_SRAM:
* @IWL_FW_ERROR_DUMP_REG:
+ * @IWL_FW_ERROR_DUMP_RXF:
*/
enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_SRAM = 0,
IWL_FW_ERROR_DUMP_REG = 1,
+ IWL_FW_ERROR_DUMP_RXF = 2,
IWL_FW_ERROR_DUMP_MAX,
};
@@ -89,7 +91,7 @@ struct iwl_fw_error_dump_data {
__le32 type;
__le32 len;
__u8 data[];
-} __packed __aligned(4);
+} __packed;
/**
* struct iwl_fw_error_dump_file - the layout of the header of the file
@@ -101,6 +103,6 @@ struct iwl_fw_error_dump_file {
__le32 barker;
__le32 file_len;
u8 data[0];
-} __packed __aligned(4);
+} __packed;
#endif /* __fw_error_dump_h__ */
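The dump layout above is a plain (type, len, data[]) TLV stream. Below is a small host-side sketch of how such a stream could be walked; the struct and field names only imitate fw-error-dump.h (the real fields are little-endian __le32), and the buffer contents are fabricated.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct dump_data {
	uint32_t type;
	uint32_t len;
	uint8_t data[];
} __attribute__((packed));

static void walk(const uint8_t *buf, size_t buf_len)
{
	size_t off = 0;

	while (off + sizeof(struct dump_data) <= buf_len) {
		const struct dump_data *d = (const void *)(buf + off);

		printf("chunk type %u, %u bytes\n",
		       (unsigned)d->type, (unsigned)d->len);
		off += sizeof(*d) + d->len;	/* advance past header + payload */
	}
}

int main(void)
{
	uint8_t buf[32] = { 0 };
	struct dump_data hdr = { .type = 2 /* e.g. RXF */, .len = 4 };

	memcpy(buf, &hdr, sizeof(hdr));		/* one fabricated chunk */
	walk(buf, sizeof(hdr) + hdr.len);
	return 0;
}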
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 7ce20062f32d..3d99cf564ba6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -288,7 +288,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
goto error;
}
- ret = iwl_send_bt_prio_tbl(mvm);
+ ret = iwl_send_bt_init_conf(mvm);
if (ret)
goto error;
@@ -424,10 +424,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
- ret = iwl_send_bt_prio_tbl(mvm);
- if (ret)
- goto error;
-
ret = iwl_send_bt_init_conf(mvm);
if (ret)
goto error;
@@ -468,12 +464,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* Initialize tx backoffs to the minimal possible */
iwl_mvm_tt_tx_backoff(mvm, 0);
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
- ret = iwl_power_legacy_set_cam_mode(mvm);
- if (ret)
- goto error;
- }
-
ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 9ccec10bba16..7110ec2605d6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -667,12 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
- /* Don't use cts to self as the fw doesn't support it currently. */
if (vif->bss_conf.use_cts_prot) {
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
- cmd->protection_flags |=
- cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
+ cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
}
IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
vif->bss_conf.use_cts_prot,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index f0cebf12c7b8..97c3deae6552 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -276,6 +276,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TIMING_BEACON_ONLY |
IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_SUPPORTS_UAPSD |
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
@@ -285,6 +286,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_RADIOTAP_MCS_HAVE_STBC;
hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
hw->rate_control_algorithm = "iwl-mvm-rs";
+ hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+ hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
/*
* Enable 11w if advertised by firmware and software crypto
@@ -295,11 +298,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
- if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
- hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
- hw->uapsd_queues = IWL_UAPSD_AC_INFO;
- hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
- }
+ /* Disable uAPSD due to firmware issues */
+ if (true)
+ hw->flags &= ~IEEE80211_HW_SUPPORTS_UAPSD;
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -309,11 +310,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
-
- /* IBSS has bugs in older versions */
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
- hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
@@ -365,14 +363,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
- hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
- hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
- /* we create the 802.11 header and zero length SSID IE. */
- hw->wiphy->max_sched_scan_ie_len =
- SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
- }
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ /* we create the 802.11 header and zero length SSID IE. */
+ hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_P2P_GO_OPPPS;
@@ -386,7 +381,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#ifdef CONFIG_PM_SLEEP
- if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+ if (iwl_mvm_is_d0i3_supported(mvm) &&
+ device_can_wakeup(mvm->trans->dev)) {
+ mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
+ hw->wiphy->wowlan = &mvm->wowlan;
+ } else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
mvm->trans->ops->d3_suspend &&
mvm->trans->ops->d3_resume &&
device_can_wakeup(mvm->trans->dev)) {
@@ -827,8 +826,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_remove_mac;
if (!mvm->bf_allowed_vif &&
- vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
- mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
mvm->bf_allowed_vif = mvmvif;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -1223,6 +1221,10 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
return 0;
+ /* bcast filtering isn't supported for P2P client */
+ if (vif->p2p)
+ return 0;
+
if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
return 0;
@@ -1697,6 +1699,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
ret = iwl_mvm_add_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
+ /*
+ * EBS may be disabled due to previous failures reported by FW.
+ * Reset EBS status here assuming environment has been changed.
+ */
+ mvm->last_ebs_successful = true;
ret = 0;
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d564233a65da..17c42da5f9f2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -164,7 +164,6 @@ enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
- MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
@@ -177,7 +176,6 @@ struct iwl_dbgfs_pm {
u32 tx_data_timeout;
bool skip_over_dtim;
u8 skip_dtim_periods;
- bool disable_power_off;
bool lprx_ena;
u32 lprx_rssi_threshold;
bool snooze_ena;
@@ -232,6 +230,7 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_USER,
IWL_MVM_REF_TX,
IWL_MVM_REF_TX_AGG,
+ IWL_MVM_REF_EXIT_WORK,
IWL_MVM_REF_COUNT,
};
@@ -265,6 +264,7 @@ struct iwl_mvm_vif_bf_data {
* @uploaded: indicates the MAC context has been added to the device
* @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
* should get quota etc.
+ * @pm_enabled - Indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
* @low_latency: indicates that this interface is in low-latency mode
@@ -283,6 +283,7 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_ibss_active;
+ bool pm_enabled;
bool monitor_active;
bool low_latency;
struct iwl_mvm_vif_bf_data bf_data;
@@ -451,6 +452,11 @@ struct iwl_mvm_frame_stats {
int last_frame_idx;
};
+enum {
+ D0I3_DEFER_WAKEUP,
+ D0I3_PENDING_WAKEUP,
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -535,6 +541,8 @@ struct iwl_mvm {
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
+ bool last_ebs_successful;
+
u8 scan_last_antenna_idx; /* to toggle TX between antennas */
u8 mgmt_last_antenna_idx;
@@ -578,6 +586,8 @@ struct iwl_mvm {
void *fw_error_dump;
void *fw_error_sram;
u32 fw_error_sram_len;
+ u32 *fw_error_rxf;
+ u32 fw_error_rxf_len;
struct led_classdev led;
@@ -601,6 +611,9 @@ struct iwl_mvm {
bool d0i3_offloading;
struct work_struct d0i3_exit_work;
struct sk_buff_head d0i3_tx;
+ /* protect d0i3_suspend_flags */
+ struct mutex d0i3_suspend_mutex;
+ unsigned long d0i3_suspend_flags;
/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
spinlock_t d0i3_tx_lock;
wait_queue_head_t d0i3_exit_waitq;
@@ -629,8 +642,6 @@ struct iwl_mvm {
/* Indicate if device power save is allowed */
bool ps_disabled;
- /* Indicate if device power management is allowed */
- bool pm_disabled;
};
/* Extract MVM priv from op_mode and _hw */
@@ -705,6 +716,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
+void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
#endif
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@ -874,8 +886,6 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
int rs_pretty_print_rate(char *buf, const u32 rate);
/* power management */
-int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
-
int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -922,9 +932,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
/* BT Coex */
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
@@ -936,6 +946,8 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ enum ieee80211_band band);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 9545d7fdd4bf..7a5a8bac5fd0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -402,6 +402,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->sf_state = SF_UNINIT;
mutex_init(&mvm->mutex);
+ mutex_init(&mvm->d0i3_suspend_mutex);
spin_lock_init(&mvm->async_handlers_lock);
INIT_LIST_HEAD(&mvm->time_event_list);
INIT_LIST_HEAD(&mvm->async_handlers_list);
@@ -538,6 +539,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
kfree(mvm->scan_cmd);
vfree(mvm->fw_error_dump);
kfree(mvm->fw_error_sram);
+ kfree(mvm->fw_error_rxf);
kfree(mvm->mcast_filter_cmd);
mvm->mcast_filter_cmd = NULL;
@@ -821,8 +823,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
return;
file_len = mvm->fw_error_sram_len +
+ mvm->fw_error_rxf_len +
sizeof(*dump_file) +
- sizeof(*dump_data);
+ sizeof(*dump_data) * 2;
dump_file = vmalloc(file_len);
if (!dump_file)
@@ -833,7 +836,12 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_file->file_len = cpu_to_le32(file_len);
dump_data = (void *)dump_file->data;
- dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+ dump_data->len = cpu_to_le32(mvm->fw_error_rxf_len);
+ memcpy(dump_data->data, mvm->fw_error_rxf, mvm->fw_error_rxf_len);
+
+ dump_data = (void *)((u8 *)dump_data->data + mvm->fw_error_rxf_len);
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM);
dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
/*
@@ -842,6 +850,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
* mvm->fw_error_sram right now.
*/
memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
+
+ kfree(mvm->fw_error_rxf);
+ mvm->fw_error_rxf = NULL;
+ mvm->fw_error_rxf_len = 0;
+
+ kfree(mvm->fw_error_sram);
+ mvm->fw_error_sram = NULL;
+ mvm->fw_error_sram_len = 0;
}
#endif
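The hunk above lays the file out as [header][RXF record][SRAM record] and advances the record pointer by hand after each memcpy. A small stand-alone sketch of the same append idiom with that pointer arithmetic factored into a helper; the struct and the names are illustrative, and endianness conversion is left out for brevity.

#include <stdint.h>
#include <string.h>

struct rec {			/* stand-in for struct iwl_fw_error_dump_data */
	uint32_t type;
	uint32_t len;
	uint8_t data[];
} __attribute__((packed));

/* Write one record at *cursor and return where the next one starts. */
static struct rec *append_rec(struct rec *cursor, uint32_t type,
			      const void *payload, uint32_t len)
{
	cursor->type = type;
	cursor->len = len;
	memcpy(cursor->data, payload, len);
	return (struct rec *)(cursor->data + len);
}

int main(void)
{
	uint32_t buf[16];	/* keeps the first record aligned */
	struct rec *cur = (struct rec *)buf;

	cur = append_rec(cur, 2, "rxf", 3);	/* e.g. an RXF record */
	cur = append_rec(cur, 0, "sram", 4);	/* e.g. an SRAM record */
	return 0;
}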
@@ -853,6 +869,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_fw_error_sram_dump(mvm);
+ iwl_mvm_fw_error_rxf_dump(mvm);
#endif
iwl_mvm_nic_restart(mvm);
@@ -1128,7 +1145,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
.id = WOWLAN_GET_STATUSES,
.flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
};
- struct iwl_wowlan_status_v6 *status;
+ struct iwl_wowlan_status *status;
int ret;
u32 disconnection_reasons, wakeup_reasons;
__le16 *qos_seq = NULL;
@@ -1158,18 +1175,27 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
iwl_free_resp(&get_status_cmd);
out:
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
mutex_unlock(&mvm->mutex);
}
-static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
CMD_WAKE_UP_TRANS;
int ret;
IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
+ IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
+ __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ return 0;
+ }
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+
ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
if (ret)
goto out;
@@ -1183,6 +1209,25 @@ out:
return ret;
}
+static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
+ return _iwl_mvm_exit_d0i3(mvm);
+}
+
+static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
+}
+
static const struct iwl_op_mode_ops iwl_mvm_ops = {
.start = iwl_op_mode_mvm_start,
.stop = iwl_op_mode_mvm_stop,
@@ -1196,4 +1241,5 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
.nic_config = iwl_mvm_nic_config,
.enter_d0i3 = iwl_mvm_enter_d0i3,
.exit_d0i3 = iwl_mvm_exit_d0i3,
+ .napi_add = iwl_mvm_napi_add,
};
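The new D0I3_DEFER_WAKEUP / D0I3_PENDING_WAKEUP pair lets an exit request that races with suspend be parked and replayed later. Below is a user-space model of that handshake (link with -pthread); the names and the mutex are illustrative, and the resume-side consumer of the pending bit is not part of this hunk.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool defer_wakeup;	/* models D0I3_DEFER_WAKEUP   */
static bool pending_wakeup;	/* models D0I3_PENDING_WAKEUP */

static void request_exit(void)
{
	pthread_mutex_lock(&lock);
	if (defer_wakeup) {
		pending_wakeup = true;	/* remember, handle later */
		pthread_mutex_unlock(&lock);
		return;
	}
	pthread_mutex_unlock(&lock);
	puts("exit d0i3 now");
}

static void end_defer_window(void)
{
	bool replay;

	pthread_mutex_lock(&lock);
	defer_wakeup = false;
	replay = pending_wakeup;
	pending_wakeup = false;
	pthread_mutex_unlock(&lock);

	if (replay)
		puts("exit d0i3 (deferred request replayed)");
}

int main(void)
{
	defer_wakeup = true;	/* suspend path opens the defer window */
	request_exit();		/* parked instead of executed */
	end_defer_window();	/* replays the deferred exit */
	return 0;
}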
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 6b636eab3339..78309f7d0b7b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -268,6 +268,30 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
}
+static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+ ETH_ALEN))
+ return false;
+
+ if (vif->p2p &&
+ !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
+ return false;
+ /*
+ * Avoid using uAPSD if P2P client is associated to GO that uses
+ * opportunistic power save. This is due to current FW limitation.
+ */
+ if (vif->p2p &&
+ (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_ENABLE_BIT))
+ return false;
+
+ return true;
+}
+
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd)
@@ -280,7 +304,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
- bool allow_uapsd = true;
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color));
@@ -303,13 +326,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
- mvm->pm_disabled)
+ !mvmvif->pm_enabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -351,23 +369,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
}
- if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
- ETH_ALEN))
- allow_uapsd = false;
-
- if (vif->p2p &&
- !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
- allow_uapsd = false;
- /*
- * Avoid using uAPSD if P2P client is associated to GO that uses
- * opportunistic power save. This is due to current FW limitation.
- */
- if (vif->p2p &&
- vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
- IEEE80211_P2P_OPPPS_ENABLE_BIT)
- allow_uapsd = false;
-
- if (allow_uapsd)
+ if (iwl_mvm_power_allow_uapsd(mvm, vif))
iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -421,13 +423,6 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
{
struct iwl_mac_power_cmd cmd = {};
- if (vif->type != NL80211_IFTYPE_STATION)
- return 0;
-
- if (vif->p2p &&
- !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
- return 0;
-
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -444,12 +439,6 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
};
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
- return 0;
-
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
- return 0;
-
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
mvm->ps_disabled = true;
@@ -508,86 +497,69 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
return 0;
}
-struct iwl_power_constraint {
+struct iwl_power_vifs {
struct ieee80211_vif *bf_vif;
struct ieee80211_vif *bss_vif;
struct ieee80211_vif *p2p_vif;
- u16 bss_phyctx_id;
- u16 p2p_phyctx_id;
- bool pm_disabled;
- bool ps_disabled;
- struct iwl_mvm *mvm;
+ struct ieee80211_vif *ap_vif;
+ struct ieee80211_vif *monitor_vif;
+ bool p2p_active;
+ bool bss_active;
+ bool ap_active;
+ bool monitor_active;
};
static void iwl_mvm_power_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_power_constraint *power_iterator = _data;
- struct iwl_mvm *mvm = power_iterator->mvm;
+ struct iwl_power_vifs *power_iterator = _data;
+ mvmvif->pm_enabled = false;
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_DEVICE:
break;
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_AP:
- /* no BSS power mgmt if we have an active AP */
- if (mvmvif->ap_ibss_active)
- power_iterator->pm_disabled = true;
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->ap_vif);
+ power_iterator->ap_vif = vif;
+ if (mvmvif->phy_ctxt)
+ if (mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->ap_active = true;
break;
case NL80211_IFTYPE_MONITOR:
- /* no BSS power mgmt and no device power save */
- power_iterator->pm_disabled = true;
- power_iterator->ps_disabled = true;
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->monitor_vif);
+ power_iterator->monitor_vif = vif;
+ if (mvmvif->phy_ctxt)
+ if (mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->monitor_active = true;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- if (mvmvif->phy_ctxt)
- power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
-
- /* we should have only one P2P vif */
+ /* only a single MAC of the same type */
WARN_ON(power_iterator->p2p_vif);
power_iterator->p2p_vif = vif;
-
- IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
- power_iterator->p2p_phyctx_id,
- power_iterator->bss_phyctx_id);
- if (!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
- /* no BSS power mgmt if we have a P2P client*/
- power_iterator->pm_disabled = true;
- } else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
- power_iterator->bss_phyctx_id < MAX_PHYS &&
- power_iterator->p2p_phyctx_id ==
- power_iterator->bss_phyctx_id) {
- power_iterator->pm_disabled = true;
- }
+ if (mvmvif->phy_ctxt)
+ if (mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->p2p_active = true;
break;
case NL80211_IFTYPE_STATION:
- if (mvmvif->phy_ctxt)
- power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
-
- /* we should have only one BSS vif */
+ /* only a single MAC of the same type */
WARN_ON(power_iterator->bss_vif);
power_iterator->bss_vif = vif;
+ if (mvmvif->phy_ctxt)
+ if (mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->bss_active = true;
if (mvmvif->bf_data.bf_enabled &&
!WARN_ON(power_iterator->bf_vif))
power_iterator->bf_vif = vif;
- IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
- power_iterator->p2p_phyctx_id,
- power_iterator->bss_phyctx_id);
- if (mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
- (power_iterator->p2p_phyctx_id < MAX_PHYS &&
- power_iterator->bss_phyctx_id < MAX_PHYS &&
- power_iterator->p2p_phyctx_id ==
- power_iterator->bss_phyctx_id))
- power_iterator->pm_disabled = true;
break;
default:
@@ -596,70 +568,118 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
}
static void
-iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
- struct iwl_power_constraint *constraint)
+iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
+ struct iwl_power_vifs *vifs)
{
- lockdep_assert_held(&mvm->mutex);
+ struct iwl_mvm_vif *bss_mvmvif = NULL;
+ struct iwl_mvm_vif *p2p_mvmvif = NULL;
+ struct iwl_mvm_vif *ap_mvmvif = NULL;
+ bool client_same_channel = false;
+ bool ap_same_channel = false;
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
- constraint->pm_disabled = true;
- constraint->ps_disabled = true;
- }
+ lockdep_assert_held(&mvm->mutex);
+ /* get vifs info + set pm_enabled to false */
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_iterator, constraint);
+ iwl_mvm_power_iterator, vifs);
+
+ if (vifs->bss_vif)
+ bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
+
+ if (vifs->p2p_vif)
+ p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
+
+ if (vifs->ap_vif)
+ ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
+
+ /* enable PM on bss if bss stands alone */
+ if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
+ bss_mvmvif->pm_enabled = true;
+ return;
+ }
+
+ /* enable PM on p2p if p2p stands alone */
+ if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
+ p2p_mvmvif->pm_enabled = true;
+ return;
+ }
+
+ if (vifs->bss_active && vifs->p2p_active)
+ client_same_channel = (bss_mvmvif->phy_ctxt->id ==
+ p2p_mvmvif->phy_ctxt->id);
+ if (vifs->bss_active && vifs->ap_active)
+ ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
+ ap_mvmvif->phy_ctxt->id);
+
+ /* bss is not stand-alone: enable PM if it is alone on its channel */
+ if (vifs->bss_active && !(client_same_channel || ap_same_channel) &&
+ (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
+ bss_mvmvif->pm_enabled = true;
+ return;
+ }
+
+ /*
+ * There is only one channel in the system and there are only
+ * bss and p2p clients that share it
+ */
+ if (client_same_channel && !vifs->ap_active &&
+ (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
+ /* share same channel */
+ bss_mvmvif->pm_enabled = true;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
+ p2p_mvmvif->pm_enabled = true;
+ }
}
int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_power_constraint constraint = {
- .p2p_phyctx_id = MAX_PHYS,
- .bss_phyctx_id = MAX_PHYS,
- .mvm = mvm,
- };
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_power_vifs vifs = {};
bool ba_enable;
int ret;
lockdep_assert_held(&mvm->mutex);
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
- return 0;
-
- iwl_mvm_power_get_global_constraint(mvm, &constraint);
- mvm->ps_disabled = constraint.ps_disabled;
- mvm->pm_disabled = constraint.pm_disabled;
+ iwl_mvm_power_set_pm(mvm, &vifs);
+ /* disable PS if CAM */
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
+ mvm->ps_disabled = true;
+ } else {
/* don't update device power state unless we add / remove monitor */
- if (vif->type == NL80211_IFTYPE_MONITOR) {
- ret = iwl_mvm_power_update_device(mvm);
- if (ret)
- return ret;
+ if (vifs.monitor_vif) {
+ if (vifs.monitor_active)
+ mvm->ps_disabled = true;
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ return ret;
+ }
}
- if (constraint.bss_vif) {
- ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
+ if (vifs.bss_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
if (ret)
return ret;
}
- if (constraint.p2p_vif) {
- ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
+ if (vifs.p2p_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
if (ret)
return ret;
}
- if (!constraint.bf_vif)
+ if (!vifs.bf_vif)
return 0;
- vif = constraint.bf_vif;
+ vif = vifs.bf_vif;
mvmvif = iwl_mvm_vif_from_mac80211(vif);
- ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
+ ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
- return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
+ return iwl_mvm_update_beacon_abort(mvm, vifs.bf_vif, ba_enable);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -671,19 +691,10 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
struct iwl_mac_power_cmd cmd = {};
int pos = 0;
- if (WARN_ON(!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
- return 0;
-
mutex_lock(&mvm->mutex);
memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
mutex_unlock(&mvm->mutex);
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
iwlmvm_mod_params.power_scheme);
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -826,8 +837,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
- vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
@@ -914,13 +924,3 @@ int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
}
-
-int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
-{
- struct iwl_powertable_cmd cmd = {
- .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
- };
-
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
- sizeof(cmd), &cmd);
-}
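iwl_mvm_power_set_pm() above replaces the old global pm_disabled flag with a per-vif decision. Below is a stand-alone model of that decision table, with the firmware capability flags and channel comparisons reduced to booleans for clarity; every name here is illustrative, not the driver's.

#include <stdbool.h>
#include <stdio.h>

struct pm_inputs {
	bool bss_active, p2p_active, ap_active;
	bool bss_p2p_same_chan, bss_ap_same_chan;
	bool fw_p2p_pm, fw_dcm, fw_scm;	/* models the ucode capability bits */
};

struct pm_outputs {
	bool bss_pm, p2p_pm;
};

static struct pm_outputs decide_pm(const struct pm_inputs *in)
{
	struct pm_outputs out = { false, false };

	if (in->bss_active && !in->p2p_active && !in->ap_active) {
		out.bss_pm = true;			/* BSS stands alone */
	} else if (in->p2p_active && !in->bss_active && !in->ap_active) {
		out.p2p_pm = in->fw_p2p_pm;		/* P2P stands alone */
	} else if (in->bss_active &&
		   !(in->bss_p2p_same_chan || in->bss_ap_same_chan) &&
		   in->fw_dcm) {
		out.bss_pm = true;			/* BSS alone on its channel */
	} else if (in->bss_p2p_same_chan && !in->ap_active && in->fw_scm) {
		out.bss_pm = true;			/* single shared channel */
		out.p2p_pm = in->fw_p2p_pm;
	}
	return out;
}

int main(void)
{
	struct pm_inputs in = {
		.bss_active = true, .p2p_active = true,
		.bss_p2p_same_chan = true, .fw_p2p_pm = true, .fw_scm = true,
	};
	struct pm_outputs out = decide_pm(&in);

	printf("bss_pm=%d p2p_pm=%d\n", out.bss_pm, out.p2p_pm);
	return 0;
}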
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 9f52c5b3f0ec..d44b2b33b5cc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -527,6 +527,9 @@ static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&tbl->win[i]);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
+ rs_rate_scale_clear_window(&tbl->tpc_win[i]);
}
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@ -656,17 +659,34 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
return 0;
}
-static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- int scale_index, int attempts, int successes)
+static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes,
+ u8 reduced_txp)
{
struct iwl_rate_scale_data *window = NULL;
+ int ret;
if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
return -EINVAL;
+ if (tbl->column != RS_COLUMN_INVALID) {
+ lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
+ lq_sta->tx_stats[tbl->column][scale_index].success += successes;
+ }
+
/* Select window for current tx bit rate */
window = &(tbl->win[scale_index]);
+ ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+ window);
+ if (ret)
+ return ret;
+
+ if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+ return -EINVAL;
+
+ window = &tbl->tpc_win[reduced_txp];
return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
window);
}
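rs_collect_tx_data() now books every TX outcome twice: once in the per-rate window and once in the window for the txpower reduction the frame was sent with (the per-column driver stats are a third, simpler counter). Below is a stand-alone model of that dual accounting; the sizes and names are illustrative only.

#include <stdio.h>

#define RATE_COUNT	15
#define MAX_REDUCTION	15

struct window { unsigned long attempts, successes; };

static struct window rate_win[RATE_COUNT];
static struct window tpc_win[MAX_REDUCTION + 1];

static int collect_tx(int rate, int reduced_txp, int attempts, int successes)
{
	if (rate < 0 || rate >= RATE_COUNT || reduced_txp > MAX_REDUCTION)
		return -1;

	rate_win[rate].attempts += attempts;		/* per-rate history */
	rate_win[rate].successes += successes;

	tpc_win[reduced_txp].attempts += attempts;	/* per-reduction history */
	tpc_win[reduced_txp].successes += successes;
	return 0;
}

int main(void)
{
	collect_tx(7, 3, 10, 9);	/* e.g. MCS7 at a 3 dB reduction: 9/10 acked */
	printf("rate 7: %lu/%lu, tpc 3: %lu/%lu\n",
	       rate_win[7].successes, rate_win[7].attempts,
	       tpc_win[3].successes, tpc_win[3].attempts);
	return 0;
}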
@@ -1000,6 +1020,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
u32 ucode_rate;
struct rs_rate rate;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
@@ -1141,9 +1162,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
ucode_rate = le32_to_cpu(table->rs_table[0]);
rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
- rs_collect_tx_data(curr_tbl, rate.index,
+ rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
info->status.ampdu_len,
- info->status.ampdu_ack_len);
+ info->status.ampdu_ack_len,
+ reduced_txp);
/* Update success/fail counts if not searching for new mode */
if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@ -1176,8 +1198,9 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
else
continue;
- rs_collect_tx_data(tmp_tbl, rate.index, 1,
- i < retries ? 0 : legacy_success);
+ rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
+ i < retries ? 0 : legacy_success,
+ reduced_txp);
}
/* Update success/fail counts if not searching for new mode */
@@ -1188,6 +1211,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
}
/* The last TX rate is cached in lq_sta; it's set in if/else above */
lq_sta->last_rate_n_flags = ucode_rate;
+ IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
done:
/* See if there's a better rate or modulation mode to try. */
if (sta && sta->supp_rates[sband->band])
@@ -1769,6 +1793,198 @@ out:
return action;
}
+static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
+ int *weaker, int *stronger)
+{
+ *weaker = index + TPC_TX_POWER_STEP;
+ if (*weaker > TPC_MAX_REDUCTION)
+ *weaker = TPC_INVALID;
+
+ *stronger = index - TPC_TX_POWER_STEP;
+ if (*stronger < 0)
+ *stronger = TPC_INVALID;
+}
+
+static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct rs_rate *rate,
+ enum ieee80211_band band)
+{
+ int index = rate->index;
+
+ /*
+ * allow TPC only if power management is enabled, or BT coex
+ * activity grade allows it and we are on 2.4 GHz.
+ */
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM &&
+ !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
+ return false;
+
+ IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
+ if (is_legacy(rate))
+ return index == IWL_RATE_54M_INDEX;
+ if (is_ht(rate))
+ return index == IWL_RATE_MCS_7_INDEX;
+ if (is_vht(rate))
+ return index == IWL_RATE_MCS_7_INDEX ||
+ index == IWL_RATE_MCS_8_INDEX ||
+ index == IWL_RATE_MCS_9_INDEX;
+
+ WARN_ON_ONCE(1);
+ return false;
+}
+
+enum tpc_action {
+ TPC_ACTION_STAY,
+ TPC_ACTION_DECREASE,
+ TPC_ACTION_INCREASE,
+ TPC_ACTION_NO_RESTIRCTION,
+};
+
+static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
+ s32 sr, int weak, int strong,
+ int current_tpt,
+ int weak_tpt, int strong_tpt)
+{
+ /* stay until we have valid tpt */
+ if (current_tpt == IWL_INVALID_VALUE) {
+ IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
+ return TPC_ACTION_STAY;
+ }
+
+ /* Too many failures, increase txp */
+ if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
+ IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
+ return TPC_ACTION_NO_RESTIRCTION;
+ }
+
+ /* try decreasing first if applicable */
+ if (weak != TPC_INVALID) {
+ if (weak_tpt == IWL_INVALID_VALUE &&
+ (strong_tpt == IWL_INVALID_VALUE ||
+ current_tpt >= strong_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "no weak txp measurement. decrease txp\n");
+ return TPC_ACTION_DECREASE;
+ }
+
+ if (weak_tpt > current_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "lower txp has better tpt. decrease txp\n");
+ return TPC_ACTION_DECREASE;
+ }
+ }
+
+ /* next, increase if needed */
+ if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
+ if (weak_tpt == IWL_INVALID_VALUE &&
+ strong_tpt != IWL_INVALID_VALUE &&
+ current_tpt < strong_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "higher txp has better tpt. increase txp\n");
+ return TPC_ACTION_INCREASE;
+ }
+
+ if (weak_tpt < current_tpt &&
+ (strong_tpt == IWL_INVALID_VALUE ||
+ strong_tpt > current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "lower txp has worse tpt. increase txp\n");
+ return TPC_ACTION_INCREASE;
+ }
+ }
+
+ IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
+ return TPC_ACTION_STAY;
+}
+
+static bool rs_tpc_perform(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct ieee80211_vif *vif = mvm_sta->vif;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum ieee80211_band band;
+ struct iwl_rate_scale_data *window;
+ struct rs_rate *rate = &tbl->rate;
+ enum tpc_action action;
+ s32 sr;
+ u8 cur = lq_sta->lq.reduced_tpc;
+ int current_tpt;
+ int weak, strong;
+ int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
+ IWL_DEBUG_RATE(mvm, "fixed tpc: %d",
+ lq_sta->dbg_fixed_txp_reduction);
+ lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
+ return cur != lq_sta->dbg_fixed_txp_reduction;
+ }
+#endif
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (WARN_ON(!chanctx_conf))
+ band = IEEE80211_NUM_BANDS;
+ else
+ band = chanctx_conf->def.chan->band;
+ rcu_read_unlock();
+
+ if (!rs_tpc_allowed(mvm, rate, band)) {
+ IWL_DEBUG_RATE(mvm,
+ "tpc is not allowed. remove txp restrictions");
+ lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ return cur != TPC_NO_REDUCTION;
+ }
+
+ rs_get_adjacent_txp(mvm, cur, &weak, &strong);
+
+ /* Collect measured throughputs for current and adjacent rates */
+ window = tbl->tpc_win;
+ sr = window[cur].success_ratio;
+ current_tpt = window[cur].average_tpt;
+ if (weak != TPC_INVALID)
+ weak_tpt = window[weak].average_tpt;
+ if (strong != TPC_INVALID)
+ strong_tpt = window[strong].average_tpt;
+
+ IWL_DEBUG_RATE(mvm,
+ "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
+ cur, current_tpt, sr, weak, strong,
+ weak_tpt, strong_tpt);
+
+ action = rs_get_tpc_action(mvm, sr, weak, strong,
+ current_tpt, weak_tpt, strong_tpt);
+
+ /* override actions if we are on the edge */
+ if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
+ IWL_DEBUG_RATE(mvm, "already in lowest txp, stay");
+ action = TPC_ACTION_STAY;
+ } else if (strong == TPC_INVALID &&
+ (action == TPC_ACTION_INCREASE ||
+ action == TPC_ACTION_NO_RESTIRCTION)) {
+ IWL_DEBUG_RATE(mvm, "already in highest txp, stay");
+ action = TPC_ACTION_STAY;
+ }
+
+ switch (action) {
+ case TPC_ACTION_DECREASE:
+ lq_sta->lq.reduced_tpc = weak;
+ return true;
+ case TPC_ACTION_INCREASE:
+ lq_sta->lq.reduced_tpc = strong;
+ return true;
+ case TPC_ACTION_NO_RESTIRCTION:
+ lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ return true;
+ case TPC_ACTION_STAY:
+ /* do nothing */
+ break;
+ }
+ return false;
+}
+
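rs_get_adjacent_txp() above walks the reduction scale in TPC_TX_POWER_STEP increments and clamps both ends with TPC_INVALID. A quick stand-alone check of that stepping, with the constants copied from the rs.h hunk further down in this patch:

#include <stdio.h>

#define TPC_TX_POWER_STEP	3
#define TPC_MAX_REDUCTION	15
#define TPC_INVALID		0xff

static void get_adjacent_txp(int index, int *weaker, int *stronger)
{
	*weaker = index + TPC_TX_POWER_STEP;
	if (*weaker > TPC_MAX_REDUCTION)
		*weaker = TPC_INVALID;

	*stronger = index - TPC_TX_POWER_STEP;
	if (*stronger < 0)
		*stronger = TPC_INVALID;
}

int main(void)
{
	for (int cur = 0; cur <= TPC_MAX_REDUCTION; cur += TPC_TX_POWER_STEP) {
		int weak, strong;

		get_adjacent_txp(cur, &weak, &strong);
		printf("cur %2d -> weaker %3d stronger %3d\n",
		       cur, weak, strong);	/* 255 marks TPC_INVALID */
	}
	return 0;
}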
/*
* Do rate scaling and search for new modulation mode.
*/
@@ -2019,6 +2235,8 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
break;
case RS_ACTION_STAY:
/* No change */
+ update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
+ break;
default:
break;
}
@@ -2478,6 +2696,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->is_agg = 0;
#ifdef CONFIG_MAC80211_DEBUGFS
lq_sta->dbg_fixed_rate = 0;
+ lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
@@ -2653,6 +2872,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
rs_build_rates_table_from_fixed(mvm, lq_cmd,
lq_sta->band,
lq_sta->dbg_fixed_rate);
+ lq_cmd->reduced_tpc = 0;
ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
RATE_MCS_ANT_POS;
} else
@@ -2783,7 +3003,6 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
size_t buf_size;
u32 parsed_rate;
-
mvm = lq_sta->drv;
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
@@ -2856,6 +3075,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
lq_sta->lq.agg_disable_start_th,
lq_sta->lq.agg_frame_cnt_limit);
+ desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
desc += sprintf(buff+desc,
"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
lq_sta->lq.initial_rate_index[0],
@@ -2928,6 +3148,94 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
.llseek = default_llseek,
};
+static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static const char * const column_name[] = {
+ [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
+ [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
+ [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
+ [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
+ [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
+ [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
+ [RS_COLUMN_MIMO2] = "MIMO2",
+ [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
+ };
+
+ static const char * const rate_name[] = {
+ [IWL_RATE_1M_INDEX] = "1M",
+ [IWL_RATE_2M_INDEX] = "2M",
+ [IWL_RATE_5M_INDEX] = "5.5M",
+ [IWL_RATE_11M_INDEX] = "11M",
+ [IWL_RATE_6M_INDEX] = "6M|MCS0",
+ [IWL_RATE_9M_INDEX] = "9M",
+ [IWL_RATE_12M_INDEX] = "12M|MCS1",
+ [IWL_RATE_18M_INDEX] = "18M|MCS2",
+ [IWL_RATE_24M_INDEX] = "24M|MCS3",
+ [IWL_RATE_36M_INDEX] = "36M|MCS4",
+ [IWL_RATE_48M_INDEX] = "48M|MCS5",
+ [IWL_RATE_54M_INDEX] = "54M|MCS6",
+ [IWL_RATE_MCS_7_INDEX] = "MCS7",
+ [IWL_RATE_MCS_8_INDEX] = "MCS8",
+ [IWL_RATE_MCS_9_INDEX] = "MCS9",
+ };
+
+ char *buff, *pos, *endpos;
+ int col, rate;
+ ssize_t ret;
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct rs_rate_stats *stats;
+ static const size_t bufsz = 1024;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ pos += scnprintf(pos, endpos - pos, "COLUMN,");
+ for (rate = 0; rate < IWL_RATE_COUNT; rate++)
+ pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
+ pos += scnprintf(pos, endpos - pos, "\n");
+
+ for (col = 0; col < RS_COLUMN_COUNT; col++) {
+ pos += scnprintf(pos, endpos - pos,
+ "%s,", column_name[col]);
+
+ for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
+ stats = &(lq_sta->tx_stats[col][rate]);
+ pos += scnprintf(pos, endpos - pos,
+ "%llu/%llu,",
+ stats->success,
+ stats->total);
+ }
+ pos += scnprintf(pos, endpos - pos, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+ return ret;
+}
+
+static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
+
+ return count;
+}
+
+static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
+ .read = rs_sta_dbgfs_drv_tx_stats_read,
+ .write = rs_sta_dbgfs_drv_tx_stats_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = mvm_sta;
@@ -2937,9 +3245,15 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
lq_sta->rs_sta_dbgfs_stats_table_file =
debugfs_create_file("rate_stats_table", S_IRUSR, dir,
lq_sta, &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
+ debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
+ lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
&lq_sta->tx_agg_tid_en);
+ lq_sta->rs_sta_dbgfs_reduced_txp_file =
+ debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
+ &lq_sta->dbg_fixed_txp_reduction);
}
static void rs_remove_debugfs(void *mvm, void *mvm_sta)
@@ -2947,7 +3261,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
struct iwl_lq_sta *lq_sta = mvm_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
}
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 0acfac96a56c..374a83d7db25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -158,6 +158,13 @@ enum {
#define RS_SR_FORCE_DECREASE 1920 /* 15% */
#define RS_SR_NO_DECREASE 10880 /* 85% */
+#define TPC_SR_FORCE_INCREASE 9600 /* 75% */
+#define TPC_SR_NO_INCREASE 10880 /* 85% */
+#define TPC_TX_POWER_STEP 3
+#define TPC_MAX_REDUCTION 15
+#define TPC_NO_REDUCTION 0
+#define TPC_INVALID 0xff
+
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
@@ -266,9 +273,16 @@ enum rs_column {
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
RS_COLUMN_INVALID,
};
+/* Packet stats per rate */
+struct rs_rate_stats {
+ u64 success;
+ u64 total;
+};
+
/**
* struct iwl_scale_tbl_info -- tx params and success history for all rates
*
@@ -280,6 +294,8 @@ struct iwl_scale_tbl_info {
enum rs_column column;
const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+ /* per txpower-reduction history */
+ struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
};
enum {
@@ -315,6 +331,8 @@ struct iwl_lq_sta {
bool is_vht;
enum ieee80211_band band;
+ struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
+
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
unsigned long active_legacy_rate;
unsigned long active_siso_rate;
@@ -334,8 +352,11 @@ struct iwl_lq_sta {
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_drv_tx_stats_file;
struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+ struct dentry *rs_sta_dbgfs_reduced_txp_file;
u32 dbg_fixed_rate;
+ u8 dbg_fixed_txp_reduction;
#endif
struct iwl_mvm *drv;
@@ -345,6 +366,9 @@ struct iwl_lq_sta {
u32 last_rate_n_flags;
/* packets destined for this STA are aggregated */
u8 is_agg;
+
+ /* tx power reduction for this sta */
+ int tpc_reduce;
};
/* Initialize station's rate scaling information after adding station */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 6061553a5e44..cf7276967acd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -60,7 +60,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "iwl-trans.h"
-
#include "mvm.h"
#include "fw-api.h"
@@ -130,42 +129,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx_ni(mvm->hw, skb);
-}
-
-static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
- struct iwl_rx_phy_info *phy_info,
- struct ieee80211_rx_status *rx_status)
-{
- int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
- u32 agc_a, agc_b;
- u32 val;
-
- val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
- agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
- agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
-
- val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
- rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
- rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
-
- /*
- * dBm = rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal.
- */
- rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
- rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
- max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
-
- IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
- rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
-
- rx_status->signal = max_rssi_dbm;
- rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
- RX_RES_PHY_FLAGS_ANTENNA)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
- rx_status->chain_signal[0] = rssi_a_dbm;
- rx_status->chain_signal[1] = rssi_b_dbm;
+ ieee80211_rx(mvm->hw, skb);
}
/*
@@ -337,10 +301,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
*/
/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API)
- iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
- else
- iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
+ iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
(unsigned long long)rx_status.mactime);
@@ -394,6 +355,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
rx_status.flag |= RX_FLAG_VHT;
rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status.vht_flag |= RX_VHT_FLAG_BF;
} else {
rx_status.rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c91dc8498852..63e7b16edb55 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -348,7 +348,10 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex);
- BUG_ON(mvm->scan_cmd == NULL);
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(mvm->scan_cmd == NULL))
+ return -ENOMEM;
IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
mvm->scan_status = IWL_MVM_SCAN_OS;
@@ -567,9 +570,13 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
/* scan status must be locked for proper checking */
lockdep_assert_held(&mvm->mutex);
- IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+ IWL_DEBUG_SCAN(mvm,
+ "Scheduled scan completed, status %s EBS status %s:%d\n",
scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
- "completed" : "aborted");
+ "completed" : "aborted", scan_notif->ebs_status ==
+ IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
+ scan_notif->ebs_status);
+
/* only call mac80211 completion if the stop was initiated by FW */
if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
@@ -577,6 +584,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
ieee80211_sched_scan_stopped(mvm->hw);
}
+ mvm->last_ebs_successful = !scan_notif->ebs_status;
+
return 0;
}
@@ -913,6 +922,11 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
}
+ if (mvm->last_ebs_successful &&
+ mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
+ scan_req.flags |=
+ cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
+
return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
sizeof(scan_req), &scan_req);
}
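The scan changes track one bit of EBS history: last_ebs_successful is cleared when a scheduled-scan completion reports an EBS failure, reset to true on a fresh association (see the mac80211.c hunk earlier), and, together with the firmware capability bit, gates the accurate-mode request flag. A tiny stand-alone model of that gating; the names are illustrative.

#include <stdbool.h>
#include <stdio.h>

static bool fw_supports_ebs = true;	/* models the ucode capability bit */
static bool last_ebs_successful = true;

/* Completion notification: a non-zero EBS status means the attempt failed. */
static void on_sched_scan_complete(unsigned int ebs_status)
{
	last_ebs_successful = (ebs_status == 0);
}

/* New association: assume the environment changed and give EBS another try. */
static void on_sta_auth(void)
{
	last_ebs_successful = true;
}

/* Decide whether the next scheduled scan asks for EBS accurate mode. */
static bool want_ebs_accurate_mode(void)
{
	return last_ebs_successful && fw_supports_ebs;
}

int main(void)
{
	on_sched_scan_complete(1);			/* EBS failed        */
	printf("%d\n", want_ebs_accurate_mode());	/* 0: back off       */
	on_sta_auth();					/* new association   */
	printf("%d\n", want_ebs_accurate_mode());	/* 1: try EBS again  */
	return 0;
}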
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 88809b2d1654..7edfd15efc9d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -237,9 +237,6 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
.sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
};
- if (IWL_UCODE_API(mvm->fw->ucode_ver) < 8)
- return 0;
-
/*
* Ignore the call if we are in HW Restart flow, or if the handled
* vif is a p2p device.
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index f339ef884250..3e11b9d802e7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,115 +66,6 @@
#include "sta.h"
#include "rs.h"
-static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
- struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
-{
- memset(cmd_v5, 0, sizeof(*cmd_v5));
-
- cmd_v5->add_modify = cmd_v7->add_modify;
- cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
- cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
- memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
- cmd_v5->sta_id = cmd_v7->sta_id;
- cmd_v5->modify_mask = cmd_v7->modify_mask;
- cmd_v5->station_flags = cmd_v7->station_flags;
- cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
- cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
- cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
- cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
- cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
- cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
- cmd_v5->assoc_id = cmd_v7->assoc_id;
- cmd_v5->beamform_flags = cmd_v7->beamform_flags;
- cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
-}
-
-static void
-iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
- struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
- u32 mac_id_n_color)
-{
- memset(sta_cmd, 0, sizeof(*sta_cmd));
-
- sta_cmd->sta_id = key_cmd->sta_id;
- sta_cmd->add_modify = STA_MODE_MODIFY;
- sta_cmd->modify_mask = STA_MODIFY_KEY;
- sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
-
- sta_cmd->key.key_offset = key_cmd->key_offset;
- sta_cmd->key.key_flags = key_cmd->key_flags;
- memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
- sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
- memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
- sizeof(sta_cmd->key.tkip_rx_ttak));
-}
-
-static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
- struct iwl_mvm_add_sta_cmd_v7 *cmd,
- int *status)
-{
- struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
- cmd, status);
-
- iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
-
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
- &cmd_v5, status);
-}
-
-static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
- struct iwl_mvm_add_sta_cmd_v7 *cmd)
-{
- struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
- sizeof(*cmd), cmd);
-
- iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
-
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
- &cmd_v5);
-}
-
-static int
-iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
- struct iwl_mvm_add_sta_key_cmd *cmd,
- u32 mac_id_n_color,
- int *status)
-{
- struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
- sizeof(*cmd), cmd, status);
-
- iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
-
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
- &sta_cmd, status);
-}
-
-static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
- u32 flags,
- struct iwl_mvm_add_sta_key_cmd *cmd,
- u32 mac_id_n_color)
-{
- struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
- sizeof(*cmd), cmd);
-
- iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
-
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
- &sta_cmd);
-}
-
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
enum nl80211_iftype iftype)
{
@@ -207,7 +98,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
+ struct iwl_mvm_add_sta_cmd add_sta_cmd;
int ret;
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
@@ -295,7 +186,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
+ &add_sta_cmd, &status);
if (ret)
return ret;
@@ -380,7 +272,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
- struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+ struct iwl_mvm_add_sta_cmd cmd = {};
int ret;
u32 status;
@@ -393,7 +285,8 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -498,7 +391,7 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
sta_id);
continue;
}
- rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
clear_bit(sta_id, mvm->sta_drained);
}
@@ -520,14 +413,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
/* flush its queues here since we are freeing mvm_sta */
ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
- /*
- * Put a non-NULL since the fw station isn't removed.
- * It will be removed after the MAC will be set as
- * unassoc.
- */
- rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
- ERR_PTR(-EINVAL));
-
/* if we are associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
@@ -557,7 +442,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
} else {
spin_unlock_bh(&mvm_sta->lock);
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
- rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
}
return ret;
@@ -571,7 +456,7 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
return ret;
}
@@ -593,7 +478,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
- rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
sta->sta_id = IWL_MVM_STATION_COUNT;
}
@@ -603,13 +488,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
const u8 *addr,
u16 mac_id, u16 color)
{
- struct iwl_mvm_add_sta_cmd_v7 cmd;
+ struct iwl_mvm_add_sta_cmd cmd;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
- memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
+ memset(&cmd, 0, sizeof(cmd));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
@@ -619,7 +504,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
if (addr)
memcpy(cmd.addr, addr, ETH_ALEN);
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -753,7 +639,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+ struct iwl_mvm_add_sta_cmd cmd = {};
int ret;
u32 status;
@@ -777,7 +663,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
STA_MODIFY_REMOVE_BA_TID;
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -812,7 +699,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+ struct iwl_mvm_add_sta_cmd cmd = {};
int ret;
u32 status;
@@ -834,7 +721,8 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -1129,12 +1017,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
u32 cmd_flags)
{
- __le16 key_flags;
struct iwl_mvm_add_sta_key_cmd cmd = {};
+ __le16 key_flags;
int ret, status;
u16 keyidx;
int i;
- u32 mac_id_n_color = mvm_sta->mac_id_n_color;
keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
@@ -1167,12 +1054,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
status = ADD_STA_SUCCESS;
if (cmd_flags == CMD_SYNC)
- ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
- mac_id_n_color,
- &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+ &cmd, &status);
else
- ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
- mac_id_n_color);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
+ sizeof(cmd), &cmd);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1399,9 +1285,8 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
cmd.sta_id = sta_id;
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
- mvm_sta->mac_id_n_color,
- &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+ &cmd, &status);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1448,7 +1333,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v7 cmd = {
+ struct iwl_mvm_add_sta_cmd cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1456,7 +1341,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
@@ -1468,7 +1353,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
bool agg)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v7 cmd = {
+ struct iwl_mvm_add_sta_cmd cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1538,7 +1423,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
}
- ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 2ed84c421481..e5e3071ff252 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -253,6 +253,8 @@ enum iwl_mvm_agg_state {
* This is basically (last acked packet++).
* @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
* Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @reduced_tpc: Reduced tx power. Holds the data between the
+ * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
* @state: state of the BA agreement establishment / tear down.
* @txq_id: Tx queue used by the BA session
* @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@@ -265,6 +267,7 @@ struct iwl_mvm_tid_data {
u16 next_reclaimed;
/* The rest is Tx AGG related */
u32 rate_n_flags;
+ u8 reduced_tpc;
enum iwl_mvm_agg_state state;
u16 txq_id;
u16 ssn;
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 61331245ad93..a9402937f767 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -273,67 +273,10 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
return true;
}
-/* used to convert from time event API v2 to v1 */
-#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
- TE_V2_EVENT_SOCIOPATHIC)
-static inline u16 te_v2_get_notify(__le16 policy)
-{
- return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
-}
-
-static inline u16 te_v2_get_dep_policy(__le16 policy)
-{
- return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
- TE_V2_PLACEMENT_POS;
-}
-
-static inline u16 te_v2_get_absence(__le16 policy)
-{
- return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
-}
-
-static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
- struct iwl_time_event_cmd_v1 *cmd_v1)
-{
- cmd_v1->id_and_color = cmd_v2->id_and_color;
- cmd_v1->action = cmd_v2->action;
- cmd_v1->id = cmd_v2->id;
- cmd_v1->apply_time = cmd_v2->apply_time;
- cmd_v1->max_delay = cmd_v2->max_delay;
- cmd_v1->depends_on = cmd_v2->depends_on;
- cmd_v1->interval = cmd_v2->interval;
- cmd_v1->duration = cmd_v2->duration;
- if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
- cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
- else
- cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
- cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
- cmd_v1->interval_reciprocal = 0; /* unused */
-
- cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
- cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
- cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
-}
-
-static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
- const struct iwl_time_event_cmd_v2 *cmd)
-{
- struct iwl_time_event_cmd_v1 cmd_v1;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
- return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
- sizeof(*cmd), cmd);
-
- iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
- return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
- sizeof(cmd_v1), &cmd_v1);
-}
-
-
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_time_event_data *te_data,
- struct iwl_time_event_cmd_v2 *te_cmd)
+ struct iwl_time_event_cmd *te_cmd)
{
static const u8 time_event_response[] = { TIME_EVENT_CMD };
struct iwl_notification_wait wait_time_event;
@@ -369,7 +312,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
ARRAY_SIZE(time_event_response),
iwl_mvm_time_event_response, te_data);
- ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+ sizeof(*te_cmd), te_cmd);
if (ret) {
IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@ -397,7 +341,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- struct iwl_time_event_cmd_v2 time_cmd = {};
+ struct iwl_time_event_cmd time_cmd = {};
lockdep_assert_held(&mvm->mutex);
@@ -453,7 +397,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
struct iwl_mvm_time_event_data *te_data)
{
- struct iwl_time_event_cmd_v2 time_cmd = {};
+ struct iwl_time_event_cmd time_cmd = {};
u32 id, uid;
int ret;
@@ -490,7 +434,8 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
- ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+ sizeof(time_cmd), &time_cmd);
if (WARN_ON(ret))
return;
}
@@ -510,7 +455,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- struct iwl_time_event_cmd_v2 time_cmd = {};
+ struct iwl_time_event_cmd time_cmd = {};
lockdep_assert_held(&mvm->mutex);
if (te_data->running) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 7a99fa361954..39a3e03a0acd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -468,13 +468,14 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
}
if (params->support_tx_backoff) {
- tx_backoff = 0;
+ tx_backoff = tt->min_backoff;
for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
if (temperature < params->tx_backoff[i].temperature)
break;
- tx_backoff = params->tx_backoff[i].backoff;
+ tx_backoff = max(tt->min_backoff,
+ params->tx_backoff[i].backoff);
}
- if (tx_backoff != 0)
+ if (tx_backoff != tt->min_backoff)
throttle_enable = true;
if (tt->tx_backoff != tx_backoff)
iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
@@ -484,7 +485,8 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
IWL_WARN(mvm,
"Due to high temperature thermal throttling initiated\n");
tt->throttle = true;
- } else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 &&
+ } else if (tt->throttle && !tt->dynamic_smps &&
+ tt->tx_backoff == tt->min_backoff &&
temperature <= params->tx_protection_exit) {
IWL_WARN(mvm,
"Temperature is back to normal thermal throttling stopped\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 879aeac46cc1..ff1b630e130e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -636,7 +636,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
}
- ieee80211_tx_status_ni(mvm->hw, skb);
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ info->status.status_driver_data[0] =
+ (void *)(uintptr_t)tx_resp->reduced_tpc;
+
+ ieee80211_tx_status(mvm->hw, skb);
}
if (txq_id >= mvm->first_agg_queue) {
@@ -815,6 +819,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
+ mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
}
rcu_read_unlock();
@@ -928,6 +933,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
info->status.ampdu_len = ba_notif->txed;
iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
info);
+ info->status.status_driver_data[0] =
+ (void *)(uintptr_t)tid_data->reduced_tpc;
}
}
@@ -937,7 +944,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status_ni(mvm->hw, skb);
+ ieee80211_tx_status(mvm->hw, skb);
}
return 0;
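The tx.c hunks hand the firmware's reduced_tpc byte to the status path by casting it into the void * slot status_driver_data[0]. A minimal standalone illustration of that uintptr_t round-trip, using a dummy structure instead of mac80211's real ieee80211_tx_info:

#include <stdint.h>
#include <stdio.h>

/* Dummy stand-in for the status "driver data" array (not the mac80211 struct). */
struct fake_status {
	void *driver_data[1];
};

int main(void)
{
	struct fake_status st;
	uint8_t reduced_tpc = 3;	/* example value, as if reported by firmware */

	/* store: widen to uintptr_t first so the integer-to-pointer cast is well defined */
	st.driver_data[0] = (void *)(uintptr_t)reduced_tpc;

	/* read it back later on the status/rate-scaling path */
	uint8_t readback = (uint8_t)(uintptr_t)st.driver_data[0];

	printf("reduced_tpc = %u\n", (unsigned)readback);
	return 0;
}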
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index d619851745a1..c5f4532cafa9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -64,6 +64,7 @@
#include "iwl-debug.h"
#include "iwl-io.h"
+#include "iwl-prph.h"
#include "mvm.h"
#include "fw-api-rs.h"
@@ -469,6 +470,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
mvm->status, table.valid);
}
+ /* Do not change this output - scripts rely on it */
+
IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
@@ -522,7 +525,7 @@ void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
u32 ofs, sram_len;
void *sram;
- if (!mvm->ucode_loaded || mvm->fw_error_sram)
+ if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
return;
img = &mvm->fw->img[mvm->cur_ucode];
@@ -538,6 +541,47 @@ void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
mvm->fw_error_sram_len = sram_len;
}
+void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
+{
+ int i, reg_val;
+ unsigned long flags;
+
+ if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
+ return;
+
+ /* reading buffer size */
+ reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
+ mvm->fw_error_rxf_len =
+ (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
+
+ /* the register holds the value divided by 128 */
+ mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;
+
+ if (!mvm->fw_error_rxf_len)
+ return;
+
+ mvm->fw_error_rxf = kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
+ if (!mvm->fw_error_rxf) {
+ mvm->fw_error_rxf_len = 0;
+ return;
+ }
+
+ if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
+ kfree(mvm->fw_error_rxf);
+ mvm->fw_error_rxf = NULL;
+ mvm->fw_error_rxf_len = 0;
+ return;
+ }
+
+ for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
+ iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
+ i * sizeof(u32));
+ mvm->fw_error_rxf[i] =
+ iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
+ }
+ iwl_trans_release_nic_access(mvm->trans, &flags);
+}
+
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 9091513ea738..1b95d856dfd5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -102,7 +102,7 @@ struct iwl_rxq {
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
- int need_update;
+ bool need_update;
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
@@ -231,7 +231,7 @@ struct iwl_txq {
spinlock_t lock;
struct timer_list stuck_timer;
struct iwl_trans_pcie *trans_pcie;
- u8 need_update;
+ bool need_update;
u8 active;
bool ampdu;
};
@@ -270,6 +270,9 @@ struct iwl_trans_pcie {
struct iwl_trans *trans;
struct iwl_drv *drv;
+ struct net_device napi_dev;
+ struct napi_struct napi;
+
/* INT ICT Table */
__le32 *ict_tbl;
dma_addr_t ict_tbl_dma;
@@ -362,7 +365,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb, int handler_status);
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index fdfa3969cac9..4a26a082a1ba 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -145,15 +145,13 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
*/
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
u32 reg;
- spin_lock(&rxq->lock);
-
- if (rxq->need_update == 0)
- goto exit_unlock;
+ lockdep_assert_held(&rxq->lock);
/*
* explicitly wake up the NIC if:
@@ -169,13 +167,27 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
+ rxq->need_update = true;
+ return;
}
}
rxq->write_actual = round_down(rxq->write, 8);
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
- rxq->need_update = 0;
+}
+
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+ spin_lock(&rxq->lock);
+
+ if (!rxq->need_update)
+ goto exit_unlock;
+
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ rxq->need_update = false;
exit_unlock:
spin_unlock(&rxq->lock);
@@ -236,9 +248,8 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
spin_lock(&rxq->lock);
- rxq->need_update = 1;
+ iwl_pcie_rxq_inc_wr_ptr(trans);
spin_unlock(&rxq->lock);
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
}
}
@@ -362,20 +373,9 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
* Also restock the Rx queue via iwl_pcie_rxq_restock.
* This is called as a scheduled work item (except during initialization)
*/
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
-
- spin_lock(&trans_pcie->irq_lock);
- iwl_pcie_rxq_restock(trans);
- spin_unlock(&trans_pcie->irq_lock);
-}
-
-static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
- iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+ iwl_pcie_rxq_alloc_rbs(trans, gfp);
iwl_pcie_rxq_restock(trans);
}
@@ -385,7 +385,7 @@ static void iwl_pcie_rx_replenish_work(struct work_struct *data)
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);
- iwl_pcie_rx_replenish(trans_pcie->trans);
+ iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
@@ -521,14 +521,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans);
+ iwl_pcie_rx_replenish(trans, GFP_KERNEL);
iwl_pcie_rx_hw_init(trans, rxq);
- spin_lock(&trans_pcie->irq_lock);
- rxq->need_update = 1;
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ spin_unlock(&rxq->lock);
return 0;
}
@@ -673,7 +672,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
/* Reuse the page if possible. For notification packets and
* SKBs that fail to Rx correctly, add them back into the
* rx_free list for reuse later. */
- spin_lock(&rxq->lock);
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
@@ -694,7 +692,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
- spin_unlock(&rxq->lock);
}
/*
@@ -709,6 +706,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
u32 count = 8;
int total_empty;
+restart:
+ spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
@@ -743,18 +742,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
count++;
if (count >= 8) {
rxq->read = i;
- iwl_pcie_rx_replenish_now(trans);
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
count = 0;
+ goto restart;
}
}
}
/* Backtrack one entry */
rxq->read = i;
+ spin_unlock(&rxq->lock);
+
if (fill_rx)
- iwl_pcie_rx_replenish_now(trans);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
else
iwl_pcie_rxq_restock(trans);
+
+ if (trans_pcie->napi.poll)
+ napi_gro_flush(&trans_pcie->napi, false);
}
/*
@@ -876,7 +882,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
- u32 i;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
@@ -1028,9 +1033,8 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* uCode wakes up after power-down sleep */
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
- iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
- iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
+ iwl_pcie_rxq_check_wrptr(trans);
+ iwl_pcie_txq_check_wrptrs(trans);
isr_stats->wakeup++;
@@ -1068,8 +1072,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);
- iwl_pcie_rx_handle(trans);
-
/*
* Enable periodic interrupt in 8 msec only if we received
* real RX interrupt (instead of just periodic int), to catch
@@ -1082,6 +1084,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
CSR_INT_PERIODIC_ENA);
isr_stats->rx++;
+
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans);
+ local_bh_enable();
}
/* This "Tx" DMA channel is used only for loading uCode */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dcfd6d866d09..f98ef1e62eb9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -103,7 +103,6 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
@@ -1053,6 +1052,12 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+ WARN_ON(1);
+ return 0;
+}
+
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
@@ -1079,6 +1084,18 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->command_names = trans_cfg->command_names;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+
+ /* Initialize NAPI here - it should be before registering to mac80211
+ * in the opmode but after the HW struct is allocated.
+ * As this function may be called again in some corner cases, don't
+ * do anything if NAPI was already initialized.
+ */
+ if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+ init_dummy_netdev(&trans_pcie->napi_dev);
+ iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
+ &trans_pcie->napi_dev,
+ iwl_pcie_dummy_napi_poll, 64);
+ }
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1099,6 +1116,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
pci_disable_device(trans_pcie->pci_dev);
kmem_cache_destroy(trans->dev_cmd_pool);
+ if (trans_pcie->napi.poll)
+ netif_napi_del(&trans_pcie->napi);
+
kfree(trans);
}
@@ -1237,7 +1257,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
#define IWL_FLUSH_WAIT_MS 2000
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
@@ -1250,13 +1270,31 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
/* waiting for all the tx frames complete might take a while */
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ u8 wr_ptr;
+
if (cnt == trans_pcie->cmd_queue)
continue;
+ if (!test_bit(cnt, trans_pcie->queue_used))
+ continue;
+ if (!(BIT(cnt) & txq_bm))
+ continue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
txq = &trans_pcie->txq[cnt];
q = &txq->q;
- while (q->read_ptr != q->write_ptr && !time_after(jiffies,
- now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+ wr_ptr = ACCESS_ONCE(q->write_ptr);
+
+ while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+ u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+
+ if (WARN_ONCE(wr_ptr != write_ptr,
+ "WR pointer moved while flushing %d -> %d\n",
+ wr_ptr, write_ptr))
+ return -ETIMEDOUT;
msleep(1);
+ }
if (q->read_ptr != q->write_ptr) {
IWL_ERR(trans,
@@ -1264,6 +1302,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
ret = -ETIMEDOUT;
break;
}
+ IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
}
if (!ret)
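The new txq_bm parameter of iwl_trans_pcie_wait_txq_empty lets callers flush only selected Tx queues; any queue whose bit is clear, or which is not marked used, is skipped. A small sketch of that bitmap filter with hypothetical bitmaps and generic names, not the transport's real structures:

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES 31

/* Skip queues not selected by the caller's bitmap, mirroring the
 * "if (!(BIT(cnt) & txq_bm)) continue;" test added in the hunk above. */
static void flush_selected(uint32_t used_bm, uint32_t txq_bm)
{
	int cnt;

	for (cnt = 0; cnt < MAX_QUEUES; cnt++) {
		if (!(used_bm & (1u << cnt)))
			continue;	/* queue not allocated */
		if (!(txq_bm & (1u << cnt)))
			continue;	/* caller did not ask for this queue */
		printf("emptying queue %d\n", cnt);
	}
}

int main(void)
{
	/* hypothetical bitmaps: queues 0, 1 and 4 exist, caller flushes 1 and 4 */
	flush_selected(0x13, 0x12);
	return 0;
}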
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 3b0c72c10054..dde6031f4257 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -287,14 +287,14 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
/*
* iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
*/
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->q.id;
- if (txq->need_update == 0)
- return;
+ lockdep_assert_held(&txq->lock);
/*
* explicitly wake up the NIC if:
@@ -317,6 +317,7 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ txq->need_update = true;
return;
}
}
@@ -327,8 +328,23 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
*/
IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+}
+
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
- txq->need_update = 0;
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ struct iwl_txq *txq = &trans_pcie->txq[i];
+
+ spin_lock(&txq->lock);
+ if (trans_pcie->txq[i].need_update) {
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ trans_pcie->txq[i].need_update = false;
+ }
+ spin_unlock(&txq->lock);
+ }
}
static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@@ -542,7 +558,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
{
int ret;
- txq->need_update = 0;
+ txq->need_update = false;
/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
@@ -680,7 +696,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
*/
- iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+ if (trans->cfg->base_params->scd_chain_ext_wa)
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
trans_pcie->cmd_fifo);
@@ -1028,7 +1045,8 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
}
}
- if (q->read_ptr == q->write_ptr) {
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ q->read_ptr == q->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
WARN_ON(!trans_pcie->cmd_in_flight);
trans_pcie->cmd_in_flight = false;
@@ -1392,8 +1410,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
kfree(txq->entries[idx].free_buf);
txq->entries[idx].free_buf = dup_buf;
- txq->need_update = 1;
-
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
/* start timer if queue currently empty */
@@ -1405,9 +1421,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
/*
* wake up the NIC to make sure that the firmware will see the host
* command - we will let the NIC sleep once all the host commands
- * returned.
+ * returned. This needs to be done only on NICs that have
+ * apmg_wake_up_wa set.
*/
- if (!trans_pcie->cmd_in_flight) {
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ !trans_pcie->cmd_in_flight) {
trans_pcie->cmd_in_flight = true;
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1661,7 +1679,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
void *tb1_addr;
u16 len, tb1_len, tb2_len;
- u8 wait_write_ptr = 0;
+ bool wait_write_ptr;
__le16 fc = hdr->frame_control;
u8 hdr_len = ieee80211_hdrlen(fc);
u16 wifi_seq;
@@ -1762,12 +1780,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx_data(trans->dev, skb,
skb->data + hdr_len, tb2_len);
- if (!ieee80211_has_morefrags(fc)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
+ wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
if (txq->need_update && q->read_ptr == q->write_ptr &&
@@ -1776,21 +1789,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ if (!wait_write_ptr)
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
/*
* At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
+ * and we will get a TX status notification eventually.
*/
if (iwl_queue_space(q) < q->high_mark) {
- if (wait_write_ptr) {
- txq->need_update = 1;
+ if (wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
- } else {
+ else
iwl_stop_queue(trans, txq);
- }
}
spin_unlock(&txq->lock);
return 0;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9d7a52f5a410..a312c653d116 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1676,7 +1676,9 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
-static void mac80211_hwsim_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void mac80211_hwsim_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
/* Not implemented, queues only on kernel side */
}
@@ -2056,6 +2058,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index c92f27aa71ed..706831df1fa2 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -212,8 +212,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie_types_header));
memcpy((u8 *)vht_op +
sizeof(struct mwifiex_ie_types_header),
- (u8 *)bss_desc->bcn_vht_oper +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_vht_oper,
le16_to_cpu(vht_op->header.len));
/* negotiate the channel width and central freq
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index d14ead8beca8..2bd07d681c5e 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -345,8 +345,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
memcpy((u8 *) ht_info +
sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_ht_oper +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_ht_oper,
le16_to_cpu(ht_info->header.len));
if (!(sband->ht_cap.cap &
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 63211707f939..860dfe71cf96 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -160,6 +160,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
int pad = 0, ret;
struct mwifiex_tx_param tx_param;
struct txpd *ptx_pd = NULL;
+ struct timeval tv;
int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
skb_src = skb_peek(&pra_list->skb_head);
@@ -184,6 +185,9 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
tx_info_aggr->bss_num = tx_info_src->bss_num;
skb_aggr->priority = skb_src->priority;
+ do_gettimeofday(&tv);
+ skb_aggr->tstamp = timeval_to_ktime(tv);
+
do {
/* Check if AMSDU can accommodate this MSDU */
if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
@@ -236,18 +240,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
skb_aggr, NULL);
} else {
- /*
- * Padding per MSDU will affect the length of next
- * packet and hence the exact length of next packet
- * is uncertain here.
- *
- * Also, aggregation of transmission buffer, while
- * downloading the data to the card, wont gain much
- * on the AMSDU packets as the AMSDU packets utilizes
- * the transmission buffer space to the maximum
- * (adapter->tx_buf_size).
- */
- tx_param.next_pkt_len = 0;
+ if (skb_src)
+ tx_param.next_pkt_len =
+ skb_src->len + sizeof(struct txpd);
+ else
+ tx_param.next_pkt_len = 0;
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
skb_aggr, &tx_param);
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index b9242c3dca43..3b55ce5690a5 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -200,4 +200,11 @@ getlog
cat getlog
+fw_dump
+ This command is used to dump firmware memory into files.
+ A separate file will be created for each memory segment.
+ Usage:
+
+ cat fw_dump
+
===============================================================================
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1062c918a7bf..8dee6c86f4f1 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -955,8 +955,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
adapter->cmd_wait_q.status = -ETIMEDOUT;
wake_up_interruptible(&adapter->cmd_wait_q.wait);
mwifiex_cancel_pending_ioctl(adapter);
- /* reset cmd_sent flag to unblock new commands */
- adapter->cmd_sent = false;
}
}
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index b8a49aad12fd..7b419bbcd544 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -257,6 +257,29 @@ free_and_exit:
}
/*
+ * Proc firmware dump read handler.
+ *
+ * This function is called when the 'fw_dump' file is opened for
+ * reading.
+ * This function dumps firmware memory in different files
+ * (ex. DTCM, ITCM, SQRAM etc.) based on the segments for
+ * debugging.
+ */
+static ssize_t
+mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mwifiex_private *priv = file->private_data;
+
+ if (!priv->adapter->if_ops.fw_dump)
+ return -EIO;
+
+ priv->adapter->if_ops.fw_dump(priv->adapter);
+
+ return 0;
+}
+
+/*
* Proc getlog file read handler.
*
* This function is called when the 'getlog' file is opened for reading
@@ -699,6 +722,7 @@ static const struct file_operations mwifiex_dfs_##name##_fops = { \
MWIFIEX_DFS_FILE_READ_OPS(info);
MWIFIEX_DFS_FILE_READ_OPS(debug);
MWIFIEX_DFS_FILE_READ_OPS(getlog);
+MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
MWIFIEX_DFS_FILE_OPS(regrdwr);
MWIFIEX_DFS_FILE_OPS(rdeeprom);
@@ -722,6 +746,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
MWIFIEX_DFS_ADD_FILE(getlog);
MWIFIEX_DFS_ADD_FILE(regrdwr);
MWIFIEX_DFS_ADD_FILE(rdeeprom);
+ MWIFIEX_DFS_ADD_FILE(fw_dump);
}
/*
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index e7b3e16e5d34..38da6ff6f416 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -42,12 +42,12 @@
#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
-#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 16
-#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 32
+#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 64
+#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 64
#define MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE 32
#define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE 16
-#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 32
-#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 48
+#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 64
+#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 64
#define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE 48
#define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE 32
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index ee494db54060..1b576722671d 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -303,7 +303,7 @@ struct mwifiex_ds_ant_cfg {
u32 rx_ant;
};
-#define MWIFIEX_NUM_OF_CMD_BUFFER 20
+#define MWIFIEX_NUM_OF_CMD_BUFFER 50
#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048
enum {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9c771b3e9918..cbabc12fbda3 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -521,7 +521,6 @@ done:
release_firmware(adapter->firmware);
adapter->firmware = NULL;
}
- complete(&adapter->fw_load);
if (init_failed)
mwifiex_free_adapter(adapter);
up(sem);
@@ -535,7 +534,6 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
{
int ret;
- init_completion(&adapter->fw_load);
ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
adapter->dev, GFP_KERNEL, adapter,
mwifiex_fw_dpc);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index d53e1e8c9467..34181192a666 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -672,6 +672,7 @@ struct mwifiex_if_ops {
int (*init_fw_port) (struct mwifiex_adapter *);
int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
void (*card_reset) (struct mwifiex_adapter *);
+ void (*fw_dump)(struct mwifiex_adapter *);
int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
};
@@ -787,7 +788,6 @@ struct mwifiex_adapter {
struct mwifiex_wait_queue cmd_wait_q;
u8 scan_wait_q_woken;
spinlock_t queue_lock; /* lock for tx queues */
- struct completion fw_load;
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
u16 max_mgmt_ie_index;
u8 scan_delay_cnt;
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index a7e8b96b2d90..c2cfeec466d8 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -221,9 +221,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
if (!adapter || !adapter->priv_num)
return;
- /* In case driver is removed when asynchronous FW load is in progress */
- wait_for_completion(&adapter->fw_load);
-
if (user_rmmod) {
#ifdef CONFIG_PM_SLEEP
if (adapter->is_suspended)
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 7b3af3d29ded..d75f4ebd4bdc 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -29,9 +29,6 @@
#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4
-#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD 15
-#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD 27
-#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD 35
/* Memory needed to store a max sized Channel List TLV for a firmware scan */
#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
@@ -1055,20 +1052,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
/*
* In associated state we will reduce the number of channels scanned per
- * scan command to avoid any traffic delay/loss. This number is decided
- * based on total number of channels to be scanned due to constraints
- * of command buffers.
+ * scan command to 1 to avoid any traffic delay/loss.
*/
- if (priv->media_connected) {
- if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
+ if (priv->media_connected)
*max_chan_per_scan = 1;
- else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
- *max_chan_per_scan = 2;
- else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
- *max_chan_per_scan = 3;
- else
- *max_chan_per_scan = 4;
- }
}
/*
@@ -1353,23 +1340,17 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->beacon_buf);
break;
case WLAN_EID_BSS_COEX_2040:
- bss_entry->bcn_bss_co_2040 = current_ptr +
- sizeof(struct ieee_types_header);
- bss_entry->bss_co_2040_offset = (u16) (current_ptr +
- sizeof(struct ieee_types_header) -
- bss_entry->beacon_buf);
+ bss_entry->bcn_bss_co_2040 = current_ptr;
+ bss_entry->bss_co_2040_offset =
+ (u16) (current_ptr - bss_entry->beacon_buf);
break;
case WLAN_EID_EXT_CAPABILITY:
- bss_entry->bcn_ext_cap = current_ptr +
- sizeof(struct ieee_types_header);
- bss_entry->ext_cap_offset = (u16) (current_ptr +
- sizeof(struct ieee_types_header) -
- bss_entry->beacon_buf);
+ bss_entry->bcn_ext_cap = current_ptr;
+ bss_entry->ext_cap_offset =
+ (u16) (current_ptr - bss_entry->beacon_buf);
break;
case WLAN_EID_OPMODE_NOTIF:
- bss_entry->oper_mode =
- (void *)(current_ptr +
- sizeof(struct ieee_types_header));
+ bss_entry->oper_mode = (void *)current_ptr;
bss_entry->oper_mode_offset =
(u16)((u8 *)bss_entry->oper_mode -
bss_entry->beacon_buf);
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d206f04d4994..a1773d3cb49f 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -85,6 +85,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->supports_sdio_new_mode = data->supports_sdio_new_mode;
card->has_control_mask = data->has_control_mask;
card->tx_buf_size = data->tx_buf_size;
+ card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
+ card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
}
sdio_claim_host(func);
@@ -177,9 +179,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
if (!adapter || !adapter->priv_num)
return;
- /* In case driver is removed when asynchronous FW load is in progress */
- wait_for_completion(&adapter->fw_load);
-
if (user_rmmod) {
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);
@@ -1842,8 +1841,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
card->mp_agg_pkt_limit, GFP_KERNEL);
ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
- SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
- SDIO_MP_RX_AGGR_DEF_BUF_SIZE);
+ card->mp_tx_agg_buf_size,
+ card->mp_rx_agg_buf_size);
if (ret) {
dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
kfree(card->mp_regs);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index c71201b2e2a3..6eea30b43ed7 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -64,10 +64,8 @@
#define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U)
#define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U)
-#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (8192) /* 8K */
-
-/* Multi port RX aggregation buffer size */
-#define SDIO_MP_RX_AGGR_DEF_BUF_SIZE (16384) /* 16K */
+#define MWIFIEX_MP_AGGR_BUF_SIZE_16K (16384)
+#define MWIFIEX_MP_AGGR_BUF_SIZE_32K (32768)
/* Misc. Config Register : Auto Re-enable interrupts */
#define AUTO_RE_ENABLE_INT BIT(4)
@@ -234,6 +232,8 @@ struct sdio_mmc_card {
bool supports_sdio_new_mode;
bool has_control_mask;
u16 tx_buf_size;
+ u32 mp_tx_agg_buf_size;
+ u32 mp_rx_agg_buf_size;
u32 mp_rd_bitmap;
u32 mp_wr_bitmap;
@@ -258,6 +258,8 @@ struct mwifiex_sdio_device {
bool supports_sdio_new_mode;
bool has_control_mask;
u16 tx_buf_size;
+ u32 mp_tx_agg_buf_size;
+ u32 mp_rx_agg_buf_size;
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -315,6 +317,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.supports_sdio_new_mode = false,
.has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -325,6 +329,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.supports_sdio_new_mode = false,
.has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -335,6 +341,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.supports_sdio_new_mode = false,
.has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -345,6 +353,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.supports_sdio_new_mode = true,
.has_control_mask = false,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
};
/*
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 9be6544bdded..32643555dd2a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -175,17 +175,19 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
switch (GET_RXSTBC(cap_info)) {
case MWIFIEX_RX_STBC1:
/* HT_CAP 1X1 mode */
- memset(&bss_cfg->ht_cap.mcs, 0xff, 1);
+ bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
break;
case MWIFIEX_RX_STBC12: /* fall through */
case MWIFIEX_RX_STBC123:
/* HT_CAP 2X2 mode */
- memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+ bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+ bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
break;
default:
dev_warn(priv->adapter->dev,
"Unsupported RX-STBC, default to 2x2\n");
- memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+ bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+ bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
break;
}
priv->ap_11n_enabled = 1;
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index edbe4aff00d8..a8ce8130cfae 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -22,9 +22,9 @@
#define USB_VERSION "1.0"
+static u8 user_rmmod;
static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
-static struct usb_card_rec *usb_card;
static struct usb_device_id mwifiex_usb_table[] = {
/* 8797 */
@@ -532,28 +532,38 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
static void mwifiex_usb_disconnect(struct usb_interface *intf)
{
struct usb_card_rec *card = usb_get_intfdata(intf);
+ struct mwifiex_adapter *adapter;
- if (!card) {
- pr_err("%s: card is NULL\n", __func__);
+ if (!card || !card->adapter) {
+ pr_err("%s: card or card->adapter is NULL\n", __func__);
return;
}
- mwifiex_usb_free(card);
+ adapter = card->adapter;
+ if (!adapter->priv_num)
+ return;
- if (card->adapter) {
- struct mwifiex_adapter *adapter = card->adapter;
+ if (user_rmmod) {
+#ifdef CONFIG_PM
+ if (adapter->is_suspended)
+ mwifiex_usb_resume(intf);
+#endif
- if (!adapter->priv_num)
- return;
+ mwifiex_deauthenticate_all(adapter);
- dev_dbg(adapter->dev, "%s: removing card\n", __func__);
- mwifiex_remove_card(adapter, &add_remove_card_sem);
+ mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_FUNC_SHUTDOWN);
}
+ mwifiex_usb_free(card);
+
+ dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+ mwifiex_remove_card(adapter, &add_remove_card_sem);
+
usb_set_intfdata(intf, NULL);
usb_put_dev(interface_to_usbdev(intf));
kfree(card);
- usb_card = NULL;
return;
}
@@ -565,6 +575,7 @@ static struct usb_driver mwifiex_usb_driver = {
.id_table = mwifiex_usb_table,
.suspend = mwifiex_usb_suspend,
.resume = mwifiex_usb_resume,
+ .soft_unbind = 1,
};
static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
@@ -762,7 +773,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
card->adapter = adapter;
adapter->dev = &card->udev->dev;
- usb_card = card;
switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
case USB8897_PID_1:
@@ -1025,25 +1035,8 @@ static void mwifiex_usb_cleanup_module(void)
if (!down_interruptible(&add_remove_card_sem))
up(&add_remove_card_sem);
- if (usb_card && usb_card->adapter) {
- struct mwifiex_adapter *adapter = usb_card->adapter;
-
- /* In case driver is removed when asynchronous FW downloading is
- * in progress
- */
- wait_for_completion(&adapter->fw_load);
-
-#ifdef CONFIG_PM
- if (adapter->is_suspended)
- mwifiex_usb_resume(usb_card->intf);
-#endif
-
- mwifiex_deauthenticate_all(adapter);
-
- mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY),
- MWIFIEX_FUNC_SHUTDOWN);
- }
+ /* set the flag as user is removing this module */
+ user_rmmod = 1;
usb_deregister(&mwifiex_usb_driver);
}
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 0a7cc742aed7..94b6c74ba727 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -426,15 +426,6 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
priv->tos_to_tid_inv[i];
}
- priv->aggr_prio_tbl[6].amsdu
- = priv->aggr_prio_tbl[6].ampdu_ap
- = priv->aggr_prio_tbl[6].ampdu_user
- = BA_STREAM_NOT_ALLOWED;
-
- priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
- = priv->aggr_prio_tbl[7].ampdu_user
- = BA_STREAM_NOT_ALLOWED;
-
mwifiex_set_ba_params(priv);
mwifiex_reset_11n_rx_seq_num(priv);
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index eede90b63f84..7be3a4839640 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -669,7 +669,8 @@ static unsigned int p54_flush_count(struct p54_common *priv)
return total;
}
-static void p54_flush(struct ieee80211_hw *dev, u32 queues, bool drop)
+static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct p54_common *priv = dev->priv;
unsigned int total, i;
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index cbf0a589d32a..8330fa33e50b 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -343,7 +343,7 @@ static void ray_detach(struct pcmcia_device *link)
ray_release(link);
local = netdev_priv(dev);
- del_timer(&local->timer);
+ del_timer_sync(&local->timer);
if (link->priv) {
unregister_netdev(dev);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 1b28cda6ca88..2eefbf159bc0 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1083,7 +1083,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
{
if (status) {
rsi_hal_send_sta_notify_frame(common,
- NL80211_IFTYPE_STATION,
+ RSI_IFTYPE_STATION,
STA_CONNECTED,
bssid,
qos_enable,
@@ -1092,7 +1092,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
rsi_send_auto_rate_request(common);
} else {
rsi_hal_send_sta_notify_frame(common,
- NL80211_IFTYPE_STATION,
+ RSI_IFTYPE_STATION,
STA_DISCONNECTED,
bssid,
qos_enable,
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index ac67c4ad63c2..225215a3b8bb 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -73,6 +73,7 @@
#define RX_BA_INDICATION 1
#define RSI_TBL_SZ 40
#define MAX_RETRIES 8
+#define RSI_IFTYPE_STATION 0
#define STD_RATE_MCS7 0x07
#define STD_RATE_MCS6 0x06
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 41d4a8167dc3..c17fcf272728 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1005,10 +1005,9 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
entry->skb->len + padding_len);
/*
- * Enable beaconing again.
+ * Restore beaconing state.
*/
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
/*
* Clean up beacon skb.
@@ -1039,13 +1038,14 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
void rt2800_clear_beacon(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &orig_reg);
+ reg = orig_reg;
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -1055,10 +1055,9 @@ void rt2800_clear_beacon(struct queue_entry *entry)
rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);
/*
- * Enabled beaconing again.
+ * Restore beaconing state.
*/
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
}
EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index e3b885d8f7db..010b76505243 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1448,7 +1448,8 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params);
void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index a87ee9b6585a..212ac4842c16 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -749,7 +749,8 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct data_queue *queue;
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 24402984ee57..9048a9cbe52c 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2031,13 +2031,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
static void rt61pci_clear_beacon(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+ reg = orig_reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -2048,10 +2049,9 @@ static void rt61pci_clear_beacon(struct queue_entry *entry)
HW_BEACON_OFFSET(entry->entry_idx), 0);
/*
- * Enable beaconing again.
+ * Restore global beaconing state.
*/
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
+ rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
}
/*
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index a140170b1eb3..95724ff9c726 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1597,13 +1597,14 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
unsigned int beacon_base;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+ reg = orig_reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1614,10 +1615,9 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
/*
- * Enable beaconing again.
+ * Restore beaconing state.
*/
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
}
static int rt73usb_get_tx_data_len(struct queue_entry *entry)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/rtl818x/rtl8180/Makefile
index 08b056db4a3b..21005bd8b43c 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/rtl818x/rtl8180/Makefile
@@ -1,5 +1,5 @@
-rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
+rtl818x_pci-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
-obj-$(CONFIG_RTL8180) += rtl8180.o
+obj-$(CONFIG_RTL8180) += rtl818x_pci.o
ccflags-y += -Idrivers/net/wireless/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 98d8256f0377..50d69b13f984 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -683,9 +683,8 @@ static void rtl8180_int_enable(struct ieee80211_hw *dev)
struct rtl8180_priv *priv = dev->priv;
if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
- rtl818x_iowrite32(priv, &priv->map->IMR, IMR_TMGDOK |
- IMR_TBDER | IMR_THPDER |
- IMR_THPDER | IMR_THPDOK |
+ rtl818x_iowrite32(priv, &priv->map->IMR,
+ IMR_TBDER | IMR_TBDOK |
IMR_TVODER | IMR_TVODOK |
IMR_TVIDER | IMR_TVIDOK |
IMR_TBEDER | IMR_TBEDOK |
@@ -911,7 +910,10 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
reg32 &= 0x00ffff00;
reg32 |= 0xb8000054;
rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
- }
+ } else
+ rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+ (1<<1) | (1<<2));
priv->rf->init(dev);
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 45ea4e1c4abe..7abef95d278b 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -334,9 +334,9 @@ struct rtl818x_csr {
* I don't like to introduce a ton of "reserved"..
* They are for RTL8187SE
*/
-#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + addr)
-#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + (addr >> 1))
-#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + (addr >> 2))
+#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + (addr))
+#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + ((addr) >> 1))
+#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + ((addr) >> 2))
#define FEMR_SE REG_ADDR2(0x1D4)
#define ARFR REG_ADDR2(0x1E0)
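
The REG_ADDR* change is the usual macro hygiene fix: every use of the argument is wrapped in parentheses so a composite expression keeps its intended grouping inside the macro body. A plain-C illustration of what goes wrong without them (values are arbitrary, not real register offsets):

#include <stdio.h>

#define SHIFT_BAD(addr)		(addr >> 1)
#define SHIFT_GOOD(addr)	((addr) >> 1)

int main(void)
{
	/* Passing "0x1D0 | 4":
	 *   SHIFT_BAD  expands to 0x1D0 | 4 >> 1  ->  0x1D0 | 2  == 0x1d2
	 *   SHIFT_GOOD expands to (0x1D0 | 4) >> 1               == 0xea
	 */
	printf("bad:  0x%x\n", SHIFT_BAD(0x1D0 | 4));
	printf("good: 0x%x\n", SHIFT_GOOD(0x1D0 | 4));
	return 0;
}
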
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 4ec424f26672..b1ed6d0796f6 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1387,7 +1387,8 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
* before switch channel or power save, or tx buffer packet
* maybe send after offchannel or rf sleep, this may cause
* dis-association by AP */
-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index 1b4101bf9974..347af1e4f438 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -93,7 +93,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
rtl8188ee_bt_reg_init(hw);
- rtlpci->msi_support = true;
rtlpriv->dm.dm_initialgain_enable = 1;
rtlpriv->dm.dm_flag = 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index b4577ebc4bb0..a07213645da0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -92,7 +92,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
rtl8723be_bt_reg_init(hw);
- rtlpci->msi_support = true;
+ rtlpci->msi_support = false;
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
rtlpriv->dm.dm_initialgain_enable = 1;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index ed88d3913483..077eb5b9cd74 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5184,7 +5184,8 @@ out:
mutex_unlock(&wl->mutex);
}
-static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct wl1271 *wl = hw->priv;
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 29ef2492951f..d3dd7bfdf3f1 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -217,7 +217,7 @@ static struct wl1271_if_operations sdio_ops = {
static int wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct wlcore_platdev_data *pdev_data;
+ struct wlcore_platdev_data pdev_data;
struct wl12xx_sdio_glue *glue;
struct resource res[1];
mmc_pm_flag_t mmcflags;
@@ -228,16 +228,13 @@ static int wl1271_probe(struct sdio_func *func,
if (func->num != 0x02)
return -ENODEV;
- pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
- if (!pdev_data)
- goto out;
-
- pdev_data->if_ops = &sdio_ops;
+ memset(&pdev_data, 0x00, sizeof(pdev_data));
+ pdev_data.if_ops = &sdio_ops;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&func->dev, "can't allocate glue\n");
- goto out_free_pdev_data;
+ goto out;
}
glue->dev = &func->dev;
@@ -248,9 +245,9 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- pdev_data->pdata = wl12xx_get_platform_data();
- if (IS_ERR(pdev_data->pdata)) {
- ret = PTR_ERR(pdev_data->pdata);
+ pdev_data.pdata = wl12xx_get_platform_data();
+ if (IS_ERR(pdev_data.pdata)) {
+ ret = PTR_ERR(pdev_data.pdata);
dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
goto out_free_glue;
}
@@ -260,7 +257,7 @@ static int wl1271_probe(struct sdio_func *func,
dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
if (mmcflags & MMC_PM_KEEP_POWER)
- pdev_data->pdata->pwr_in_suspend = true;
+ pdev_data.pdata->pwr_in_suspend = true;
sdio_set_drvdata(func, glue);
@@ -289,7 +286,7 @@ static int wl1271_probe(struct sdio_func *func,
memset(res, 0x00, sizeof(res));
- res[0].start = pdev_data->pdata->irq;
+ res[0].start = pdev_data.pdata->irq;
res[0].flags = IORESOURCE_IRQ;
res[0].name = "irq";
@@ -299,8 +296,8 @@ static int wl1271_probe(struct sdio_func *func,
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, pdev_data,
- sizeof(*pdev_data));
+ ret = platform_device_add_data(glue->core, &pdev_data,
+ sizeof(pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -319,9 +316,6 @@ out_dev_put:
out_free_glue:
kfree(glue);
-out_free_pdev_data:
- kfree(pdev_data);
-
out:
return ret;
}
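
The conversion works because platform_device_add_data() copies the buffer it is handed into the platform device, so the source structure only has to stay valid for the duration of the call; keeping it on the probe function's stack removes both the allocation-failure path and the kfree() on every error exit. The spi probe below gets the same treatment. A reduced sketch of the pattern, assuming only that copy-on-add behaviour (my_pdata, "my-child" and the field name are made up for illustration):

#include <linux/platform_device.h>
#include <linux/string.h>

struct my_pdata {
	int some_field;
};

static int my_register_child(struct device *parent)
{
	struct my_pdata pdata;			/* no kzalloc()/kfree() needed */
	struct platform_device *pdev;
	int ret;

	memset(&pdata, 0, sizeof(pdata));
	pdata.some_field = 42;

	pdev = platform_device_alloc("my-child", PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;
	pdev->dev.parent = parent;

	ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
	if (ret)
		goto err_put;

	ret = platform_device_add(pdev);
	if (ret)
		goto err_put;
	return 0;

err_put:
	platform_device_put(pdev);
	return ret;
}
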
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index dbe826dd7c23..5f3a389dd74c 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -327,27 +327,25 @@ static struct wl1271_if_operations spi_ops = {
static int wl1271_probe(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue;
- struct wlcore_platdev_data *pdev_data;
+ struct wlcore_platdev_data pdev_data;
struct resource res[1];
int ret = -ENOMEM;
- pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
- if (!pdev_data)
- goto out;
+ memset(&pdev_data, 0x00, sizeof(pdev_data));
- pdev_data->pdata = dev_get_platdata(&spi->dev);
- if (!pdev_data->pdata) {
+ pdev_data.pdata = dev_get_platdata(&spi->dev);
+ if (!pdev_data.pdata) {
dev_err(&spi->dev, "no platform data\n");
ret = -ENODEV;
- goto out_free_pdev_data;
+ goto out;
}
- pdev_data->if_ops = &spi_ops;
+ pdev_data.if_ops = &spi_ops;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&spi->dev, "can't allocate glue\n");
- goto out_free_pdev_data;
+ goto out;
}
glue->dev = &spi->dev;
@@ -385,8 +383,8 @@ static int wl1271_probe(struct spi_device *spi)
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, pdev_data,
- sizeof(*pdev_data));
+ ret = platform_device_add_data(glue->core, &pdev_data,
+ sizeof(pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -406,9 +404,6 @@ out_dev_put:
out_free_glue:
kfree(glue);
-out_free_pdev_data:
- kfree(pdev_data);
-
out:
return ret;
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index e25d2bc898e5..296b0ec8744d 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -142,7 +142,10 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
delta = ktime_to_ns(kt);
err = ops->adjtime(ops, delta);
} else if (tx->modes & ADJ_FREQUENCY) {
- err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq));
+ s32 ppb = scaled_ppm_to_ppb(tx->freq);
+ if (ppb > ops->max_adj || ppb < -ops->max_adj)
+ return -ERANGE;
+ err = ops->adjfreq(ops, ppb);
ptp->dialed_frequency = tx->freq;
} else if (tx->modes == 0) {
tx->freq = ptp->dialed_frequency;
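
The added check rejects a requested frequency offset, in parts per billion, that lies outside the clock's advertised max_adj before it ever reaches the driver's adjfreq() callback. The logic in isolation, as plain C with made-up limits:

#include <stdio.h>
#include <errno.h>

/* Reject an offset (ppb) whose magnitude exceeds the advertised limit. */
static int check_freq_offset(long ppb, long max_adj)
{
	if (ppb > max_adj || ppb < -max_adj)
		return -ERANGE;
	return 0;		/* safe to hand ppb to the clock driver */
}

int main(void)
{
	printf("%d\n", check_freq_offset(500000, 1000000));	/* 0: accepted */
	printf("%d\n", check_freq_offset(-2000000, 1000000));	/* -ERANGE: rejected */
	return 0;
}
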
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index fd7b3bd80789..d837c3c5330f 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3348,7 +3348,7 @@ static int __init claw_init(void)
}
CLAW_DBF_TEXT(2, setup, "init_mod");
claw_root_dev = root_device_register("claw");
- ret = PTR_RET(claw_root_dev);
+ ret = PTR_ERR_OR_ZERO(claw_root_dev);
if (ret)
goto register_err;
ret = ccw_driver_register(&claw_ccw_driver);
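
PTR_ERR_OR_ZERO() is the current name for what PTR_RET() did: collapse an ERR_PTR-style return into either its encoded error or 0. The same one-line substitution repeats below in ctcm, lcs and qeth_core. For reference, its behaviour is equivalent to the following sketch (not the kernel's exact definition):

#include <linux/err.h>

static inline int ptr_err_or_zero_sketch(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* pointer encodes an errno value */
	return 0;			/* valid pointer: success */
}
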
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 70b3a023100e..03b6ad035577 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1837,7 +1837,7 @@ static int __init ctcm_init(void)
if (ret)
goto out_err;
ctcm_root_dev = root_device_register("ctcm");
- ret = PTR_RET(ctcm_root_dev);
+ ret = PTR_ERR_OR_ZERO(ctcm_root_dev);
if (ret)
goto register_err;
ret = ccw_driver_register(&ctcm_ccw_driver);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c461f2aac610..8d5d96969c39 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2442,7 +2442,7 @@ __init lcs_init_module(void)
if (rc)
goto out_err;
lcs_root_dev = root_device_register("lcs");
- rc = PTR_RET(lcs_root_dev);
+ rc = PTR_ERR_OR_ZERO(lcs_root_dev);
if (rc)
goto register_err;
rc = ccw_driver_register(&lcs_ccw_driver);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 5333b2c018e7..a2088af51cc5 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -268,10 +268,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
#define QETH_NO_PRIO_QUEUEING 0
#define QETH_PRIO_Q_ING_PREC 1
#define QETH_PRIO_Q_ING_TOS 2
-#define IP_TOS_LOWDELAY 0x10
-#define IP_TOS_HIGHTHROUGHPUT 0x08
-#define IP_TOS_HIGHRELIABILITY 0x04
-#define IP_TOS_NOTIMPORTANT 0x02
+#define QETH_PRIO_Q_ING_SKB 3
+#define QETH_PRIO_Q_ING_VLAN 4
/* Packing */
#define QETH_LOW_WATERMARK_PACK 2
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 22470a3b182f..34993009a9e1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -20,6 +20,7 @@
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/iucv/af_iucv.h>
+#include <net/dsfield.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
@@ -3670,42 +3671,56 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
+/**
+ * Note: Function assumes that we have 4 outbound queues.
+ */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
int ipv, int cast_type)
{
- if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
- card->info.type == QETH_CARD_TYPE_OSX))
- return card->qdio.default_out_queue;
- switch (card->qdio.no_out_queues) {
- case 4:
- if (cast_type && card->info.is_multicast_different)
- return card->info.is_multicast_different &
- (card->qdio.no_out_queues - 1);
- if (card->qdio.do_prio_queueing && (ipv == 4)) {
- const u8 tos = ip_hdr(skb)->tos;
-
- if (card->qdio.do_prio_queueing ==
- QETH_PRIO_Q_ING_TOS) {
- if (tos & IP_TOS_NOTIMPORTANT)
- return 3;
- if (tos & IP_TOS_HIGHRELIABILITY)
- return 2;
- if (tos & IP_TOS_HIGHTHROUGHPUT)
- return 1;
- if (tos & IP_TOS_LOWDELAY)
- return 0;
- }
- if (card->qdio.do_prio_queueing ==
- QETH_PRIO_Q_ING_PREC)
- return 3 - (tos >> 6);
- } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
- /* TODO: IPv6!!! */
+ __be16 *tci;
+ u8 tos;
+
+ if (cast_type && card->info.is_multicast_different)
+ return card->info.is_multicast_different &
+ (card->qdio.no_out_queues - 1);
+
+ switch (card->qdio.do_prio_queueing) {
+ case QETH_PRIO_Q_ING_TOS:
+ case QETH_PRIO_Q_ING_PREC:
+ switch (ipv) {
+ case 4:
+ tos = ipv4_get_dsfield(ip_hdr(skb));
+ break;
+ case 6:
+ tos = ipv6_get_dsfield(ipv6_hdr(skb));
+ break;
+ default:
+ return card->qdio.default_out_queue;
}
- return card->qdio.default_out_queue;
- case 1: /* fallthrough for single-out-queue 1920-device */
+ if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
+ return ~tos >> 6 & 3;
+ if (tos & IPTOS_MINCOST)
+ return 3;
+ if (tos & IPTOS_RELIABILITY)
+ return 2;
+ if (tos & IPTOS_THROUGHPUT)
+ return 1;
+ if (tos & IPTOS_LOWDELAY)
+ return 0;
+ break;
+ case QETH_PRIO_Q_ING_SKB:
+ if (skb->priority > 5)
+ return 0;
+ return ~skb->priority >> 1 & 3;
+ case QETH_PRIO_Q_ING_VLAN:
+ tci = &((struct ethhdr *)skb->data)->h_proto;
+ if (*tci == ETH_P_8021Q)
+ return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3;
+ break;
default:
- return card->qdio.default_out_queue;
+ break;
}
+ return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
@@ -5824,7 +5839,7 @@ static int __init qeth_core_init(void)
if (rc)
goto out_err;
qeth_core_root_dev = root_device_register("qeth");
- rc = PTR_RET(qeth_core_root_dev);
+ rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
if (rc)
goto register_err;
qeth_core_header_cache = kmem_cache_create("qeth_hdr",
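
The rewritten qeth_get_priority_queue() maps traffic classes onto the four outbound queues with small bit tricks: for QETH_PRIO_Q_ING_PREC the two most significant TOS/DS bits are inverted so the highest precedence lands on queue 0 and the lowest on queue 3 (for an 8-bit tos, ~tos >> 6 & 3 equals 3 - (tos >> 6)), and QETH_PRIO_Q_ING_SKB folds skb->priority onto the queues with the same invert-and-shift idea. A standalone check of the precedence expression (plain C, relying on the arithmetic right shift of negative values that GCC provides, as the kernel code does):

#include <stdio.h>

/* Same expression as the QETH_PRIO_Q_ING_PREC case above. */
static int prec_to_queue(unsigned char tos)
{
	return ~tos >> 6 & 3;
}

int main(void)
{
	/* IP precedence 0..1 -> queue 3, 2..3 -> 2, 4..5 -> 1, 6..7 -> 0 */
	printf("%d %d %d %d\n",
	       prec_to_queue(0x00), prec_to_queue(0x40),
	       prec_to_queue(0x80), prec_to_queue(0xE0));
	return 0;
}
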
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 425c0ecf1f3b..8a25a2be9890 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -217,6 +217,10 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
return sprintf(buf, "%s\n", "by precedence");
case QETH_PRIO_Q_ING_TOS:
return sprintf(buf, "%s\n", "by type of service");
+ case QETH_PRIO_Q_ING_SKB:
+ return sprintf(buf, "%s\n", "by skb-priority");
+ case QETH_PRIO_Q_ING_VLAN:
+ return sprintf(buf, "%s\n", "by VLAN headers");
default:
return sprintf(buf, "always queue %i\n",
card->qdio.default_out_queue);
@@ -250,11 +254,23 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
}
tmp = strsep((char **) &buf, "\n");
- if (!strcmp(tmp, "prio_queueing_prec"))
+ if (!strcmp(tmp, "prio_queueing_prec")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
- else if (!strcmp(tmp, "prio_queueing_tos"))
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+ } else if (!strcmp(tmp, "prio_queueing_skb")) {
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB;
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+ } else if (!strcmp(tmp, "prio_queueing_tos")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
- else if (!strcmp(tmp, "no_prio_queueing:0")) {
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+ } else if (!strcmp(tmp, "prio_queueing_vlan")) {
+ if (!card->options.layer2) {
+ rc = -ENOTSUPP;
+ goto out;
+ }
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+ } else if (!strcmp(tmp, "no_prio_queueing:0")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 0;
} else if (!strcmp(tmp, "no_prio_queueing:1")) {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8dea3f12ccc1..e232ceca38fe 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -725,15 +725,20 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int elements = 0;
struct qeth_card *card = dev->ml_priv;
struct sk_buff *new_skb = skb;
- int ipv = qeth_get_ip_version(skb);
int cast_type = qeth_l2_get_cast_type(card, skb);
- struct qeth_qdio_out_q *queue = card->qdio.out_qs
- [qeth_get_priority_queue(card, skb, ipv, cast_type)];
+ struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
int data_offset = -1;
int elements_needed = 0;
int hd_len = 0;
+ if (card->qdio.do_prio_queueing || (cast_type &&
+ card->info.is_multicast_different))
+ queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
+ qeth_get_ip_version(skb), cast_type)];
+ else
+ queue = card->qdio.out_qs[card->qdio.default_out_queue];
+
if ((card->state != CARD_STATE_UP) || !card->lan_online) {
card->stats.tx_carrier_errors++;
goto tx_drop;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 3524d34ff694..c8d91d797ec8 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2926,8 +2926,11 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *new_skb = NULL;
int ipv = qeth_get_ip_version(skb);
int cast_type = qeth_l3_get_cast_type(card, skb);
- struct qeth_qdio_out_q *queue = card->qdio.out_qs
- [qeth_get_priority_queue(card, skb, ipv, cast_type)];
+ struct qeth_qdio_out_q *queue =
+ card->qdio.out_qs[card->qdio.do_prio_queueing
+ || (cast_type && card->info.is_multicast_different) ?
+ qeth_get_priority_queue(card, skb, ipv, cast_type) :
+ card->qdio.default_out_queue];
int tx_bytes = skb->len;
bool large_send;
int data_offset = -1;
diff --git a/drivers/staging/rtl8821ae/core.c b/drivers/staging/rtl8821ae/core.c
index ff3139b6da65..63ae2d1997d3 100644
--- a/drivers/staging/rtl8821ae/core.c
+++ b/drivers/staging/rtl8821ae/core.c
@@ -1414,23 +1414,15 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
* before switch channel or power save, or tx buffer packet
* maybe send after offchannel or rf sleep, this may cause
* dis-association by AP */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void rtl_op_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->intf_ops->flush)
rtlpriv->intf_ops->flush(hw, queues, drop);
}
-#else
-static void rtl_op_flush(struct ieee80211_hw *hw, bool drop)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->intf_ops->flush)
- rtlpriv->intf_ops->flush(hw, drop);
-}
-#endif
const struct ieee80211_ops rtl_ops = {
.start = rtl_op_start,