Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/horizon.c | 24
-rw-r--r--  drivers/atm/lanai.c | 9
-rw-r--r--  drivers/bluetooth/bfusb.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 34
-rw-r--r--  drivers/clk/rockchip/clk-rk3288.c | 14
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 98
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 4
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h | 26
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 13
-rw-r--r--  drivers/isdn/hardware/mISDN/mISDNipac.c | 12
-rw-r--r--  drivers/isdn/hardware/mISDN/w6692.c | 6
-rw-r--r--  drivers/isdn/hisax/hfc4s8s_l1.c | 21
-rw-r--r--  drivers/net/bonding/bond_options.c | 5
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c | 4
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 8
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 8
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 8
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 94
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 109
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 3
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 9
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 29
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 55
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 525
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/mc5.c | 16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 524
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 742
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 223
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 1115
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 367
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 2892
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_values.h | 118
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 81
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 44
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 43
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 16
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.c | 56
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.h | 5
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 19
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 173
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_stats.h | 5
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.c | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 5
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 250
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 104
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 15
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/xgmac_mdio.c | 96
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/Makefile | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 969
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_mdio.c | 186
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 125
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 43
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 97
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r--  drivers/net/ethernet/jme.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 6
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 4
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c | 4
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 4
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 4
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 25
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 6
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 16
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 4
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 459
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h | 1
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 7
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 5
-rw-r--r--  drivers/net/ethernet/ti/cpts.h | 1
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 6
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 4
-rw-r--r--  drivers/net/fddi/skfp/smt.c | 12
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 82
-rw-r--r--  drivers/net/ieee802154/cc2520.c | 27
-rw-r--r--  drivers/net/ieee802154/mrf24j40.c | 6
-rw-r--r--  drivers/net/irda/ali-ircc.c | 11
-rw-r--r--  drivers/net/irda/ali-ircc.h | 5
-rw-r--r--  drivers/net/irda/au1k_ir.c | 3
-rw-r--r--  drivers/net/irda/irda-usb.c | 10
-rw-r--r--  drivers/net/irda/irda-usb.h | 5
-rw-r--r--  drivers/net/irda/kingsun-sir.c | 3
-rw-r--r--  drivers/net/irda/ks959-sir.c | 3
-rw-r--r--  drivers/net/irda/mcs7780.c | 2
-rw-r--r--  drivers/net/irda/mcs7780.h | 1
-rw-r--r--  drivers/net/irda/nsc-ircc.c | 7
-rw-r--r--  drivers/net/irda/nsc-ircc.h | 5
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 2
-rw-r--r--  drivers/net/irda/stir4200.c | 16
-rw-r--r--  drivers/net/irda/via-ircc.h | 4
-rw-r--r--  drivers/net/irda/vlsi_ir.c | 46
-rw-r--r--  drivers/net/irda/vlsi_ir.h | 2
-rw-r--r--  drivers/net/macvtap.c | 6
-rw-r--r--  drivers/net/team/team.c | 7
-rw-r--r--  drivers/net/tun.c | 15
-rw-r--r--  drivers/net/usb/r8152.c | 132
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_defs.h | 1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 54
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 27
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 6
-rw-r--r--  drivers/net/vxlan.c | 281
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 71
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 2
-rw-r--r--  drivers/net/wireless/ath/dfs_pattern_detector.c | 2
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_sdio_ops.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 156
-rw-r--r--  drivers/net/wireless/rtlwifi/base.h | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 24
-rw-r--r--  drivers/net/wireless/rtlwifi/core.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/trx.c | 162
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/hw.c | 165
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/phy.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/sw.c | 30
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/trx.c | 13
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/mac.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 28
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/trx.c | 20
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/fw.c | 17
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/fw.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/sw.c | 30
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/trx.c | 27
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ee/trx.c | 196
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ee/trx.h | 9
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/def.h | 8
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/sw.c | 30
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 23
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/trx.c | 162
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/phy.c | 25
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/phy.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/trx.c | 162
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8821ae/def.h | 54
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8821ae/sw.c | 74
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8821ae/trx.c | 232
-rw-r--r--  drivers/net/wireless/rtlwifi/wifi.h | 96
-rw-r--r--  drivers/net/xen-netfront.c | 196
-rw-r--r--  drivers/phy/phy-miphy365x.c | 29
-rw-r--r--  drivers/phy/phy-stih407-usb.c | 25
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 8
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c | 796
-rw-r--r--  drivers/scsi/csiostor/csio_hw.h | 13
-rw-r--r--  drivers/scsi/csiostor/csio_hw_chip.h | 65
-rw-r--r--  drivers/scsi/csiostor/csio_hw_t4.c | 144
-rw-r--r--  drivers/scsi/csiostor/csio_hw_t5.c | 150
-rw-r--r--  drivers/scsi/csiostor/csio_isr.c | 2
-rw-r--r--  drivers/scsi/csiostor/csio_lnode.c | 2
-rw-r--r--  drivers/scsi/csiostor/csio_mb.c | 53
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 4
-rw-r--r--  drivers/scsi/csiostor/csio_wr.c | 154
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 18
-rw-r--r--  drivers/vhost/net.c | 2
217 files changed, 9041 insertions, 5709 deletions
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 1dc0519333f2..527bbd595e37 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -458,12 +458,6 @@ static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode,
return;
}
-static inline u16 query_tx_channel_config (hrz_dev * dev, short chan, u8 mode) {
- wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
- chan * TX_CHANNEL_CONFIG_MULT | mode);
- return rd_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF);
-}
-
/********** dump functions **********/
static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
@@ -513,16 +507,6 @@ static inline void dump_framer (hrz_dev * dev) {
/* RX channels are 10 bit integers, these fns are quite paranoid */
-static inline int channel_to_vpivci (const u16 channel, short * vpi, int * vci) {
- unsigned short vci_bits = 10 - vpi_bits;
- if ((channel & RX_CHANNEL_MASK) == channel) {
- *vci = channel & ((~0)<<vci_bits);
- *vpi = channel >> vci_bits;
- return channel ? 0 : -EINVAL;
- }
- return -EINVAL;
-}
-
static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) {
unsigned short vci_bits = 10 - vpi_bits;
if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) {
@@ -1260,14 +1244,6 @@ static u32 rx_queue_entry_next (hrz_dev * dev) {
return rx_queue_entry;
}
-/********** handle RX disabled by device **********/
-
-static inline void rx_disabled_handler (hrz_dev * dev) {
- wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
- // count me please
- PRINTK (KERN_WARNING, "RX was disabled!");
-}
-
/********** handle RX data received by device **********/
// called from IRQ handler
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 93eaf8d94492..d2e9ea8d504e 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -681,15 +681,6 @@ static inline int aal5_size(int size)
return cells * 48;
}
-/* How many bytes can we send if we have "space" space, assuming we have
- * to send full cells
- */
-static inline int aal5_spacefor(int space)
-{
- int cells = space / 48;
- return cells * 48;
-}
-
/* -------------------- FREE AN ATM SKB: */
static inline void lanai_free_skb(struct atm_vcc *atmvcc, struct sk_buff *skb)
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index b2e7e94a6771..fcfb72e9e0ee 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -696,6 +696,8 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
hdev->flush = bfusb_flush;
hdev->send = bfusb_send_frame;
+ set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 19cf2cf22e87..f051a93c6cad 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -49,6 +49,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_INTEL_BOOT 0x200
#define BTUSB_BCM_PATCHRAM 0x400
#define BTUSB_MARVELL 0x800
+#define BTUSB_AVM 0x1000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -85,7 +86,7 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x05ac, 0x8281) },
/* AVM BlueFRITZ! USB v2.0 */
- { USB_DEVICE(0x057c, 0x3800) },
+ { USB_DEVICE(0x057c, 0x3800), .driver_info = BTUSB_AVM },
/* Bluetooth Ultraport Module from IBM */
{ USB_DEVICE(0x04bf, 0x030a) },
@@ -1943,6 +1944,31 @@ static int btusb_set_bdaddr_bcm(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return 0;
}
+static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
+ const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ u8 buf[10];
+ long ret;
+
+ buf[0] = 0x01;
+ buf[1] = 0x01;
+ buf[2] = 0x00;
+ buf[3] = sizeof(bdaddr_t);
+ memcpy(buf + 4, bdaddr, sizeof(bdaddr_t));
+
+ skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ BT_ERR("%s: Change address command failed (%ld)",
+ hdev->name, ret);
+ return ret;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -2055,9 +2081,15 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_MARVELL)
hdev->set_bdaddr = btusb_set_bdaddr_marvell;
+ if (id->driver_info & BTUSB_AVM)
+ set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+
if (id->driver_info & BTUSB_INTEL_BOOT)
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ if (id->driver_info & BTUSB_ATH3012)
+ hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+
/* Interface numbers are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, 1);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index ac6be7c0132d..40d267f5dea3 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -190,7 +190,7 @@ PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
PNAME(mux_uart3_p) = { "uart3_src", "uart3_frac", "xin24m" };
PNAME(mux_uart4_p) = { "uart4_src", "uart4_frac", "xin24m" };
PNAME(mux_cif_out_p) = { "cif_src", "xin24m" };
-PNAME(mux_macref_p) = { "mac_src", "ext_gmac" };
+PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" };
PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
@@ -575,18 +575,18 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0,
RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
- COMPOSITE(0, "mac_src", mux_pll_src_npll_cpll_gpll_p, 0,
+ COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(2), 5, GFLAGS),
- MUX(0, "macref", mux_macref_p, 0,
+ MUX(SCLK_MAC, "mac_clk", mux_mac_p, 0,
RK3288_CLKSEL_CON(21), 4, 1, MFLAGS),
- GATE(0, "sclk_macref_out", "macref", 0,
+ GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 3, GFLAGS),
- GATE(SCLK_MACREF, "sclk_macref", "macref", 0,
+ GATE(SCLK_MACREF, "sclk_macref", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 2, GFLAGS),
- GATE(SCLK_MAC_RX, "sclk_mac_rx", "macref", 0,
+ GATE(SCLK_MAC_RX, "sclk_mac_rx", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 0, GFLAGS),
- GATE(SCLK_MAC_TX, "sclk_mac_tx", "macref", 0,
+ GATE(SCLK_MAC_TX, "sclk_mac_tx", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 1, GFLAGS),
COMPOSITE(0, "hsadc_src", mux_pll_src_cpll_gpll_p, 0,
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 095c1774592c..a3025e7ae35f 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -15,6 +15,7 @@
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 9edc200b311d..694e03075b4b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -235,19 +235,19 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
- ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
+ ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
((AF_INET == ep->com.remote_addr.ss_family) ?
sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
sizeof(struct tcphdr);
ep->mss = ep->emss;
- if (GET_TCPOPT_TSTAMP(opt))
+ if (TCPOPT_TSTAMP_G(opt))
ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
- GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
- PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
+ TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+ PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
ep->mss, ep->emss);
}
@@ -652,24 +652,24 @@ static int send_connect(struct c4iw_ep *ep)
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
- opt0 = (nocong ? NO_CONG(1) : 0) |
+ opt0 = (nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
- DELACK(1) |
+ DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP(ep->tos) |
+ DSCP_V(ep->tos) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
- CCTRL_ECN(enable_ecn) |
+ CCTRL_ECN_V(enable_ecn) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps)
- opt2 |= TSTAMPS_EN(1);
+ opt2 |= TSTAMPS_EN_F;
if (enable_tcp_sack)
- opt2 |= SACK_EN(1);
+ opt2 |= SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN_F;
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -1042,7 +1042,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_ep *ep;
struct cpl_act_establish *req = cplhdr(skb);
unsigned int tid = GET_TID(req);
- unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+ unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
struct tid_info *t = dev->rdev.lldi.tids;
ep = lookup_atid(t, atid);
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
- req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+ req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
@@ -1782,27 +1782,27 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
- req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
- (nocong ? NO_CONG(1) : 0) |
+ req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
+ (nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
- DELACK(1) |
+ DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP(ep->tos) |
+ DSCP_V(ep->tos) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win));
- req->tcb.opt2 = (__force __be32) (PACE(1) |
- TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+ req->tcb.opt2 = (__force __be32) (PACE_V(1) |
+ TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
RX_CHANNEL_V(0) |
- CCTRL_ECN(enable_ecn) |
+ CCTRL_ECN_V(enable_ecn) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
if (enable_tcp_timestamps)
- req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
+ req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
if (enable_tcp_sack)
- req->tcb.opt2 |= (__force __be32)SACK_EN(1);
+ req->tcb.opt2 |= (__force __be32)SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
@@ -2023,10 +2023,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_act_open_rpl *rpl = cplhdr(skb);
- unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
- ntohl(rpl->atid_status)));
+ unsigned int atid = TID_TID_G(AOPEN_ATID_G(
+ ntohl(rpl->atid_status)));
struct tid_info *t = dev->rdev.lldi.tids;
- int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+ int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
struct sockaddr_in *la;
struct sockaddr_in *ra;
struct sockaddr_in6 *la6;
@@ -2064,7 +2064,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (ep->com.local_addr.ss_family == AF_INET &&
dev->rdev.lldi.enable_fw_ofld_conn) {
send_fw_act_open_req(ep,
- GET_TID_TID(GET_AOPEN_ATID(
+ TID_TID_G(AOPEN_ATID_G(
ntohl(rpl->atid_status))));
return 0;
}
@@ -2181,24 +2181,24 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
win = ep->rcv_win >> 10;
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
- opt0 = (nocong ? NO_CONG(1) : 0) |
+ opt0 = (nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
- DELACK(1) |
+ DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP(ep->tos >> 2) |
+ DSCP_V(ep->tos >> 2) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps && req->tcpopt.tstamp)
- opt2 |= TSTAMPS_EN(1);
+ opt2 |= TSTAMPS_EN_F;
if (enable_tcp_sack && req->tcpopt.sack)
- opt2 |= SACK_EN(1);
+ opt2 |= SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN_F;
if (enable_ecn) {
@@ -2208,7 +2208,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
G_IP_HDR_LEN(hlen);
if (tcph->ece && tcph->cwr)
- opt2 |= CCTRL_ECN(1);
+ opt2 |= CCTRL_ECN_V(1);
}
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -2277,7 +2277,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep = NULL, *parent_ep;
struct cpl_pass_accept_req *req = cplhdr(skb);
- unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
+ unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
struct dst_entry *dst;
@@ -2310,14 +2310,14 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
ntohs(peer_port), peer_mss);
dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
local_port, peer_port,
- GET_POPEN_TOS(ntohl(req->tos_stid)));
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
} else {
PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
, __func__, parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
- PASS_OPEN_TOS(ntohl(req->tos_stid)),
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
((struct sockaddr_in6 *)
&parent_ep->com.local_addr)->sin6_scope_id);
}
@@ -2375,7 +2375,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
}
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
- child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
+ child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
child_ep->dst = dst;
child_ep->hwtid = hwtid;
@@ -3501,23 +3501,23 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
- V_SYN_MAC_IDX(G_RX_MACIDX(
+ V_SYN_MAC_IDX(RX_MACIDX_G(
(__force int) htonl(l2info))) |
F_SYN_XACT_MATCH);
eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
- G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
- G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
- req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+ RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
+ RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
+ req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(RX_CHAN_G(
(__force int) htonl(l2info))) |
- V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+ V_TCP_HDR_LEN(RX_TCPHDR_LEN_G(
(__force int) htons(hdr_len))) |
- V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+ V_IP_HDR_LEN(RX_IPHDR_LEN_G(
(__force int) htons(hdr_len))) |
- V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
+ V_ETH_HDR_LEN(RX_ETHHDR_LEN_G(eth_hdr_len)));
req->vlan = (__force __be16) vlantag;
req->len = (__force __be16) len;
- req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
- PASS_OPEN_TOS(tos));
+ req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
+ PASS_OPEN_TOS_V(tos));
req->tcpopt.mss = htons(tmp_opt.mss_clamp);
if (tmp_opt.wscale_ok)
req->tcpopt.wsf = tmp_opt.snd_wscale;
@@ -3542,7 +3542,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
memset(req, 0, sizeof(*req));
- req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+ req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
req->le.filter = (__force __be32) filter;
@@ -3556,7 +3556,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
FW_OFLD_CONNECTION_WR_ASTID_V(
- GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+ PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
/*
* We store the qid in opt2 which will be used by the firmware
@@ -3613,7 +3613,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
struct neighbour *neigh;
/* Drop all non-SYN packets */
- if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+ if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
goto reject;
/*
@@ -3635,8 +3635,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
}
eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
- G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
- G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+ RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
+ RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
if (eth_hdr_len == ETH_HLEN) {
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index cb43c2299ac0..b9dc9fc6be66 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -86,14 +86,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
- req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+ req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
sgl = (struct ulptx_sgl *)(req + 1);
sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
- ULPTX_NSGE(1));
+ ULPTX_NSGE_V(1));
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index c04e5134b30c..29e764e406e1 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else {
PDBG("%s: DB wq->sq.pidx = %d\n",
__func__, wq->sq.pidx);
- writel(PIDX_T5(inc), wq->sq.udb);
+ writel(PIDX_T5_V(inc), wq->sq.udb);
}
/* Flush user doorbell area writes. */
wmb();
return;
}
- writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+ writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
}
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
@@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else {
PDBG("%s: DB wq->rq.pidx = %d\n",
__func__, wq->rq.pidx);
- writel(PIDX_T5(inc), wq->rq.udb);
+ writel(PIDX_T5_V(inc), wq->rq.udb);
}
/* Flush user doorbell area writes. */
wmb();
return;
}
- writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+ writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
}
static inline int t4_wq_in_error(struct t4_wq *wq)
@@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
u32 val;
set_bit(CQ_ARMED, &cq->flags);
- while (cq->cidx_inc > CIDXINC_MASK) {
- val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
- INGRESSQID(cq->cqid);
+ while (cq->cidx_inc > CIDXINC_M) {
+ val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
+ INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
- cq->cidx_inc -= CIDXINC_MASK;
+ cq->cidx_inc -= CIDXINC_M;
}
- val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
- INGRESSQID(cq->cqid);
+ val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
+ INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc = 0;
return 0;
@@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
- if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
+ if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
u32 val;
- val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
- INGRESSQID(cq->cqid);
+ val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
+ INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc = 0;
}
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index b56e4c5593ee..611a9fdf2f38 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
for (k = 0; k < len; k++) {
if (!(i & mask)) {
tmp = (unsigned long)pfn;
- m = min(m, find_first_bit(&tmp, sizeof(tmp)));
+ m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
skip = 1 << m;
mask = skip - 1;
base = pfn;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 49eb5111d2cd..70acda91eb2a 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -373,11 +373,11 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
- netdev->name, vlan_tx_tag_get(skb));
+ netdev->name, skb_vlan_tag_get(skb));
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
- wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+ wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
} else
wqe_misc = 0;
@@ -576,11 +576,12 @@ tso_sq_no_longer_full:
wqe_fragment_length =
(__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
- netdev->name, vlan_tx_tag_get(skb) );
+ netdev->name,
+ skb_vlan_tag_get(skb));
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
- wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+ wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
} else
wqe_misc = 0;
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index ccd7d851be26..a77eea594b69 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -754,10 +754,10 @@ dbusy_timer_handler(struct isac_hw *isac)
}
static int
-open_dchannel(struct isac_hw *isac, struct channel_req *rq)
+open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller)
{
pr_debug("%s: %s dev(%d) open from %p\n", isac->name, __func__,
- isac->dch.dev.id, __builtin_return_address(1));
+ isac->dch.dev.id, caller);
if (rq->protocol != ISDN_P_TE_S0)
return -EINVAL;
if (rq->adr.channel == 1)
@@ -771,6 +771,12 @@ open_dchannel(struct isac_hw *isac, struct channel_req *rq)
return 0;
}
+static int
+open_dchannel(struct isac_hw *isac, struct channel_req *rq)
+{
+ return open_dchannel_caller(isac, rq, __builtin_return_address(0));
+}
+
static const char *ISACVer[] =
{"2086/2186 V1.1", "2085 B1", "2085 B2",
"2085 V2.3"};
@@ -1548,7 +1554,7 @@ ipac_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
case OPEN_CHANNEL:
rq = arg;
if (rq->protocol == ISDN_P_TE_S0)
- err = open_dchannel(isac, rq);
+ err = open_dchannel_caller(isac, rq, __builtin_return_address(0));
else
err = open_bchannel(ipac, rq);
if (err)
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index de69f6828c76..741675525b53 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1176,10 +1176,10 @@ w6692_l1callback(struct dchannel *dch, u32 cmd)
}
static int
-open_dchannel(struct w6692_hw *card, struct channel_req *rq)
+open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller)
{
pr_debug("%s: %s dev(%d) open from %p\n", card->name, __func__,
- card->dch.dev.id, __builtin_return_address(1));
+ card->dch.dev.id, caller);
if (rq->protocol != ISDN_P_TE_S0)
return -EINVAL;
if (rq->adr.channel == 1)
@@ -1207,7 +1207,7 @@ w6692_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
case OPEN_CHANNEL:
rq = arg;
if (rq->protocol == ISDN_P_TE_S0)
- err = open_dchannel(card, rq);
+ err = open_dchannel(card, rq, __builtin_return_address(0));
else
err = open_bchannel(card, rq);
if (err)
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index fc9f9d03fa13..0e5d673871c0 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -225,20 +225,6 @@ fWrite_hfc8(hfc4s8s_hw *a, u_char c)
}
static inline void
-Write_hfc16(hfc4s8s_hw *a, u_char b, u_short c)
-{
- SetRegAddr(a, b);
- outw(c, a->iobase);
-}
-
-static inline void
-Write_hfc32(hfc4s8s_hw *a, u_char b, u_long c)
-{
- SetRegAddr(a, b);
- outl(c, a->iobase);
-}
-
-static inline void
fWrite_hfc32(hfc4s8s_hw *a, u_long c)
{
outl(c, a->iobase);
@@ -266,13 +252,6 @@ Read_hfc16(hfc4s8s_hw *a, u_char b)
}
static inline u_long
-Read_hfc32(hfc4s8s_hw *a, u_char b)
-{
- SetRegAddr(a, b);
- return (inl((volatile u_int) a->iobase));
-}
-
-static inline u_long
fRead_hfc32(hfc4s8s_hw *a)
{
return (inl((volatile u_int) a->iobase));
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 1a61cc9b3402..9bd538d4474b 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -186,7 +186,7 @@ static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
{ NULL, -1, 0}
};
-static const struct bond_option bond_opts[] = {
+static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
.name = "mode",
@@ -379,8 +379,7 @@ static const struct bond_option bond_opts[] = {
.values = bond_tlb_dynamic_lb_tbl,
.flags = BOND_OPTFLAG_IFDOWN,
.set = bond_option_tlb_dynamic_lb_set,
- },
- { }
+ }
};
/* Searches for an option by name */
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index dede43f4ce09..8f8418d2ac4a 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -769,11 +769,11 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
}
- if(vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
first_txd->processFlags |=
TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
first_txd->processFlags |=
- cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
+ cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index b68074803de3..b90a26b13fdf 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2429,9 +2429,9 @@ restart:
flagsize = (skb->len << 16) | (BD_FLG_END);
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
flagsize |= BD_FLG_VLAN_TAG;
- vlan_tag = vlan_tx_tag_get(skb);
+ vlan_tag = skb_vlan_tag_get(skb);
}
desc = ap->tx_ring + idx;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
@@ -2450,9 +2450,9 @@ restart:
flagsize = (skb_headlen(skb) << 16);
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
flagsize |= BD_FLG_VLAN_TAG;
- vlan_tag = vlan_tx_tag_get(skb);
+ vlan_tag = skb_vlan_tag_get(skb);
}
ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 841e6558db68..4c2ae2221780 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1299,11 +1299,11 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
lp->tx_ring[tx_index].tx_flags = 0;
#if AMD8111E_VLAN_TAG_USED
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
lp->tx_ring[tx_index].tag_ctrl_cmd |=
cpu_to_le16(TCC_VLAN_INSERT);
lp->tx_ring[tx_index].tag_ctrl_info =
- cpu_to_le16(vlan_tx_tag_get(skb));
+ cpu_to_le16(skb_vlan_tag_get(skb));
}
#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 7bb5f07dbeef..2ba1dd22ad64 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1165,8 +1165,8 @@ static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
- if (vlan_tx_tag_present(skb))
- packet->vlan_ctag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb))
+ packet->vlan_ctag = skb_vlan_tag_get(skb);
}
static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
@@ -1247,9 +1247,9 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
CSUM_ENABLE, 1);
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
/* VLAN requires an extra descriptor if tag is different */
- if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
+ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
/* We can share with the TSO context descriptor */
if (!context_desc) {
context_desc = 1;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index a1bf9d1cdae1..f5acf4cc69bd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -171,15 +171,9 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
struct xgbe_prv_data,
ptp_clock_info);
unsigned long flags;
- u64 nsec;
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- nsec = timecounter_read(&pdata->tstamp_tc);
-
- nsec += delta;
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
-
+ timecounter_adjtime(&pdata->tstamp_tc, delta);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f9ec762ac3f0..2af6affc35a7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -124,7 +124,7 @@
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/ptp_clock_kernel.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <net/dcbnl.h>
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 7ba83ffb08ac..869d97fcf781 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -593,10 +593,12 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- clk_prepare_enable(pdata->clk);
- clk_disable_unprepare(pdata->clk);
- clk_prepare_enable(pdata->clk);
- xgene_enet_ecc_init(pdata);
+ if (!efi_enabled(EFI_BOOT)) {
+ clk_prepare_enable(pdata->clk);
+ clk_disable_unprepare(pdata->clk);
+ clk_prepare_enable(pdata->clk);
+ xgene_enet_ecc_init(pdata);
+ }
xgene_enet_config_ring_if_assoc(pdata);
/* Enable auto-incr for scanning */
@@ -663,15 +665,20 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
struct phy_device *phy_dev;
struct device *dev = &pdata->pdev->dev;
- phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
- if (!phy_np) {
- netdev_dbg(ndev, "No phy-handle found\n");
- return -ENODEV;
+ if (dev->of_node) {
+ phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (!phy_np) {
+ netdev_dbg(ndev, "No phy-handle found in DT\n");
+ return -ENODEV;
+ }
+ pdata->phy_dev = of_phy_find_device(phy_np);
}
- phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
- 0, pdata->phy_mode);
- if (!phy_dev) {
+ phy_dev = pdata->phy_dev;
+
+ if (!phy_dev ||
+ phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
+ pdata->phy_mode)) {
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
}
@@ -681,32 +688,71 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
~SUPPORTED_100baseT_Half &
~SUPPORTED_1000baseT_Half;
phy_dev->advertising = phy_dev->supported;
- pdata->phy_dev = phy_dev;
return 0;
}
-int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
+static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
+ struct mii_bus *mdio)
{
- struct net_device *ndev = pdata->ndev;
struct device *dev = &pdata->pdev->dev;
+ struct net_device *ndev = pdata->ndev;
+ struct phy_device *phy;
struct device_node *child_np;
struct device_node *mdio_np = NULL;
- struct mii_bus *mdio_bus;
int ret;
+ u32 phy_id;
+
+ if (dev->of_node) {
+ for_each_child_of_node(dev->of_node, child_np) {
+ if (of_device_is_compatible(child_np,
+ "apm,xgene-mdio")) {
+ mdio_np = child_np;
+ break;
+ }
+ }
- for_each_child_of_node(dev->of_node, child_np) {
- if (of_device_is_compatible(child_np, "apm,xgene-mdio")) {
- mdio_np = child_np;
- break;
+ if (!mdio_np) {
+ netdev_dbg(ndev, "No mdio node in the dts\n");
+ return -ENXIO;
}
- }
- if (!mdio_np) {
- netdev_dbg(ndev, "No mdio node in the dts\n");
- return -ENXIO;
+ return of_mdiobus_register(mdio, mdio_np);
}
+ /* Mask out all PHYs from auto probing. */
+ mdio->phy_mask = ~0;
+
+ /* Register the MDIO bus */
+ ret = mdiobus_register(mdio);
+ if (ret)
+ return ret;
+
+ ret = device_property_read_u32(dev, "phy-channel", &phy_id);
+ if (ret)
+ ret = device_property_read_u32(dev, "phy-addr", &phy_id);
+ if (ret)
+ return -EINVAL;
+
+ phy = get_phy_device(mdio, phy_id, true);
+ if (!phy || IS_ERR(phy))
+ return -EIO;
+
+ ret = phy_device_register(phy);
+ if (ret)
+ phy_device_free(phy);
+ else
+ pdata->phy_dev = phy;
+
+ return ret;
+}
+
+int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
+{
+ struct net_device *ndev = pdata->ndev;
+ struct mii_bus *mdio_bus;
+ int ret;
+
mdio_bus = mdiobus_alloc();
if (!mdio_bus)
return -ENOMEM;
@@ -720,7 +766,7 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
mdio_bus->priv = pdata;
mdio_bus->parent = &ndev->dev;
- ret = of_mdiobus_register(mdio_bus, mdio_np);
+ ret = xgene_mdiobus_register(pdata, mdio_bus);
if (ret) {
netdev_err(ndev, "Failed to register MDIO bus\n");
mdiobus_free(mdio_bus);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 83a50280bb70..02add385a33d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -24,6 +24,10 @@
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"
+#define RES_ENET_CSR 0
+#define RES_RING_CSR 1
+#define RES_RING_CMD 2
+
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_raw_desc16 *raw_desc;
@@ -746,6 +750,41 @@ static const struct net_device_ops xgene_ndev_ops = {
.ndo_set_mac_address = xgene_enet_set_mac_address,
};
+static int xgene_get_mac_address(struct device *dev,
+ unsigned char *addr)
+{
+ int ret;
+
+ ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
+ if (ret)
+ ret = device_property_read_u8_array(dev, "mac-address",
+ addr, 6);
+ if (ret)
+ return -ENODEV;
+
+ return ETH_ALEN;
+}
+
+static int xgene_get_phy_mode(struct device *dev)
+{
+ int i, ret;
+ char *modestr;
+
+ ret = device_property_read_string(dev, "phy-connection-type",
+ (const char **)&modestr);
+ if (ret)
+ ret = device_property_read_string(dev, "phy-mode",
+ (const char **)&modestr);
+ if (ret)
+ return -ENODEV;
+
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
+ if (!strcasecmp(modestr, phy_modes(i)))
+ return i;
+ }
+ return -ENODEV;
+}
+
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev;
@@ -753,32 +792,45 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
struct device *dev;
struct resource *res;
void __iomem *base_addr;
- const char *mac;
int ret;
pdev = pdata->pdev;
dev = &pdev->dev;
ndev = pdata->ndev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
- pdata->base_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->base_addr)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
+ if (!res) {
+ dev_err(dev, "Resource enet_csr not defined\n");
+ return -ENODEV;
+ }
+ pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
+ if (!pdata->base_addr) {
dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
- return PTR_ERR(pdata->base_addr);
+ return -ENOMEM;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
- pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->ring_csr_addr)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
+ if (!res) {
+ dev_err(dev, "Resource ring_csr not defined\n");
+ return -ENODEV;
+ }
+ pdata->ring_csr_addr = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!pdata->ring_csr_addr) {
dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
- return PTR_ERR(pdata->ring_csr_addr);
+ return -ENOMEM;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
- pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->ring_cmd_addr)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
+ if (!res) {
+ dev_err(dev, "Resource ring_cmd not defined\n");
+ return -ENODEV;
+ }
+ pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!pdata->ring_cmd_addr) {
dev_err(dev, "Unable to retrieve ENET Ring command region\n");
- return PTR_ERR(pdata->ring_cmd_addr);
+ return -ENOMEM;
}
ret = platform_get_irq(pdev, 0);
@@ -789,14 +841,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
}
pdata->rx_irq = ret;
- mac = of_get_mac_address(dev->of_node);
- if (mac)
- memcpy(ndev->dev_addr, mac, ndev->addr_len);
- else
+ if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
eth_hw_addr_random(ndev);
+
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
- pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ pdata->phy_mode = xgene_get_phy_mode(dev);
if (pdata->phy_mode < 0) {
dev_err(dev, "Unable to get phy-connection-type\n");
return pdata->phy_mode;
@@ -809,11 +859,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
}
pdata->clk = devm_clk_get(&pdev->dev, NULL);
- ret = IS_ERR(pdata->clk);
if (IS_ERR(pdata->clk)) {
- dev_err(&pdev->dev, "can't get clock\n");
- ret = PTR_ERR(pdata->clk);
- return ret;
+ /* Firmware may have set up the clock already. */
+ pdata->clk = NULL;
}
base_addr = pdata->base_addr;
@@ -924,7 +972,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
}
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
netdev_err(ndev, "No usable DMA configuration\n");
goto err;
@@ -972,17 +1020,26 @@ static int xgene_enet_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id xgene_enet_match[] = {
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_enet_acpi_match[] = {
+ { "APMC0D05", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
+#endif
+
+static struct of_device_id xgene_enet_of_match[] = {
{.compatible = "apm,xgene-enet",},
{},
};
-MODULE_DEVICE_TABLE(of, xgene_enet_match);
+MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
static struct platform_driver xgene_enet_driver = {
.driver = {
.name = "xgene-enet",
- .of_match_table = xgene_enet_match,
+ .of_match_table = of_match_ptr(xgene_enet_of_match),
+ .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
.remove = xgene_enet_remove,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index f9958fae6ffd..c2d465c3db66 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -22,7 +22,10 @@
#ifndef __XGENE_ENET_MAIN_H__
#define __XGENE_ENET_MAIN_H__
+#include <linux/acpi.h>
#include <linux/clk.h>
+#include <linux/efi.h>
+#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index c9946c6c119e..587f63e87588 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2235,8 +2235,8 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- if (unlikely(vlan_tx_tag_present(skb))) {
- u16 vlan = vlan_tx_tag_get(skb);
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ u16 vlan = skb_vlan_tag_get(skb);
__le16 tag;
vlan = cpu_to_le16(vlan);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 2326579f9454..59a03a193e83 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1892,8 +1892,8 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
tpd = atl1e_get_tpd(adapter);
- if (vlan_tx_tag_present(skb)) {
- u16 vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ u16 vlan_tag = skb_vlan_tag_get(skb);
u16 atl1e_vlan_tag;
tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
@@ -2373,9 +2373,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
- init_timer(&adapter->phy_config_timer);
- adapter->phy_config_timer.function = atl1e_phy_config;
- adapter->phy_config_timer.data = (unsigned long) adapter;
+ setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
+ (unsigned long)adapter);
/* get user settings */
atl1e_check_options(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 2c8f398aeda9..eca1d113fee1 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2415,8 +2415,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
(u16) atomic_read(&tpd_ring->next_to_use));
memset(ptpd, 0, sizeof(struct tx_packet_desc));
- if (vlan_tx_tag_present(skb)) {
- vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ vlan_tag = skb_vlan_tag_get(skb);
vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
((vlan_tag >> 9) & 0x8);
ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 84a09e8ddd9c..46a535318c7a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -887,8 +887,8 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
offset = ((u32)(skb->len-copy_len + 3) & ~3);
}
#ifdef NETIF_F_HW_VLAN_CTAG_TX
- if (vlan_tx_tag_present(skb)) {
- u16 vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ u16 vlan_tag = skb_vlan_tag_get(skb);
vlan_tag = (vlan_tag << 4) |
(vlan_tag >> 13) |
((vlan_tag >> 9) & 0x8);
@@ -1436,13 +1436,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
atl2_check_options(adapter);
- init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = atl2_watchdog;
- adapter->watchdog_timer.data = (unsigned long) adapter;
+ setup_timer(&adapter->watchdog_timer, atl2_watchdog,
+ (unsigned long)adapter);
- init_timer(&adapter->phy_config_timer);
- adapter->phy_config_timer.function = atl2_phy_config;
- adapter->phy_config_timer.data = (unsigned long) adapter;
+ setup_timer(&adapter->phy_config_timer, atl2_phy_config,
+ (unsigned long)adapter);
INIT_WORK(&adapter->reset_task, atl2_reset_task);
INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 823d01c5684c..02bf0b86995b 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6597,9 +6597,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
vlan_tag_flags |=
- (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
+ (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
}
if ((mss = skb_shinfo(skb)->gso_size)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index c3a6072134f5..756053c028be 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -22,7 +22,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
/* compilation time flags */
@@ -1138,12 +1138,8 @@ struct bnx2x_port {
u32 link_config[LINK_CONFIG_SIZE];
u32 supported[LINK_CONFIG_SIZE];
-/* link settings - missing defines */
-#define SUPPORTED_2500baseX_Full (1 << 15)
u32 advertising[LINK_CONFIG_SIZE];
-/* link settings - missing defines */
-#define ADVERTISED_2500baseX_Full (1 << 15)
u32 phy_addr;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 1d1147c93d59..b51a18a09d4d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3865,9 +3865,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
"sending pkt %u @%p next_idx %u bd %u @%p\n",
pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_start_bd->vlan_or_ethertype =
- cpu_to_le16(vlan_tx_tag_get(skb));
+ cpu_to_le16(skb_vlan_tag_get(skb));
tx_start_bd->bd_flags.as_bitfield |=
(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
} else {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 72eef9fc883e..0758c8bef4ba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13267,14 +13267,10 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
- u64 now;
DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
- now = timecounter_read(&bp->timecounter);
- now += delta;
- /* Re-init the timecounter */
- timecounter_init(&bp->timecounter, &bp->cyclecounter, now);
+ timecounter_adjtime(&bp->timecounter, delta);
return 0;
}
@@ -14614,7 +14610,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp)
{
memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
bp->cyclecounter.read = bnx2x_cyclecounter_read;
- bp->cyclecounter.mask = CLOCKSOURCE_MASK(64);
+ bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
bp->cyclecounter.shift = 1;
bp->cyclecounter.mult = 1;
}
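A reduced sketch (illustrative callback and constants) of the timecounter API this bnx2x hunk switches to: timecounter_adjtime() replaces the open-coded read/add/re-init sequence, and cyclecounter masks now come from CYCLECOUNTER_MASK() in <linux/timecounter.h>.

#include <linux/timecounter.h>
#include <linux/ktime.h>

static cycle_t my_cc_read(const struct cyclecounter *cc)
{
	return 0;	/* a real driver reads its free-running counter here */
}

static void my_ptp_init_and_adjust(struct cyclecounter *cc,
				   struct timecounter *tc, s64 delta)
{
	cc->read  = my_cc_read;
	cc->mask  = CYCLECOUNTER_MASK(64);	/* was CLOCKSOURCE_MASK(64) */
	cc->mult  = 1;
	cc->shift = 1;

	timecounter_init(tc, cc, ktime_get_real_ns());

	/* One helper instead of timecounter_read() + timecounter_init(). */
	timecounter_adjtime(tc, delta);
}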
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 96bf01ba32dd..615a6dbde047 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8008,9 +8008,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
!mss && skb->len > VLAN_ETH_FRAME_LEN)
base_flags |= TXD_FLAG_JMB_PKT;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
base_flags |= TXD_FLAG_VLAN;
- vlan = vlan_tx_tag_get(skb);
+ vlan = skb_vlan_tag_get(skb);
}
if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
@@ -11573,11 +11573,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
tg3_flag_set(tp, INIT_COMPLETE);
tg3_enable_ints(tp);
- if (init)
- tg3_ptp_init(tp);
- else
- tg3_ptp_resume(tp);
-
+ tg3_ptp_resume(tp);
tg3_full_unlock(tp);
@@ -11698,13 +11694,6 @@ static int tg3_open(struct net_device *dev)
pci_set_power_state(tp->pdev, PCI_D3hot);
}
- if (tg3_flag(tp, PTP_CAPABLE)) {
- tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
- &tp->pdev->dev);
- if (IS_ERR(tp->ptp_clock))
- tp->ptp_clock = NULL;
- }
-
return err;
}
@@ -11718,8 +11707,6 @@ static int tg3_close(struct net_device *dev)
return -EAGAIN;
}
- tg3_ptp_fini(tp);
-
tg3_stop(tp);
/* Clear stats across close / open calls */
@@ -17897,6 +17884,14 @@ static int tg3_init_one(struct pci_dev *pdev,
goto err_out_apeunmap;
}
+ if (tg3_flag(tp, PTP_CAPABLE)) {
+ tg3_ptp_init(tp);
+ tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
+ &tp->pdev->dev);
+ if (IS_ERR(tp->ptp_clock))
+ tp->ptp_clock = NULL;
+ }
+
netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
tp->board_part_number,
tg3_chip_rev_id(tp),
@@ -17972,6 +17967,8 @@ static void tg3_remove_one(struct pci_dev *pdev)
if (dev) {
struct tg3 *tp = netdev_priv(dev);
+ tg3_ptp_fini(tp);
+
release_firmware(tp->fw);
tg3_reset_task_cancel(tp);
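The tg3 hunks above move PTP clock registration from ndo_open/ndo_close to probe/remove so the clock persists across interface down/up cycles; a skeletal sketch of that lifetime with hypothetical names:

#include <linux/err.h>
#include <linux/ptp_clock_kernel.h>

struct my_priv {
	struct ptp_clock_info	ptp_info;
	struct ptp_clock	*ptp_clock;
};

static void my_probe_ptp(struct my_priv *tp, struct device *dev)
{
	/* Register once at probe time; continue without PTP on failure. */
	tp->ptp_clock = ptp_clock_register(&tp->ptp_info, dev);
	if (IS_ERR(tp->ptp_clock))
		tp->ptp_clock = NULL;
}

static void my_remove_ptp(struct my_priv *tp)
{
	if (tp->ptp_clock)
		ptp_clock_unregister(tp->ptp_clock);
}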
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 323721838cf9..7714d7790089 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2824,8 +2824,8 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
u32 gso_size;
u16 vlan_tag = 0;
- if (vlan_tx_tag_present(skb)) {
- vlan_tag = (u16)vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ vlan_tag = (u16)skb_vlan_tag_get(skb);
flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
}
if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3767271c7667..dd8c202c0708 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1827,12 +1827,23 @@ static int macb_close(struct net_device *dev)
static void gem_update_stats(struct macb *bp)
{
- u32 __iomem *reg = bp->regs + GEM_OTX;
+ int i;
u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
- u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
- for (; p < end; p++, reg++)
- *p += __raw_readl(reg);
+ for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
+ u32 offset = gem_statistics[i].offset;
+ u64 val = __raw_readl(bp->regs+offset);
+
+ bp->ethtool_stats[i] += val;
+ *p += val;
+
+ if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
+ /* Add GEM_OCTTXH, GEM_OCTRXH */
+ val = __raw_readl(bp->regs+offset+4);
+ bp->ethtool_stats[i] += ((u64)val)<<32;
+ *(++p) += val;
+ }
+ }
}
static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -1873,6 +1884,39 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
return nstat;
}
+static void gem_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct macb *bp;
+
+ bp = netdev_priv(dev);
+ gem_update_stats(bp);
+ memcpy(data, &bp->ethtool_stats, sizeof(u64)*GEM_STATS_LEN);
+}
+
+static int gem_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return GEM_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
+{
+ int i;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
+ memcpy(p, gem_statistics[i].stat_string,
+ ETH_GSTRING_LEN);
+ break;
+ }
+}
+
struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
@@ -1988,6 +2032,9 @@ const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_ethtool_stats = gem_get_ethtool_stats,
+ .get_strings = gem_get_ethtool_strings,
+ .get_sset_count = gem_get_sset_count,
};
EXPORT_SYMBOL_GPL(macb_ethtool_ops);
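A reduced sketch of the octet-counter handling in gem_update_stats() above: the low 32-bit register is accumulated first, then the companion high register is shifted in to form the 64-bit ethtool statistic. The helper name below is illustrative only.

#include <linux/types.h>

/* Fold a GEM "octets low"/"octets high" register pair into one
 * accumulated 64-bit ethtool statistic.
 */
static inline void gem_accumulate_octets(u64 *stat, u32 lo, u32 hi)
{
	*stat += lo;
	*stat += (u64)hi << 32;
}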
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 084191b6fad2..378b2183ab8d 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -15,20 +15,20 @@
#define MACB_MAX_QUEUES 8
/* MACB register offsets */
-#define MACB_NCR 0x0000
-#define MACB_NCFGR 0x0004
-#define MACB_NSR 0x0008
+#define MACB_NCR 0x0000 /* Network Control */
+#define MACB_NCFGR 0x0004 /* Network Config */
+#define MACB_NSR 0x0008 /* Network Status */
#define MACB_TAR 0x000c /* AT91RM9200 only */
#define MACB_TCR 0x0010 /* AT91RM9200 only */
-#define MACB_TSR 0x0014
-#define MACB_RBQP 0x0018
-#define MACB_TBQP 0x001c
-#define MACB_RSR 0x0020
-#define MACB_ISR 0x0024
-#define MACB_IER 0x0028
-#define MACB_IDR 0x002c
-#define MACB_IMR 0x0030
-#define MACB_MAN 0x0034
+#define MACB_TSR 0x0014 /* Transmit Status */
+#define MACB_RBQP 0x0018 /* RX Q Base Address */
+#define MACB_TBQP 0x001c /* TX Q Base Address */
+#define MACB_RSR 0x0020 /* Receive Status */
+#define MACB_ISR 0x0024 /* Interrupt Status */
+#define MACB_IER 0x0028 /* Interrupt Enable */
+#define MACB_IDR 0x002c /* Interrupt Disable */
+#define MACB_IMR 0x0030 /* Interrupt Mask */
+#define MACB_MAN 0x0034 /* PHY Maintenance */
#define MACB_PTR 0x0038
#define MACB_PFR 0x003c
#define MACB_FTO 0x0040
@@ -68,27 +68,180 @@
#define MACB_MID 0x00fc
/* GEM register offsets. */
-#define GEM_NCFGR 0x0004
-#define GEM_USRIO 0x000c
-#define GEM_DMACFG 0x0010
-#define GEM_HRB 0x0080
-#define GEM_HRT 0x0084
-#define GEM_SA1B 0x0088
-#define GEM_SA1T 0x008C
-#define GEM_SA2B 0x0090
-#define GEM_SA2T 0x0094
-#define GEM_SA3B 0x0098
-#define GEM_SA3T 0x009C
-#define GEM_SA4B 0x00A0
-#define GEM_SA4T 0x00A4
-#define GEM_OTX 0x0100
-#define GEM_DCFG1 0x0280
-#define GEM_DCFG2 0x0284
-#define GEM_DCFG3 0x0288
-#define GEM_DCFG4 0x028c
-#define GEM_DCFG5 0x0290
-#define GEM_DCFG6 0x0294
-#define GEM_DCFG7 0x0298
+#define GEM_NCFGR 0x0004 /* Network Config */
+#define GEM_USRIO 0x000c /* User IO */
+#define GEM_DMACFG 0x0010 /* DMA Configuration */
+#define GEM_HRB 0x0080 /* Hash Bottom */
+#define GEM_HRT 0x0084 /* Hash Top */
+#define GEM_SA1B 0x0088 /* Specific1 Bottom */
+#define GEM_SA1T 0x008C /* Specific1 Top */
+#define GEM_SA2B 0x0090 /* Specific2 Bottom */
+#define GEM_SA2T 0x0094 /* Specific2 Top */
+#define GEM_SA3B 0x0098 /* Specific3 Bottom */
+#define GEM_SA3T 0x009C /* Specific3 Top */
+#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
+#define GEM_SA4T 0x00A4 /* Specific4 Top */
+#define GEM_OTX 0x0100 /* Octets transmitted */
+#define GEM_OCTTXL 0x0100 /* Octets transmitted
+ * [31:0]
+ */
+#define GEM_OCTTXH 0x0104 /* Octets transmitted
+ * [47:32]
+ */
+#define GEM_TXCNT 0x0108 /* Error-free Frames
+ * Transmitted counter
+ */
+#define GEM_TXBCCNT 0x010c /* Error-free Broadcast
+ * Frames counter
+ */
+#define GEM_TXMCCNT 0x0110 /* Error-free Multicast
+ * Frames counter
+ */
+#define GEM_TXPAUSECNT 0x0114 /* Pause Frames
+ * Transmitted Counter
+ */
+#define GEM_TX64CNT 0x0118 /* Error-free 64 byte
+ * Frames Transmitted
+ * counter
+ */
+#define GEM_TX65CNT 0x011c /* Error-free 65-127 byte
+ * Frames Transmitted
+ * counter
+ */
+#define GEM_TX128CNT 0x0120 /* Error-free 128-255
+ * byte Frames
+ * Transmitted counter
+ */
+#define GEM_TX256CNT 0x0124 /* Error-free 256-511
+ * byte Frames
+ * transmitted counter
+ */
+#define GEM_TX512CNT 0x0128 /* Error-free 512-1023
+ * byte Frames
+ * transmitted counter
+ */
+#define GEM_TX1024CNT 0x012c /* Error-free 1024-1518
+ * byte Frames
+ * transmitted counter
+ */
+#define GEM_TX1519CNT 0x0130 /* Error-free larger than
+ * 1519 byte Frames
+ * transmitted counter
+ */
+#define GEM_TXURUNCNT 0x0134 /* TX under run error
+ * counter
+ */
+#define GEM_SNGLCOLLCNT 0x0138 /* Single Collision Frame
+ * Counter
+ */
+#define GEM_MULTICOLLCNT 0x013c /* Multiple Collision
+ * Frame Counter
+ */
+#define GEM_EXCESSCOLLCNT 0x0140 /* Excessive Collision
+ * Frame Counter
+ */
+#define GEM_LATECOLLCNT 0x0144 /* Late Collision Frame
+ * Counter
+ */
+#define GEM_TXDEFERCNT 0x0148 /* Deferred Transmission
+ * Frame Counter
+ */
+#define GEM_TXCSENSECNT 0x014c /* Carrier Sense Error
+ * Counter
+ */
+#define GEM_ORX 0x0150 /* Octets received */
+#define GEM_OCTRXL 0x0150 /* Octets received
+ * [31:0]
+ */
+#define GEM_OCTRXH 0x0154 /* Octets received
+ * [47:32]
+ */
+#define GEM_RXCNT 0x0158 /* Error-free Frames
+ * Received Counter
+ */
+#define GEM_RXBROADCNT 0x015c /* Error-free Broadcast
+ * Frames Received
+ * Counter
+ */
+#define GEM_RXMULTICNT 0x0160 /* Error-free Multicast
+ * Frames Received
+ * Counter
+ */
+#define GEM_RXPAUSECNT 0x0164 /* Error-free Pause
+ * Frames Received
+ * Counter
+ */
+#define GEM_RX64CNT 0x0168 /* Error-free 64 byte
+ * Frames Received
+ * Counter
+ */
+#define GEM_RX65CNT 0x016c /* Error-free 65-127 byte
+ * Frames Received
+ * Counter
+ */
+#define GEM_RX128CNT 0x0170 /* Error-free 128-255
+ * byte Frames Received
+ * Counter
+ */
+#define GEM_RX256CNT 0x0174 /* Error-free 256-511
+ * byte Frames Received
+ * Counter
+ */
+#define GEM_RX512CNT 0x0178 /* Error-free 512-1023
+ * byte Frames Received
+ * Counter
+ */
+#define GEM_RX1024CNT 0x017c /* Error-free 1024-1518
+ * byte Frames Received
+ * Counter
+ */
+#define GEM_RX1519CNT 0x0180 /* Error-free larger than
+ * 1519 Frames Received
+ * Counter
+ */
+#define GEM_RXUNDRCNT 0x0184 /* Undersize Frames
+ * Received Counter
+ */
+#define GEM_RXOVRCNT 0x0188 /* Oversize Frames
+ * Received Counter
+ */
+#define GEM_RXJABCNT 0x018c /* Jabbers Received
+ * Counter
+ */
+#define GEM_RXFCSCNT 0x0190 /* Frame Check Sequence
+ * Error Counter
+ */
+#define GEM_RXLENGTHCNT 0x0194 /* Length Field Error
+ * Counter
+ */
+#define GEM_RXSYMBCNT 0x0198 /* Symbol Error
+ * Counter
+ */
+#define GEM_RXALIGNCNT 0x019c /* Alignment Error
+ * Counter
+ */
+#define GEM_RXRESERRCNT 0x01a0 /* Receive Resource Error
+ * Counter
+ */
+#define GEM_RXORCNT 0x01a4 /* Receive Overrun
+ * Counter
+ */
+#define GEM_RXIPCCNT 0x01a8 /* IP header Checksum
+ * Error Counter
+ */
+#define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error
+ * Counter
+ */
+#define GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error
+ * Counter
+ */
+#define GEM_DCFG1 0x0280 /* Design Config 1 */
+#define GEM_DCFG2 0x0284 /* Design Config 2 */
+#define GEM_DCFG3 0x0288 /* Design Config 3 */
+#define GEM_DCFG4 0x028c /* Design Config 4 */
+#define GEM_DCFG5 0x0290 /* Design Config 5 */
+#define GEM_DCFG6 0x0294 /* Design Config 6 */
+#define GEM_DCFG7 0x0298 /* Design Config 7 */
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
@@ -98,67 +251,73 @@
#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
/* Bitfields in NCR */
-#define MACB_LB_OFFSET 0
+#define MACB_LB_OFFSET 0 /* reserved */
#define MACB_LB_SIZE 1
-#define MACB_LLB_OFFSET 1
+#define MACB_LLB_OFFSET 1 /* Loop back local */
#define MACB_LLB_SIZE 1
-#define MACB_RE_OFFSET 2
+#define MACB_RE_OFFSET 2 /* Receive enable */
#define MACB_RE_SIZE 1
-#define MACB_TE_OFFSET 3
+#define MACB_TE_OFFSET 3 /* Transmit enable */
#define MACB_TE_SIZE 1
-#define MACB_MPE_OFFSET 4
+#define MACB_MPE_OFFSET 4 /* Management port enable */
#define MACB_MPE_SIZE 1
-#define MACB_CLRSTAT_OFFSET 5
+#define MACB_CLRSTAT_OFFSET 5 /* Clear stats regs */
#define MACB_CLRSTAT_SIZE 1
-#define MACB_INCSTAT_OFFSET 6
+#define MACB_INCSTAT_OFFSET 6 /* Incremental stats regs */
#define MACB_INCSTAT_SIZE 1
-#define MACB_WESTAT_OFFSET 7
+#define MACB_WESTAT_OFFSET 7 /* Write enable stats regs */
#define MACB_WESTAT_SIZE 1
-#define MACB_BP_OFFSET 8
+#define MACB_BP_OFFSET 8 /* Back pressure */
#define MACB_BP_SIZE 1
-#define MACB_TSTART_OFFSET 9
+#define MACB_TSTART_OFFSET 9 /* Start transmission */
#define MACB_TSTART_SIZE 1
-#define MACB_THALT_OFFSET 10
+#define MACB_THALT_OFFSET 10 /* Transmit halt */
#define MACB_THALT_SIZE 1
-#define MACB_NCR_TPF_OFFSET 11
+#define MACB_NCR_TPF_OFFSET 11 /* Transmit pause frame */
#define MACB_NCR_TPF_SIZE 1
-#define MACB_TZQ_OFFSET 12
+#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum
+ * pause frame
+ */
#define MACB_TZQ_SIZE 1
/* Bitfields in NCFGR */
-#define MACB_SPD_OFFSET 0
+#define MACB_SPD_OFFSET 0 /* Speed */
#define MACB_SPD_SIZE 1
-#define MACB_FD_OFFSET 1
+#define MACB_FD_OFFSET 1 /* Full duplex */
#define MACB_FD_SIZE 1
-#define MACB_BIT_RATE_OFFSET 2
+#define MACB_BIT_RATE_OFFSET 2 /* Discard non-VLAN frames */
#define MACB_BIT_RATE_SIZE 1
-#define MACB_JFRAME_OFFSET 3
+#define MACB_JFRAME_OFFSET 3 /* reserved */
#define MACB_JFRAME_SIZE 1
-#define MACB_CAF_OFFSET 4
+#define MACB_CAF_OFFSET 4 /* Copy all frames */
#define MACB_CAF_SIZE 1
-#define MACB_NBC_OFFSET 5
+#define MACB_NBC_OFFSET 5 /* No broadcast */
#define MACB_NBC_SIZE 1
-#define MACB_NCFGR_MTI_OFFSET 6
+#define MACB_NCFGR_MTI_OFFSET 6 /* Multicast hash enable */
#define MACB_NCFGR_MTI_SIZE 1
-#define MACB_UNI_OFFSET 7
+#define MACB_UNI_OFFSET 7 /* Unicast hash enable */
#define MACB_UNI_SIZE 1
-#define MACB_BIG_OFFSET 8
+#define MACB_BIG_OFFSET 8 /* Receive 1536 byte frames */
#define MACB_BIG_SIZE 1
-#define MACB_EAE_OFFSET 9
+#define MACB_EAE_OFFSET 9 /* External address match
+ * enable
+ */
#define MACB_EAE_SIZE 1
#define MACB_CLK_OFFSET 10
#define MACB_CLK_SIZE 2
-#define MACB_RTY_OFFSET 12
+#define MACB_RTY_OFFSET 12 /* Retry test */
#define MACB_RTY_SIZE 1
-#define MACB_PAE_OFFSET 13
+#define MACB_PAE_OFFSET 13 /* Pause enable */
#define MACB_PAE_SIZE 1
#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */
#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */
-#define MACB_RBOF_OFFSET 14
+#define MACB_RBOF_OFFSET 14 /* Receive buffer offset */
#define MACB_RBOF_SIZE 2
-#define MACB_RLCE_OFFSET 16
+#define MACB_RLCE_OFFSET 16 /* Length field error frame
+ * discard
+ */
#define MACB_RLCE_SIZE 1
-#define MACB_DRFCS_OFFSET 17
+#define MACB_DRFCS_OFFSET 17 /* FCS remove */
#define MACB_DRFCS_SIZE 1
#define MACB_EFRHD_OFFSET 18
#define MACB_EFRHD_SIZE 1
@@ -166,111 +325,160 @@
#define MACB_IRXFCS_SIZE 1
/* GEM specific NCFGR bitfields. */
-#define GEM_GBE_OFFSET 10
+#define GEM_GBE_OFFSET 10 /* Gigabit mode enable */
#define GEM_GBE_SIZE 1
-#define GEM_CLK_OFFSET 18
+#define GEM_CLK_OFFSET 18 /* MDC clock division */
#define GEM_CLK_SIZE 3
-#define GEM_DBW_OFFSET 21
+#define GEM_DBW_OFFSET 21 /* Data bus width */
#define GEM_DBW_SIZE 2
#define GEM_RXCOEN_OFFSET 24
#define GEM_RXCOEN_SIZE 1
/* Constants for data bus width. */
-#define GEM_DBW32 0
-#define GEM_DBW64 1
-#define GEM_DBW128 2
+#define GEM_DBW32 0 /* 32 bit AMBA AHB data bus
+ * width
+ */
+#define GEM_DBW64 1 /* 64 bit AMBA AHB data bus
+ * width
+ */
+#define GEM_DBW128 2 /* 128 bit AMBA AHB data bus
+ * width
+ */
/* Bitfields in DMACFG. */
-#define GEM_FBLDO_OFFSET 0
+#define GEM_FBLDO_OFFSET 0 /* AHB fixed burst length for
+ * DMA data operations
+ */
#define GEM_FBLDO_SIZE 5
-#define GEM_ENDIA_OFFSET 7
+#define GEM_ENDIA_OFFSET 7 /* AHB endian swap mode enable
+ * for packet data accesses
+ */
#define GEM_ENDIA_SIZE 1
-#define GEM_RXBMS_OFFSET 8
+#define GEM_RXBMS_OFFSET 8 /* Receiver packet buffer
+ * memory size select
+ */
#define GEM_RXBMS_SIZE 2
-#define GEM_TXPBMS_OFFSET 10
+#define GEM_TXPBMS_OFFSET 10 /* Transmitter packet buffer
+ * memory size select
+ */
#define GEM_TXPBMS_SIZE 1
-#define GEM_TXCOEN_OFFSET 11
+#define GEM_TXCOEN_OFFSET 11 /* Transmitter IP, TCP and
+ * UDP checksum generation
+ * offload enable
+ */
#define GEM_TXCOEN_SIZE 1
-#define GEM_RXBS_OFFSET 16
+#define GEM_RXBS_OFFSET 16 /* DMA receive buffer size in
+ * AHB system memory
+ */
#define GEM_RXBS_SIZE 8
-#define GEM_DDRP_OFFSET 24
+#define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */
#define GEM_DDRP_SIZE 1
/* Bitfields in NSR */
-#define MACB_NSR_LINK_OFFSET 0
+#define MACB_NSR_LINK_OFFSET 0 /* pcs_link_state */
#define MACB_NSR_LINK_SIZE 1
-#define MACB_MDIO_OFFSET 1
+#define MACB_MDIO_OFFSET 1 /* status of the mdio_in
+ * pin
+ */
#define MACB_MDIO_SIZE 1
-#define MACB_IDLE_OFFSET 2
+#define MACB_IDLE_OFFSET 2 /* The PHY management logic is
+ * idle (i.e. has completed)
+ */
#define MACB_IDLE_SIZE 1
/* Bitfields in TSR */
-#define MACB_UBR_OFFSET 0
+#define MACB_UBR_OFFSET 0 /* Used bit read */
#define MACB_UBR_SIZE 1
-#define MACB_COL_OFFSET 1
+#define MACB_COL_OFFSET 1 /* Collision occurred */
#define MACB_COL_SIZE 1
-#define MACB_TSR_RLE_OFFSET 2
+#define MACB_TSR_RLE_OFFSET 2 /* Retry limit exceeded */
#define MACB_TSR_RLE_SIZE 1
-#define MACB_TGO_OFFSET 3
+#define MACB_TGO_OFFSET 3 /* Transmit go */
#define MACB_TGO_SIZE 1
-#define MACB_BEX_OFFSET 4
+#define MACB_BEX_OFFSET 4 /* Transmit frame corruption
+ * due to AHB error
+ */
#define MACB_BEX_SIZE 1
#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */
#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */
-#define MACB_COMP_OFFSET 5
+#define MACB_COMP_OFFSET 5 /* Transmit complete */
#define MACB_COMP_SIZE 1
-#define MACB_UND_OFFSET 6
+#define MACB_UND_OFFSET 6 /* Transmit under run */
#define MACB_UND_SIZE 1
/* Bitfields in RSR */
-#define MACB_BNA_OFFSET 0
+#define MACB_BNA_OFFSET 0 /* Buffer not available */
#define MACB_BNA_SIZE 1
-#define MACB_REC_OFFSET 1
+#define MACB_REC_OFFSET 1 /* Frame received */
#define MACB_REC_SIZE 1
-#define MACB_OVR_OFFSET 2
+#define MACB_OVR_OFFSET 2 /* Receive overrun */
#define MACB_OVR_SIZE 1
/* Bitfields in ISR/IER/IDR/IMR */
-#define MACB_MFD_OFFSET 0
+#define MACB_MFD_OFFSET 0 /* Management frame sent */
#define MACB_MFD_SIZE 1
-#define MACB_RCOMP_OFFSET 1
+#define MACB_RCOMP_OFFSET 1 /* Receive complete */
#define MACB_RCOMP_SIZE 1
-#define MACB_RXUBR_OFFSET 2
+#define MACB_RXUBR_OFFSET 2 /* RX used bit read */
#define MACB_RXUBR_SIZE 1
-#define MACB_TXUBR_OFFSET 3
+#define MACB_TXUBR_OFFSET 3 /* TX used bit read */
#define MACB_TXUBR_SIZE 1
-#define MACB_ISR_TUND_OFFSET 4
+#define MACB_ISR_TUND_OFFSET 4 /* Enable transmit buffer
+ * under run interrupt
+ */
#define MACB_ISR_TUND_SIZE 1
-#define MACB_ISR_RLE_OFFSET 5
+#define MACB_ISR_RLE_OFFSET 5 /* Enable retry limit exceeded
+ * or late collision interrupt
+ */
#define MACB_ISR_RLE_SIZE 1
-#define MACB_TXERR_OFFSET 6
+#define MACB_TXERR_OFFSET 6 /* Enable transmit frame
+ * corruption due to AHB error
+ * interrupt
+ */
#define MACB_TXERR_SIZE 1
-#define MACB_TCOMP_OFFSET 7
+#define MACB_TCOMP_OFFSET 7 /* Enable transmit complete
+ * interrupt
+ */
#define MACB_TCOMP_SIZE 1
-#define MACB_ISR_LINK_OFFSET 9
+#define MACB_ISR_LINK_OFFSET 9 /* Enable link change
+ * interrupt
+ */
#define MACB_ISR_LINK_SIZE 1
-#define MACB_ISR_ROVR_OFFSET 10
+#define MACB_ISR_ROVR_OFFSET 10 /* Enable receive overrun
+ * interrupt
+ */
#define MACB_ISR_ROVR_SIZE 1
-#define MACB_HRESP_OFFSET 11
+#define MACB_HRESP_OFFSET 11 /* Enable hresp not OK
+ * interrupt
+ */
#define MACB_HRESP_SIZE 1
-#define MACB_PFR_OFFSET 12
+#define MACB_PFR_OFFSET 12 /* Enable pause frame with
+ * non-zero pause quantum
+ * interrupt
+ */
#define MACB_PFR_SIZE 1
-#define MACB_PTZ_OFFSET 13
+#define MACB_PTZ_OFFSET 13 /* Enable pause time zero
+ * interrupt
+ */
#define MACB_PTZ_SIZE 1
/* Bitfields in MAN */
-#define MACB_DATA_OFFSET 0
+#define MACB_DATA_OFFSET 0 /* data */
#define MACB_DATA_SIZE 16
-#define MACB_CODE_OFFSET 16
+#define MACB_CODE_OFFSET 16 /* Must be written to 10 */
#define MACB_CODE_SIZE 2
-#define MACB_REGA_OFFSET 18
+#define MACB_REGA_OFFSET 18 /* Register address */
#define MACB_REGA_SIZE 5
-#define MACB_PHYA_OFFSET 23
+#define MACB_PHYA_OFFSET 23 /* PHY address */
#define MACB_PHYA_SIZE 5
-#define MACB_RW_OFFSET 28
+#define MACB_RW_OFFSET 28 /* Operation. 10 is read. 01
+ * is write.
+ */
#define MACB_RW_SIZE 2
-#define MACB_SOF_OFFSET 30
+#define MACB_SOF_OFFSET 30 /* Must be written to 1 for
+ * Clause 22 operation
+ */
#define MACB_SOF_SIZE 2
/* Bitfields in USRIO (AVR32) */
@@ -286,7 +494,7 @@
/* Bitfields in USRIO (AT91) */
#define MACB_RMII_OFFSET 0
#define MACB_RMII_SIZE 1
-#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */
+#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */
#define GEM_RGMII_SIZE 1
#define MACB_CLKEN_OFFSET 1
#define MACB_CLKEN_SIZE 1
@@ -595,6 +803,107 @@ struct gem_stats {
u32 rx_udp_checksum_errors;
};
+/* Describes the name and offset of an individual statistic register, as
+ * returned by `ethtool -S`. Also describes which net_device_stats statistics
+ * this register should contribute to.
+ */
+struct gem_statistic {
+ char stat_string[ETH_GSTRING_LEN];
+ int offset;
+ u32 stat_bits;
+};
+
+/* Bitfield defs for net_device_stat statistics */
+#define GEM_NDS_RXERR_OFFSET 0
+#define GEM_NDS_RXLENERR_OFFSET 1
+#define GEM_NDS_RXOVERERR_OFFSET 2
+#define GEM_NDS_RXCRCERR_OFFSET 3
+#define GEM_NDS_RXFRAMEERR_OFFSET 4
+#define GEM_NDS_RXFIFOERR_OFFSET 5
+#define GEM_NDS_TXERR_OFFSET 6
+#define GEM_NDS_TXABORTEDERR_OFFSET 7
+#define GEM_NDS_TXCARRIERERR_OFFSET 8
+#define GEM_NDS_TXFIFOERR_OFFSET 9
+#define GEM_NDS_COLLISIONS_OFFSET 10
+
+#define GEM_STAT_TITLE(name, title) GEM_STAT_TITLE_BITS(name, title, 0)
+#define GEM_STAT_TITLE_BITS(name, title, bits) { \
+ .stat_string = title, \
+ .offset = GEM_##name, \
+ .stat_bits = bits \
+}
+
+/* list of gem statistic registers. The names MUST match the
+ * corresponding GEM_* definitions.
+ */
+static const struct gem_statistic gem_statistics[] = {
+ GEM_STAT_TITLE(OCTTXL, "tx_octets"), /* OCTTXH combined with OCTTXL */
+ GEM_STAT_TITLE(TXCNT, "tx_frames"),
+ GEM_STAT_TITLE(TXBCCNT, "tx_broadcast_frames"),
+ GEM_STAT_TITLE(TXMCCNT, "tx_multicast_frames"),
+ GEM_STAT_TITLE(TXPAUSECNT, "tx_pause_frames"),
+ GEM_STAT_TITLE(TX64CNT, "tx_64_byte_frames"),
+ GEM_STAT_TITLE(TX65CNT, "tx_65_127_byte_frames"),
+ GEM_STAT_TITLE(TX128CNT, "tx_128_255_byte_frames"),
+ GEM_STAT_TITLE(TX256CNT, "tx_256_511_byte_frames"),
+ GEM_STAT_TITLE(TX512CNT, "tx_512_1023_byte_frames"),
+ GEM_STAT_TITLE(TX1024CNT, "tx_1024_1518_byte_frames"),
+ GEM_STAT_TITLE(TX1519CNT, "tx_greater_than_1518_byte_frames"),
+ GEM_STAT_TITLE_BITS(TXURUNCNT, "tx_underrun",
+ GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_TXFIFOERR)),
+ GEM_STAT_TITLE_BITS(SNGLCOLLCNT, "tx_single_collision_frames",
+ GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+ GEM_STAT_TITLE_BITS(MULTICOLLCNT, "tx_multiple_collision_frames",
+ GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+ GEM_STAT_TITLE_BITS(EXCESSCOLLCNT, "tx_excessive_collisions",
+ GEM_BIT(NDS_TXERR)|
+ GEM_BIT(NDS_TXABORTEDERR)|
+ GEM_BIT(NDS_COLLISIONS)),
+ GEM_STAT_TITLE_BITS(LATECOLLCNT, "tx_late_collisions",
+ GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+ GEM_STAT_TITLE(TXDEFERCNT, "tx_deferred_frames"),
+ GEM_STAT_TITLE_BITS(TXCSENSECNT, "tx_carrier_sense_errors",
+ GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+ GEM_STAT_TITLE(OCTRXL, "rx_octets"), /* OCTRXH combined with OCTRXL */
+ GEM_STAT_TITLE(RXCNT, "rx_frames"),
+ GEM_STAT_TITLE(RXBROADCNT, "rx_broadcast_frames"),
+ GEM_STAT_TITLE(RXMULTICNT, "rx_multicast_frames"),
+ GEM_STAT_TITLE(RXPAUSECNT, "rx_pause_frames"),
+ GEM_STAT_TITLE(RX64CNT, "rx_64_byte_frames"),
+ GEM_STAT_TITLE(RX65CNT, "rx_65_127_byte_frames"),
+ GEM_STAT_TITLE(RX128CNT, "rx_128_255_byte_frames"),
+ GEM_STAT_TITLE(RX256CNT, "rx_256_511_byte_frames"),
+ GEM_STAT_TITLE(RX512CNT, "rx_512_1023_byte_frames"),
+ GEM_STAT_TITLE(RX1024CNT, "rx_1024_1518_byte_frames"),
+ GEM_STAT_TITLE(RX1519CNT, "rx_greater_than_1518_byte_frames"),
+ GEM_STAT_TITLE_BITS(RXUNDRCNT, "rx_undersized_frames",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+ GEM_STAT_TITLE_BITS(RXOVRCNT, "rx_oversize_frames",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+ GEM_STAT_TITLE_BITS(RXJABCNT, "rx_jabbers",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+ GEM_STAT_TITLE_BITS(RXFCSCNT, "rx_frame_check_sequence_errors",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXCRCERR)),
+ GEM_STAT_TITLE_BITS(RXLENGTHCNT, "rx_length_field_frame_errors",
+ GEM_BIT(NDS_RXERR)),
+ GEM_STAT_TITLE_BITS(RXSYMBCNT, "rx_symbol_errors",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFRAMEERR)),
+ GEM_STAT_TITLE_BITS(RXALIGNCNT, "rx_alignment_errors",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
+ GEM_STAT_TITLE_BITS(RXRESERRCNT, "rx_resource_errors",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
+ GEM_STAT_TITLE_BITS(RXORCNT, "rx_overruns",
+ GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFIFOERR)),
+ GEM_STAT_TITLE_BITS(RXIPCCNT, "rx_ip_header_checksum_errors",
+ GEM_BIT(NDS_RXERR)),
+ GEM_STAT_TITLE_BITS(RXTCPCCNT, "rx_tcp_checksum_errors",
+ GEM_BIT(NDS_RXERR)),
+ GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors",
+ GEM_BIT(NDS_RXERR)),
+};
+
+#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
+
struct macb;
struct macb_or_gem_ops {
@@ -673,6 +982,8 @@ struct macb {
dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
int skb_length; /* saved skb length for pci_unmap_single */
unsigned int max_tx_length;
+
+ u64 ethtool_stats[GEM_STATS_LEN];
};
extern const struct ethtool_ops macb_ethtool_ops;
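As an illustration of the gem_statistic table defined above, a single GEM_STAT_TITLE_BITS() entry expands roughly to the designated initializer below (a sketch, not additional patch content; GEM_BIT() is the bit helper already present in this header).

/* GEM_STAT_TITLE_BITS(RXFCSCNT, "rx_frame_check_sequence_errors",
 *                     GEM_BIT(NDS_RXERR) | GEM_BIT(NDS_RXCRCERR))
 */
static const struct gem_statistic example_entry = {
	.stat_string = "rx_frame_check_sequence_errors",
	.offset      = GEM_RXFCSCNT,	/* 0x0190 */
	.stat_bits   = GEM_BIT(NDS_RXERR) | GEM_BIT(NDS_RXCRCERR),
};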
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index babe2a915b00..526ea74e82d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1860,9 +1860,9 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
cpl->iff = dev->if_port;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
cpl->vlan_valid = 1;
- cpl->vlan = htons(vlan_tx_tag_get(skb));
+ cpl->vlan = htons(skb_vlan_tag_get(skb));
st->vlan_insert++;
} else
cpl->vlan_valid = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/mc5.c b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
index e13b7fe9d082..338301b11518 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/mc5.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
@@ -97,14 +97,6 @@ static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
}
-static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
- u32 v3)
-{
- t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
- t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
- t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
-}
-
static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
u32 v3)
{
@@ -113,14 +105,6 @@ static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
}
-static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
- u32 *v3)
-{
- *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
- *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
- *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
-}
-
/*
* Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
* command cmd. The data to be written must have been set up by the caller.
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 3dfcf600fcc6..d6aa602f168d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1148,8 +1148,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
cpl->len = htonl(skb->len);
cntrl = V_TXPKT_INTF(pi->port_id);
- if (vlan_tx_tag_present(skb))
- cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
+ if (skb_vlan_tag_present(skb))
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
if (tso_info) {
@@ -1282,7 +1282,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
qs->port_stats[SGE_PSTAT_TX_CSUM]++;
if (skb_shinfo(skb)->gso_size)
qs->port_stats[SGE_PSTAT_TSO]++;
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
qs->port_stats[SGE_PSTAT_VLANINS]++;
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 5ab5c3133acd..7c785b5e7757 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -290,11 +290,21 @@ enum chip_type {
T5_LAST_REV = T5_A1,
};
+struct devlog_params {
+ u32 memtype; /* which memory (EDC0, EDC1, MC) */
+ u32 start; /* start of log in firmware memory */
+ u32 size; /* size of log */
+};
+
struct adapter_params {
struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
+ struct devlog_params devlog;
+ enum pcie_memwin drv_memwin;
+
+ unsigned int cim_la_size;
unsigned int sf_size; /* serial flash size in bytes */
unsigned int sf_nsec; /* # of flash sectors */
@@ -1026,6 +1036,12 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp);
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+ const unsigned int *valp);
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c98a350d857e..e9f348942eb0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -43,6 +43,526 @@
#include "cxgb4_debugfs.h"
#include "l2t.h"
+/* generic seq_file support for showing a table of size rows x width. */
+static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
+{
+ pos -= tb->skip_first;
+ return pos >= tb->rows ? NULL : &tb->data[pos * tb->width];
+}
+
+static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
+{
+ struct seq_tab *tb = seq->private;
+
+ if (tb->skip_first && *pos == 0)
+ return SEQ_START_TOKEN;
+
+ return seq_tab_get_idx(tb, *pos);
+}
+
+static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ v = seq_tab_get_idx(seq->private, *pos + 1);
+ if (v)
+ ++*pos;
+ return v;
+}
+
+static void seq_tab_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int seq_tab_show(struct seq_file *seq, void *v)
+{
+ const struct seq_tab *tb = seq->private;
+
+ return tb->show(seq, v, ((char *)v - tb->data) / tb->width);
+}
+
+static const struct seq_operations seq_tab_ops = {
+ .start = seq_tab_start,
+ .next = seq_tab_next,
+ .stop = seq_tab_stop,
+ .show = seq_tab_show
+};
+
+struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
+ unsigned int width, unsigned int have_header,
+ int (*show)(struct seq_file *seq, void *v, int i))
+{
+ struct seq_tab *p;
+
+ p = __seq_open_private(f, &seq_tab_ops, sizeof(*p) + rows * width);
+ if (p) {
+ p->show = show;
+ p->rows = rows;
+ p->width = width;
+ p->skip_first = have_header != 0;
+ }
+ return p;
+}
+
+static int cim_la_show(struct seq_file *seq, void *v, int idx)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "Status Data PC LS0Stat LS0Addr "
+ " LS0Data\n");
+ else {
+ const u32 *p = v;
+
+ seq_printf(seq,
+ " %02x %x%07x %x%07x %08x %08x %08x%08x%08x%08x\n",
+ (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
+ p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
+ p[6], p[7]);
+ }
+ return 0;
+}
+
+static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Status Data PC\n");
+ } else {
+ const u32 *p = v;
+
+ seq_printf(seq, " %02x %08x %08x\n", p[5] & 0xff, p[6],
+ p[7]);
+ seq_printf(seq, " %02x %02x%06x %02x%06x\n",
+ (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
+ p[4] & 0xff, p[5] >> 8);
+ seq_printf(seq, " %02x %x%07x %x%07x\n", (p[0] >> 4) & 0xff,
+ p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4);
+ }
+ return 0;
+}
+
+static int cim_la_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ unsigned int cfg;
+ struct seq_tab *p;
+ struct adapter *adap = inode->i_private;
+
+ ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+ if (ret)
+ return ret;
+
+ p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
+ cfg & UPDBGLACAPTPCONLY_F ?
+ cim_la_show_3in1 : cim_la_show);
+ if (!p)
+ return -ENOMEM;
+
+ ret = t4_cim_read_la(adap, (u32 *)p->data, NULL);
+ if (ret)
+ seq_release_private(inode, file);
+ return ret;
+}
+
+static const struct file_operations cim_la_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_la_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+static int cim_qcfg_show(struct seq_file *seq, void *v)
+{
+ static const char * const qname[] = {
+ "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",
+ "SGE0-RX", "SGE1-RX"
+ };
+
+ int i;
+ struct adapter *adap = seq->private;
+ u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+ u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+ u32 stat[(4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5))];
+ u16 thres[CIM_NUM_IBQ];
+ u32 obq_wr_t4[2 * CIM_NUM_OBQ], *wr;
+ u32 obq_wr_t5[2 * CIM_NUM_OBQ_T5];
+ u32 *p = stat;
+ int cim_num_obq = is_t4(adap->params.chip) ?
+ CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+ i = t4_cim_read(adap, is_t4(adap->params.chip) ? UP_IBQ_0_RDADDR_A :
+ UP_IBQ_0_SHADOW_RDADDR_A,
+ ARRAY_SIZE(stat), stat);
+ if (!i) {
+ if (is_t4(adap->params.chip)) {
+ i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
+ ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
+ wr = obq_wr_t4;
+ } else {
+ i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
+ ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
+ wr = obq_wr_t5;
+ }
+ }
+ if (i)
+ return i;
+
+ t4_read_cimq_cfg(adap, base, size, thres);
+
+ seq_printf(seq,
+ " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail\n");
+ for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
+ seq_printf(seq, "%7s %5x %5u %5u %6x %4x %4u %4u %5u\n",
+ qname[i], base[i], size[i], thres[i],
+ IBQRDADDR_G(p[0]), IBQWRADDR_G(p[1]),
+ QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
+ QUEREMFLITS_G(p[2]) * 16);
+ for ( ; i < CIM_NUM_IBQ + cim_num_obq; i++, p += 4, wr += 2)
+ seq_printf(seq, "%7s %5x %5u %12x %4x %4u %4u %5u\n",
+ qname[i], base[i], size[i],
+ QUERDADDR_G(p[0]) & 0x3fff, wr[0] - base[i],
+ QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
+ QUEREMFLITS_G(p[2]) * 16);
+ return 0;
+}
+
+static int cim_qcfg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cim_qcfg_show, inode->i_private);
+}
+
+static const struct file_operations cim_qcfg_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_qcfg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Firmware Device Log dump. */
+static const char * const devlog_level_strings[] = {
+ [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
+ [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
+ [FW_DEVLOG_LEVEL_ERR] = "ERR",
+ [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
+ [FW_DEVLOG_LEVEL_INFO] = "INFO",
+ [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
+};
+
+static const char * const devlog_facility_strings[] = {
+ [FW_DEVLOG_FACILITY_CORE] = "CORE",
+ [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
+ [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
+ [FW_DEVLOG_FACILITY_RES] = "RES",
+ [FW_DEVLOG_FACILITY_HW] = "HW",
+ [FW_DEVLOG_FACILITY_FLR] = "FLR",
+ [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
+ [FW_DEVLOG_FACILITY_PHY] = "PHY",
+ [FW_DEVLOG_FACILITY_MAC] = "MAC",
+ [FW_DEVLOG_FACILITY_PORT] = "PORT",
+ [FW_DEVLOG_FACILITY_VI] = "VI",
+ [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
+ [FW_DEVLOG_FACILITY_ACL] = "ACL",
+ [FW_DEVLOG_FACILITY_TM] = "TM",
+ [FW_DEVLOG_FACILITY_QFC] = "QFC",
+ [FW_DEVLOG_FACILITY_DCB] = "DCB",
+ [FW_DEVLOG_FACILITY_ETH] = "ETH",
+ [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
+ [FW_DEVLOG_FACILITY_RI] = "RI",
+ [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
+ [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
+ [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
+ [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
+};
+
+/* Information gathered by Device Log Open routine for the display routine.
+ */
+struct devlog_info {
+ unsigned int nentries; /* number of entries in log[] */
+ unsigned int first; /* first [temporal] entry in log[] */
+ struct fw_devlog_e log[0]; /* Firmware Device Log */
+};
+
+/* Dump a Firmware Device Log entry.
+ */
+static int devlog_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_printf(seq, "%10s %15s %8s %8s %s\n",
+ "Seq#", "Tstamp", "Level", "Facility", "Message");
+ else {
+ struct devlog_info *dinfo = seq->private;
+ int fidx = (uintptr_t)v - 2;
+ unsigned long index;
+ struct fw_devlog_e *e;
+
+ /* Get a pointer to the log entry to display. Skip unused log
+ * entries.
+ */
+ index = dinfo->first + fidx;
+ if (index >= dinfo->nentries)
+ index -= dinfo->nentries;
+ e = &dinfo->log[index];
+ if (e->timestamp == 0)
+ return 0;
+
+ /* Print the message. This depends on the firmware using
+ * exactly the same formatting strings as the kernel so we may
+ * eventually have to put a format interpreter in here ...
+ */
+ seq_printf(seq, "%10d %15llu %8s %8s ",
+ e->seqno, e->timestamp,
+ (e->level < ARRAY_SIZE(devlog_level_strings)
+ ? devlog_level_strings[e->level]
+ : "UNKNOWN"),
+ (e->facility < ARRAY_SIZE(devlog_facility_strings)
+ ? devlog_facility_strings[e->facility]
+ : "UNKNOWN"));
+ seq_printf(seq, e->fmt, e->params[0], e->params[1],
+ e->params[2], e->params[3], e->params[4],
+ e->params[5], e->params[6], e->params[7]);
+ }
+ return 0;
+}
+
+/* Sequential File Operations for Device Log.
+ */
+static inline void *devlog_get_idx(struct devlog_info *dinfo, loff_t pos)
+{
+ if (pos > dinfo->nentries)
+ return NULL;
+
+ return (void *)(uintptr_t)(pos + 1);
+}
+
+static void *devlog_start(struct seq_file *seq, loff_t *pos)
+{
+ struct devlog_info *dinfo = seq->private;
+
+ return (*pos
+ ? devlog_get_idx(dinfo, *pos)
+ : SEQ_START_TOKEN);
+}
+
+static void *devlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct devlog_info *dinfo = seq->private;
+
+ (*pos)++;
+ return devlog_get_idx(dinfo, *pos);
+}
+
+static void devlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations devlog_seq_ops = {
+ .start = devlog_start,
+ .next = devlog_next,
+ .stop = devlog_stop,
+ .show = devlog_show
+};
+
+/* Set up for reading the firmware's device log. We read the entire log here
+ * and then display it incrementally in devlog_show().
+ */
+static int devlog_open(struct inode *inode, struct file *file)
+{
+ struct adapter *adap = inode->i_private;
+ struct devlog_params *dparams = &adap->params.devlog;
+ struct devlog_info *dinfo;
+ unsigned int index;
+ u32 fseqno;
+ int ret;
+
+ /* If we don't know where the log is we can't do anything.
+ */
+ if (dparams->start == 0)
+ return -ENXIO;
+
+ /* Allocate the space to read in the firmware's device log and set up
+ * for the iterated call to our display function.
+ */
+ dinfo = __seq_open_private(file, &devlog_seq_ops,
+ sizeof(*dinfo) + dparams->size);
+ if (!dinfo)
+ return -ENOMEM;
+
+ /* Record the basic log buffer information and read in the raw log.
+ */
+ dinfo->nentries = (dparams->size / sizeof(struct fw_devlog_e));
+ dinfo->first = 0;
+ spin_lock(&adap->win0_lock);
+ ret = t4_memory_rw(adap, adap->params.drv_memwin, dparams->memtype,
+ dparams->start, dparams->size, (__be32 *)dinfo->log,
+ T4_MEMORY_READ);
+ spin_unlock(&adap->win0_lock);
+ if (ret) {
+ seq_release_private(inode, file);
+ return ret;
+ }
+
+ /* Translate log multi-byte integral elements into host native format
+ * and determine where the first entry in the log is.
+ */
+ for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
+ struct fw_devlog_e *e = &dinfo->log[index];
+ int i;
+ __u32 seqno;
+
+ if (e->timestamp == 0)
+ continue;
+
+ e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
+ seqno = be32_to_cpu(e->seqno);
+ for (i = 0; i < 8; i++)
+ e->params[i] =
+ (__force __be32)be32_to_cpu(e->params[i]);
+
+ if (seqno < fseqno) {
+ fseqno = seqno;
+ dinfo->first = index;
+ }
+ }
+ return 0;
+}
+
+static const struct file_operations devlog_fops = {
+ .owner = THIS_MODULE,
+ .open = devlog_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
+{
+ *mask = x | y;
+ y = (__force u64)cpu_to_be64(y);
+ memcpy(addr, (char *)&y + 2, ETH_ALEN);
+}
+
+static int mps_tcam_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF"
+ " VF Replication "
+ "P0 P1 P2 P3 ML\n");
+ else {
+ u64 mask;
+ u8 addr[ETH_ALEN];
+ struct adapter *adap = seq->private;
+ unsigned int idx = (uintptr_t)v - 2;
+ u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
+ u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
+ u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
+ u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
+ u32 rplc[4] = {0, 0, 0, 0};
+
+ if (tcamx & tcamy) {
+ seq_printf(seq, "%3u -\n", idx);
+ goto out;
+ }
+
+ if (cls_lo & REPLICATE_F) {
+ struct fw_ldst_cmd ldst_cmd;
+ int ret;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(
+ FW_LDST_ADDRSPC_MPS));
+ ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.mps.fid_ctl =
+ htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
+ FW_LDST_CMD_CTL_V(idx));
+ ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
+ if (ret)
+ dev_warn(adap->pdev_dev, "Can't read MPS "
+ "replication map for idx %d: %d\n",
+ idx, -ret);
+ else {
+ rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
+ rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
+ rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
+ rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
+ }
+ }
+
+ tcamxy2valmask(tcamx, tcamy, addr, &mask);
+ seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
+ "%3c %#x%4u%4d",
+ idx, addr[0], addr[1], addr[2], addr[3], addr[4],
+ addr[5], (unsigned long long)mask,
+ (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
+ PF_G(cls_lo),
+ (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
+ if (cls_lo & REPLICATE_F)
+ seq_printf(seq, " %08x %08x %08x %08x",
+ rplc[3], rplc[2], rplc[1], rplc[0]);
+ else
+ seq_printf(seq, "%36c", ' ');
+ seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+ SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
+ SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
+ (cls_lo >> MULTILISTEN0_S) & 0xf);
+ }
+out: return 0;
+}
+
+static inline void *mps_tcam_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct adapter *adap = seq->private;
+ int max_mac_addr = is_t4(adap->params.chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ return ((pos <= max_mac_addr) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mps_tcam_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? mps_tcam_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mps_tcam_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return mps_tcam_get_idx(seq, *pos);
+}
+
+static void mps_tcam_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mps_tcam_seq_ops = {
+ .start = mps_tcam_start,
+ .next = mps_tcam_next,
+ .stop = mps_tcam_stop,
+ .show = mps_tcam_show
+};
+
+static int mps_tcam_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &mps_tcam_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations mps_tcam_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = mps_tcam_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
@@ -121,7 +641,11 @@ int t4_setup_debugfs(struct adapter *adap)
u32 size;
static struct t4_debugfs_entry t4_debugfs_files[] = {
+ { "cim_la", &cim_la_fops, S_IRUSR, 0 },
+ { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
+ { "devlog", &devlog_fops, S_IRUSR, 0 },
{ "l2t", &t4_l2t_fops, S_IRUSR, 0},
+ { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
};
add_debugfs_files(adap,
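A minimal sketch of how the seq_open_tab() helper introduced above is meant to be used from a debugfs open routine; the row count, entry width and show callback are illustrative, and the file would pair with seq_read/seq_release_private as the cim_la entry does.

#include <linux/seq_file.h>
#include "cxgb4_debugfs.h"

static int my_row_show(struct seq_file *seq, void *v, int idx)
{
	const u32 *p = v;

	if (v == SEQ_START_TOKEN)		/* optional header row */
		seq_puts(seq, "Idx     Value\n");
	else
		seq_printf(seq, "%3d  %08x\n", idx, p[0]);
	return 0;
}

static int my_debugfs_open(struct inode *inode, struct file *file)
{
	/* 64 rows, one u32 per row, first output line is a header. */
	struct seq_tab *p = seq_open_tab(file, 64, sizeof(u32), 1,
					 my_row_show);

	if (!p)
		return -ENOMEM;

	/* ... fill p->data with 64 * sizeof(u32) bytes of samples ... */
	return 0;
}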
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
index a3d8867efd3d..70fcbc930826 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -44,6 +44,18 @@ struct t4_debugfs_entry {
unsigned char data;
};
+struct seq_tab {
+ int (*show)(struct seq_file *seq, void *v, int idx);
+ unsigned int rows; /* # of entries */
+ unsigned char width; /* size in bytes of each entry */
+ unsigned char skip_first; /* whether the first line is a header */
+ char data[0]; /* the table data */
+};
+
+struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
+ unsigned int width, unsigned int have_header,
+ int (*show)(struct seq_file *seq, void *v, int i));
+
int t4_setup_debugfs(struct adapter *adap);
void add_debugfs_files(struct adapter *adap,
struct t4_debugfs_entry *files,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index ccf3436024bc..082a596a4264 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -66,6 +66,7 @@
#include "cxgb4.h"
#include "t4_regs.h"
+#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_dcb.h"
@@ -78,99 +79,6 @@
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"
-/*
- * Max interrupt hold-off timer value in us. Queues fall back to this value
- * under extreme memory pressure so it's largish to give the system time to
- * recover.
- */
-#define MAX_SGE_TIMERVAL 200U
-
-enum {
- /*
- * Physical Function provisioning constants.
- */
- PFRES_NVI = 4, /* # of Virtual Interfaces */
- PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
- PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
- */
- PFRES_NEQ = 256, /* # of egress queues */
- PFRES_NIQ = 0, /* # of ingress queues */
- PFRES_TC = 0, /* PCI-E traffic class */
- PFRES_NEXACTF = 128, /* # of exact MPS filters */
-
- PFRES_R_CAPS = FW_CMD_CAP_PF,
- PFRES_WX_CAPS = FW_CMD_CAP_PF,
-
-#ifdef CONFIG_PCI_IOV
- /*
- * Virtual Function provisioning constants. We need two extra Ingress
- * Queues with Interrupt capability to serve as the VF's Firmware
- * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
- * neither will have Free Lists associated with them). For each
- * Ethernet/Control Egress Queue and for each Free List, we need an
- * Egress Context.
- */
- VFRES_NPORTS = 1, /* # of "ports" per VF */
- VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
-
- VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
- VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
- VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
- VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
- VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
- VFRES_TC = 0, /* PCI-E traffic class */
- VFRES_NEXACTF = 16, /* # of exact MPS filters */
-
- VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
- VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
-#endif
-};
-
-/*
- * Provide a Port Access Rights Mask for the specified PF/VF. This is very
- * static and likely not to be useful in the long run. We really need to
- * implement some form of persistent configuration which the firmware
- * controls.
- */
-static unsigned int pfvfres_pmask(struct adapter *adapter,
- unsigned int pf, unsigned int vf)
-{
- unsigned int portn, portvec;
-
- /*
- * Give PF's access to all of the ports.
- */
- if (vf == 0)
- return FW_PFVF_CMD_PMASK_M;
-
- /*
- * For VFs, we'll assign them access to the ports based purely on the
- * PF. We assign active ports in order, wrapping around if there are
- * fewer active ports than PFs: e.g. active port[pf % nports].
- * Unfortunately the adapter's port_info structs haven't been
- * initialized yet so we have to compute this.
- */
- if (adapter->params.nports == 0)
- return 0;
-
- portn = pf % adapter->params.nports;
- portvec = adapter->params.portvec;
- for (;;) {
- /*
- * Isolate the lowest set bit in the port vector. If we're at
- * the port number that we want, return that as the pmask.
- * otherwise mask that bit out of the port vector and
- * decrement our port number ...
- */
- unsigned int pmask = portvec ^ (portvec & (portvec-1));
- if (portn == 0)
- return pmask;
- portn--;
- portvec &= ~pmask;
- }
- /*NOTREACHED*/
-}
-
enum {
MAX_TXQ_ENTRIES = 16384,
MAX_CTRL_TXQ_ENTRIES = 1024,
@@ -263,7 +171,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter")
static uint force_old_init;
module_param(force_old_init, uint, 0644);
-MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
+MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
+ " parameter");
static int dflt_msg_enable = DFLT_MSG_ENABLE;
@@ -292,13 +201,14 @@ static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
- "0..4 in microseconds");
+ "0..4 in microseconds, deprecated parameter");
static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
- "thresholds 1..3 for queue interrupt packet counters");
+ "thresholds 1..3 for queue interrupt packet counters, "
+ "deprecated parameter");
/*
* Normally we tell the chip to deliver Ingress Packets into our DMA buffers
@@ -318,7 +228,8 @@ static bool vf_acls;
#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
-MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
+MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
+ "deprecated parameter");
/* Configure the number of PCI-E Virtual Function which are to be instantiated
* on SR-IOV Capable Physical Functions.
@@ -340,32 +251,11 @@ module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
"Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
-/*
- * The filter TCAM has a fixed portion and a variable portion. The fixed
- * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
- * ports. The variable portion is 36 bits which can include things like Exact
- * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
- * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
- * far exceed the 36-bit budget for this "compressed" header portion of the
- * filter. Thus, we have a scarce resource which must be carefully managed.
- *
- * By default we set this up to mostly match the set of filter matching
- * capabilities of T3 but with accommodations for some of T4's more
- * interesting features:
- *
- * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
- * [Inner] VLAN (17), Port (3), FCoE (1) }
- */
-enum {
- TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
- TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
- TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
-};
-
-static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
module_param(tp_vlan_pri_map, uint, 0644);
-MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
+MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
+ "deprecated parameter");
static struct dentry *cxgb4_debugfs_root;
@@ -671,7 +561,7 @@ static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
if (idx >= adap->tids.ftid_base && nidx <
(adap->tids.nftids + adap->tids.nsftids)) {
idx = nidx;
- ret = GET_TCB_COOKIE(rpl->cookie);
+ ret = TCB_COOKIE_G(rpl->cookie);
f = &adap->tids.ftid_tab[idx];
if (ret == FW_FILTER_WR_FLT_DELETED) {
@@ -723,7 +613,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
const struct cpl_sge_egr_update *p = (void *)rsp;
- unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
+ unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
struct sge_txq *txq;
txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
@@ -833,11 +723,11 @@ static void disable_msi(struct adapter *adapter)
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
struct adapter *adap = cookie;
+ u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
- u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
- if (v & PFSW) {
+ if (v & PFSW_F) {
adap->swintr = 1;
- t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
+ t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
}
t4_slow_intr_handler(adap);
return IRQ_HANDLED;
@@ -1050,9 +940,9 @@ static void enable_rx(struct adapter *adap)
if (q->handler)
napi_enable(&q->napi);
/* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
- SEINTARM(q->intr_params) |
- INGRESSQID(q->cntxt_id));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+ SEINTARM_V(q->intr_params) |
+ INGRESSQID_V(q->cntxt_id));
}
}
@@ -1176,10 +1066,10 @@ freeout: t4_free_sge_resources(adap);
}
t4_write_reg(adap, is_t4(adap->params.chip) ?
- MPS_TRC_RSS_CONTROL :
- MPS_T5_TRC_RSS_CONTROL,
- RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
- QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
+ MPS_TRC_RSS_CONTROL_A :
+ MPS_T5_TRC_RSS_CONTROL_A,
+ RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
+ QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
return 0;
}
@@ -1589,9 +1479,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
data += sizeof(struct queue_port_stats) / sizeof(u64);
if (!is_t4(adapter->params.chip)) {
- t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
- val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
- val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
+ t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
+ val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
+ val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
*data = val1 - val2;
data++;
*data = val2;
@@ -3415,8 +3305,8 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
req->peer_ip = htonl(0);
chan = rxq_to_chan(&adap->sge, queue);
req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
- req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
- SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+ req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+ SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
ret = t4_mgmt_tx(adap, skb);
return net_xmit_eval(ret);
}
@@ -3458,8 +3348,8 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
req->peer_ip_lo = cpu_to_be64(0);
chan = rxq_to_chan(&adap->sge, queue);
req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
- req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
- SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+ req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+ SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
ret = t4_mgmt_tx(adap, skb);
return net_xmit_eval(ret);
}
@@ -3482,8 +3372,8 @@ int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
- req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
- LISTSVR_IPV6(0)) | QUEUENO(queue));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
+ LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
ret = t4_mgmt_tx(adap, skb);
return net_xmit_eval(ret);
}
@@ -3600,14 +3490,14 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
struct adapter *adap = netdev2adap(dev);
u32 v1, v2, lp_count, hp_count;
- v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
if (is_t4(adap->params.chip)) {
- lp_count = G_LP_COUNT(v1);
- hp_count = G_HP_COUNT(v1);
+ lp_count = LP_COUNT_G(v1);
+ hp_count = HP_COUNT_G(v1);
} else {
- lp_count = G_LP_COUNT_T5(v1);
- hp_count = G_HP_COUNT_T5(v2);
+ lp_count = LP_COUNT_T5_G(v1);
+ hp_count = HP_COUNT_T5_G(v2);
}
return lpfifo ? lp_count : hp_count;
}
@@ -3653,10 +3543,10 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
{
struct adapter *adap = netdev2adap(dev);
- t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
- t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
- HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
- HPZ3(pgsz_order[3]));
+ t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
+ t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
+ HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
+ HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
@@ -3666,14 +3556,14 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
int ret;
ret = t4_fwaddrspace_write(adap, adap->mbox,
- 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
+ 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
return ret;
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
- u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
+ u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
__be64 indices;
int ret;
@@ -3702,14 +3592,20 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
if (pidx != hw_pidx) {
u16 delta;
+ u32 val;
if (pidx >= hw_pidx)
delta = pidx - hw_pidx;
else
delta = size - hw_pidx + pidx;
+
+ if (is_t4(adap->params.chip))
+ val = PIDX_V(delta);
+ else
+ val = PIDX_T5_V(delta);
wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(qid) | PIDX(delta));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+ QID_V(qid) | val);
}
out:
return ret;
@@ -3721,8 +3617,8 @@ void cxgb4_disable_db_coalescing(struct net_device *dev)
struct adapter *adap;
adap = netdev2adap(dev);
- t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
- F_NOCOALESCE);
+ t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
+ NOCOALESCE_F);
}
EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
@@ -3731,7 +3627,7 @@ void cxgb4_enable_db_coalescing(struct net_device *dev)
struct adapter *adap;
adap = netdev2adap(dev);
- t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
+ t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
}
EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
@@ -3809,8 +3705,8 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev)
struct adapter *adap;
adap = netdev2adap(dev);
- lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
- hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
+ lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
+ hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
return ((u64)hi << 32) | (u64)lo;
}
@@ -3870,14 +3766,14 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
u32 v1, v2, lp_count, hp_count;
do {
- v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
if (is_t4(adap->params.chip)) {
- lp_count = G_LP_COUNT(v1);
- hp_count = G_HP_COUNT(v1);
+ lp_count = LP_COUNT_G(v1);
+ hp_count = HP_COUNT_G(v1);
} else {
- lp_count = G_LP_COUNT_T5(v1);
- hp_count = G_HP_COUNT_T5(v2);
+ lp_count = LP_COUNT_T5_G(v1);
+ hp_count = HP_COUNT_T5_G(v2);
}
if (lp_count == 0 && hp_count == 0)
@@ -3904,8 +3800,8 @@ static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
* are committed before we tell HW about them.
*/
wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+ QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
q->db_pidx_inc = 0;
}
q->db_disabled = 0;
@@ -3952,9 +3848,9 @@ static void process_db_full(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- t4_set_reg_field(adap, SGE_INT_ENABLE3,
- DBFIFO_HP_INT | DBFIFO_LP_INT,
- DBFIFO_HP_INT | DBFIFO_LP_INT);
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3968,14 +3864,20 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
goto out;
if (q->db_pidx != hw_pidx) {
u16 delta;
+ u32 val;
if (q->db_pidx >= hw_pidx)
delta = q->db_pidx - hw_pidx;
else
delta = q->size - hw_pidx + q->db_pidx;
+
+ if (is_t4(adap->params.chip))
+ val = PIDX_V(delta);
+ else
+ val = PIDX_T5_V(delta);
wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | PIDX(delta));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+ QID_V(q->cntxt_id) | val);
}
out:
q->db_disabled = 0;
@@ -4024,14 +3926,14 @@ static void process_db_drop(struct work_struct *work)
dev_err(adap->pdev_dev, "doorbell drop recovery: "
"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
else
- writel(PIDX_T5(pidx_inc) | QID(bar2_qid),
+ writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
/* Re-enable BAR2 WC */
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
}
- t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
+ t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
@@ -4039,8 +3941,8 @@ void t4_db_full(struct adapter *adap)
if (is_t4(adap->params.chip)) {
disable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
- t4_set_reg_field(adap, SGE_INT_ENABLE3,
- DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
queue_work(adap->workq, &adap->db_full_task);
}
}
@@ -4081,7 +3983,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.nports = adap->params.nports;
lli.wr_cred = adap->params.ofldq_wr_cred;
lli.adapter_type = adap->params.chip;
- lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+ lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
lli.udb_density = 1 << adap->params.sge.eq_qpp;
lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -4089,8 +3991,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
lli.tx_modq[i] = i;
- lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
- lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
+ lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
+ lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
lli.fw_vers = adap->params.fw_vers;
lli.dbfifo_int_thresh = dbfifo_int_thresh;
lli.sge_ingpadboundary = adap->sge.fl_align;
@@ -4567,13 +4469,13 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
f->fs.val.lip[i] = val[i];
f->fs.mask.lip[i] = ~0;
}
- if (adap->params.tp.vlan_pri_map & F_PORT) {
+ if (adap->params.tp.vlan_pri_map & PORT_F) {
f->fs.val.iport = port;
f->fs.mask.iport = mask;
}
}
- if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+ if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
f->fs.val.proto = IPPROTO_TCP;
f->fs.mask.proto = ~0;
}
@@ -4783,7 +4685,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
void t4_fatal_err(struct adapter *adap)
{
- t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
+ t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
t4_intr_disable(adap);
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
@@ -4858,16 +4760,16 @@ static void setup_memwin(struct adapter *adap)
mem_win2_base = MEMWIN2_BASE_T5;
mem_win2_aperture = MEMWIN2_APERTURE_T5;
}
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
- mem_win0_base | BIR(0) |
- WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
- mem_win1_base | BIR(0) |
- WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
- mem_win2_base | BIR(0) |
- WINDOW(ilog2(mem_win2_aperture) - 10));
- t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
+ mem_win0_base | BIR_V(0) |
+ WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
+ mem_win1_base | BIR_V(0) |
+ WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
+ mem_win2_base | BIR_V(0) |
+ WINDOW_V(ilog2(mem_win2_aperture) - 10));
+ t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
}
static void setup_memwin_rdma(struct adapter *adap)
@@ -4881,13 +4783,13 @@ static void setup_memwin_rdma(struct adapter *adap)
start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
- start | BIR(1) | WINDOW(ilog2(sz_kb)));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
+ start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
adap->vres.ocq.start);
t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
}
}
@@ -4936,38 +4838,38 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
t4_sge_init(adap);
/* tweak some settings */
- t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
- t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
- t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
- v = t4_read_reg(adap, TP_PIO_DATA);
- t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+ t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
+ t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
+ t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
+ v = t4_read_reg(adap, TP_PIO_DATA_A);
+ t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
/* first 4 Tx modulation queues point to consecutive Tx channels */
adap->params.tp.tx_modq_map = 0xE4;
- t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
- V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
+ t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
+ TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
/* associate each Tx modulation queue with consecutive Tx channels */
v = 0x84218421;
- t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
- &v, 1, A_TP_TX_SCHED_HDR);
- t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
- &v, 1, A_TP_TX_SCHED_FIFO);
- t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
- &v, 1, A_TP_TX_SCHED_PCMD);
+ t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &v, 1, TP_TX_SCHED_HDR_A);
+ t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &v, 1, TP_TX_SCHED_FIFO_A);
+ t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &v, 1, TP_TX_SCHED_PCMD_A);
#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
if (is_offload(adap)) {
- t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
- V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
- t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
- V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
+ TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
+ TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
}
/* get basic stuff going */
@@ -5013,16 +4915,16 @@ static int adap_init0_tweaks(struct adapter *adapter)
rx_dma_offset);
rx_dma_offset = 2;
}
- t4_set_reg_field(adapter, SGE_CONTROL,
- PKTSHIFT_MASK,
- PKTSHIFT(rx_dma_offset));
+ t4_set_reg_field(adapter, SGE_CONTROL_A,
+ PKTSHIFT_V(PKTSHIFT_M),
+ PKTSHIFT_V(rx_dma_offset));
/*
* Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
* adds the pseudo header itself.
*/
- t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
- CSUM_HAS_PSEUDO_HDR, 0);
+ t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
+ CSUM_HAS_PSEUDO_HDR_F, 0);
return 0;
}
@@ -5046,7 +4948,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
*/
if (reset) {
ret = t4_fw_reset(adapter, adapter->mbox,
- PIORSTMODE | PIORST);
+ PIORSTMODE_F | PIORST_F);
if (ret < 0)
goto bye;
}
@@ -5212,12 +5114,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret < 0)
goto bye;
- /*
- * Return successfully and note that we're operating with parameters
- * not supplied by the driver, rather than from hard-wired
- * initialization constants burried in the driver.
+ /* Emit Firmware Configuration File information and return
+ * successfully.
*/
- adapter->flags |= USING_SOFT_PARAMS;
dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
"Configuration File \"%s\", version %#x, computed checksum %#x\n",
config_name, finiver, cfcsum);
@@ -5235,249 +5134,6 @@ bye:
return ret;
}
-/*
- * Attempt to initialize the adapter via hard-coded, driver supplied
- * parameters ...
- */
-static int adap_init0_no_config(struct adapter *adapter, int reset)
-{
- struct sge *s = &adapter->sge;
- struct fw_caps_config_cmd caps_cmd;
- u32 v;
- int i, ret;
-
- /*
- * Reset device if necessary
- */
- if (reset) {
- ret = t4_fw_reset(adapter, adapter->mbox,
- PIORSTMODE | PIORST);
- if (ret < 0)
- goto bye;
- }
-
- /*
- * Get device capabilities and select which we'll be using.
- */
- memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F);
- caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
- ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
- &caps_cmd);
- if (ret < 0)
- goto bye;
-
- if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
- if (!vf_acls)
- caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
- else
- caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
- } else if (vf_acls) {
- dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
- goto bye;
- }
- caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
- ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
- NULL);
- if (ret < 0)
- goto bye;
-
- /*
- * Tweak configuration based on system architecture, module
- * parameters, etc.
- */
- ret = adap_init0_tweaks(adapter);
- if (ret < 0)
- goto bye;
-
- /*
- * Select RSS Global Mode we want to use. We use "Basic Virtual"
- * mode which maps each Virtual Interface to its own section of
- * the RSS Table and we turn on all map and hash enables ...
- */
- adapter->flags |= RSS_TNLALLLOOKUP;
- ret = t4_config_glbl_rss(adapter, adapter->mbox,
- FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
- FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
- FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F |
- ((adapter->flags & RSS_TNLALLLOOKUP) ?
- FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0));
- if (ret < 0)
- goto bye;
-
- /*
- * Set up our own fundamental resource provisioning ...
- */
- ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
- PFRES_NEQ, PFRES_NETHCTRL,
- PFRES_NIQFLINT, PFRES_NIQ,
- PFRES_TC, PFRES_NVI,
- FW_PFVF_CMD_CMASK_M,
- pfvfres_pmask(adapter, adapter->fn, 0),
- PFRES_NEXACTF,
- PFRES_R_CAPS, PFRES_WX_CAPS);
- if (ret < 0)
- goto bye;
-
- /*
- * Perform low level SGE initialization. We need to do this before we
- * send the firmware the INITIALIZE command because that will cause
- * any other PF Drivers which are waiting for the Master
- * Initialization to proceed forward.
- */
- for (i = 0; i < SGE_NTIMERS - 1; i++)
- s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
- s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
- s->counter_val[0] = 1;
- for (i = 1; i < SGE_NCOUNTERS; i++)
- s->counter_val[i] = min(intr_cnt[i - 1],
- THRESHOLD_0_GET(THRESHOLD_0_MASK));
- t4_sge_init(adapter);
-
-#ifdef CONFIG_PCI_IOV
- /*
- * Provision resource limits for Virtual Functions. We currently
- * grant them all the same static resource limits except for the Port
- * Access Rights Mask which we're assigning based on the PF. All of
- * the static provisioning stuff for both the PF and VF really needs
- * to be managed in a persistent manner for each device which the
- * firmware controls.
- */
- {
- int pf, vf;
-
- for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
- if (num_vf[pf] <= 0)
- continue;
-
- /* VF numbering starts at 1! */
- for (vf = 1; vf <= num_vf[pf]; vf++) {
- ret = t4_cfg_pfvf(adapter, adapter->mbox,
- pf, vf,
- VFRES_NEQ, VFRES_NETHCTRL,
- VFRES_NIQFLINT, VFRES_NIQ,
- VFRES_TC, VFRES_NVI,
- FW_PFVF_CMD_CMASK_M,
- pfvfres_pmask(
- adapter, pf, vf),
- VFRES_NEXACTF,
- VFRES_R_CAPS, VFRES_WX_CAPS);
- if (ret < 0)
- dev_warn(adapter->pdev_dev,
- "failed to "\
- "provision pf/vf=%d/%d; "
- "err=%d\n", pf, vf, ret);
- }
- }
- }
-#endif
-
- /*
- * Set up the default filter mode. Later we'll want to implement this
- * via a firmware command, etc. ... This needs to be done before the
- * firmare initialization command ... If the selected set of fields
- * isn't equal to the default value, we'll need to make sure that the
- * field selections will fit in the 36-bit budget.
- */
- if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
- int j, bits = 0;
-
- for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
- switch (tp_vlan_pri_map & (1 << j)) {
- case 0:
- /* compressed filter field not enabled */
- break;
- case FCOE_MASK:
- bits += 1;
- break;
- case PORT_MASK:
- bits += 3;
- break;
- case VNIC_ID_MASK:
- bits += 17;
- break;
- case VLAN_MASK:
- bits += 17;
- break;
- case TOS_MASK:
- bits += 8;
- break;
- case PROTOCOL_MASK:
- bits += 8;
- break;
- case ETHERTYPE_MASK:
- bits += 16;
- break;
- case MACMATCH_MASK:
- bits += 9;
- break;
- case MPSHITTYPE_MASK:
- bits += 3;
- break;
- case FRAGMENTATION_MASK:
- bits += 1;
- break;
- }
-
- if (bits > 36) {
- dev_err(adapter->pdev_dev,
- "tp_vlan_pri_map=%#x needs %d bits > 36;"\
- " using %#x\n", tp_vlan_pri_map, bits,
- TP_VLAN_PRI_MAP_DEFAULT);
- tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
- }
- }
- v = tp_vlan_pri_map;
- t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
- &v, 1, TP_VLAN_PRI_MAP);
-
- /*
- * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
- * to support any of the compressed filter fields above. Newer
- * versions of the firmware do this automatically but it doesn't hurt
- * to set it here. Meanwhile, we do _not_ need to set Lookup Every
- * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
- * since the firmware automatically turns this on and off when we have
- * a non-zero number of filters active (since it does have a
- * performance impact).
- */
- if (tp_vlan_pri_map)
- t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
- FIVETUPLELOOKUP_MASK,
- FIVETUPLELOOKUP_MASK);
-
- /*
- * Tweak some settings.
- */
- t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
- RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
- PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
- KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
-
- /*
- * Get basic stuff going by issuing the Firmware Initialize command.
- * Note that this _must_ be after all PFVF commands ...
- */
- ret = t4_fw_initialize(adapter, adapter->mbox);
- if (ret < 0)
- goto bye;
-
- /*
- * Return successfully!
- */
- dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
- "driver parameters\n");
- return 0;
-
- /*
- * Something bad happened. Return the error ...
- */
-bye:
- return ret;
-}
-
static struct fw_info fw_info_array[] = {
{
.chip = CHELSIO_T4,
@@ -5529,6 +5185,8 @@ static int adap_init0(struct adapter *adap)
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
+ struct fw_devlog_cmd devlog_cmd;
+ u32 devlog_meminfo;
int reset = 1;
/* Contact FW, advertising Master capability */
@@ -5609,6 +5267,30 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
+ /* Read firmware device log parameters. We really need to find a way
+ * to get these parameters initialized with some default values (which
+ * are likely to be correct) for the case where we either don't
+ * attach to the firmware or it's crashed when we probe the adapter.
+ * That way we'll still be able to perform early firmware startup
+ * debugging ... If the request to get the Firmware's Device Log
+ * parameters fails, we'll live so we don't make that a fatal error.
+ */
+ memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+ devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+ &devlog_cmd);
+ if (ret == 0) {
+ devlog_meminfo =
+ ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+ adap->params.devlog.memtype =
+ FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
+ adap->params.devlog.start =
+ FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
+ adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
+ }
+
/*
* Find out what ports are available to us. Note that we need to do
* this before calling adap_init0_no_config() since it needs nports
@@ -5624,88 +5306,58 @@ static int adap_init0(struct adapter *adap)
adap->params.nports = hweight32(port_vec);
adap->params.portvec = port_vec;
- /*
- * If the firmware is initialized already (and we're not forcing a
- * master initialization), note that we're living with existing
- * adapter parameters. Otherwise, it's time to try initializing the
- * adapter ...
+ /* If the firmware is initialized already, emit a simple note to that
+ * effect. Otherwise, it's time to try initializing the adapter.
*/
if (state == DEV_STATE_INIT) {
dev_info(adap->pdev_dev, "Coming up as %s: "\
"Adapter already initialized\n",
adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
- adap->flags |= USING_SOFT_PARAMS;
} else {
dev_info(adap->pdev_dev, "Coming up as MASTER: "\
"Initializing adapter\n");
- /*
- * If the firmware doesn't support Configuration
- * Files warn user and exit,
+
+ /* Find out whether we're dealing with a version of the
+ * firmware which has configuration file support.
*/
- if (ret < 0)
- dev_warn(adap->pdev_dev, "Firmware doesn't support "
- "configuration file.\n");
- if (force_old_init)
- ret = adap_init0_no_config(adap, reset);
- else {
- /*
- * Find out whether we're dealing with a version of
- * the firmware which has configuration file support.
- */
- params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(
- FW_PARAMS_PARAM_DEV_CF));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
- params, val);
-
- /*
- * If the firmware doesn't support Configuration
- * Files, use the old Driver-based, hard-wired
- * initialization. Otherwise, try using the
- * Configuration File support and fall back to the
- * Driver-based initialization if there's no
- * Configuration File found.
- */
- if (ret < 0)
- ret = adap_init0_no_config(adap, reset);
- else {
- /*
- * The firmware provides us with a memory
- * buffer where we can load a Configuration
- * File from the host if we want to override
- * the Configuration File in flash.
- */
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+ params, val);
- ret = adap_init0_config(adap, reset);
- if (ret == -ENOENT) {
- dev_info(adap->pdev_dev,
- "No Configuration File present "
- "on adapter. Using hard-wired "
- "configuration parameters.\n");
- ret = adap_init0_no_config(adap, reset);
- }
- }
+ /* If the firmware doesn't support Configuration Files,
+ * return an error.
+ */
+ if (ret < 0) {
+ dev_err(adap->pdev_dev, "firmware doesn't support "
+ "Firmware Configuration Files\n");
+ goto bye;
+ }
+
+ /* The firmware provides us with a memory buffer where we can
+ * load a Configuration File from the host if we want to
+ * override the Configuration File in flash.
+ */
+ ret = adap_init0_config(adap, reset);
+ if (ret == -ENOENT) {
+ dev_err(adap->pdev_dev, "no Configuration File "
+ "present on adapter.\n");
+ goto bye;
}
if (ret < 0) {
- dev_err(adap->pdev_dev,
- "could not initialize adapter, error %d\n",
- -ret);
+ dev_err(adap->pdev_dev, "could not initialize "
+ "adapter, error %d\n", -ret);
goto bye;
}
}
- /*
- * If we're living with non-hard-coded parameters (either from a
- * Firmware Configuration File or values programmed by a different PF
- * Driver), give the SGE code a chance to pull in anything that it
- * needs ... Note that this must be called after we retrieve our VPD
- * parameters in order to know how to convert core ticks to seconds.
+ /* Give the SGE code a chance to pull in anything that it needs ...
+ * Note that this must be called after we retrieve our VPD parameters
+ * in order to know how to convert core ticks to seconds, etc.
*/
- if (adap->flags & USING_SOFT_PARAMS) {
- ret = t4_sge_init(adap);
- if (ret < 0)
- goto bye;
- }
+ ret = t4_sge_init(adap);
+ if (ret < 0)
+ goto bye;
if (is_bypass_device(adap->pdev->device))
adap->params.bypass = 1;
@@ -6401,7 +6053,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_unmap_bar0;
/* We control everything through one PF */
- func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
+ func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
if (func != ent->driver_data) {
iounmap(regs);
pci_disable_device(pdev);
@@ -6467,9 +6119,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!is_t4(adapter->params.chip)) {
- s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
- qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
- SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+ s_qpp = (QUEUESPERPAGEPF0_S +
+ (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
+ adapter->fn);
+ qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
num_seg = PAGE_SIZE / SEGMENT_SIZE;
/* Each segment size is 128B. Write coalescing is enabled only
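
The bulk of this patch is a mechanical rename to the t4_regs.h/t4_values.h naming scheme: an _A suffix for register addresses, _S/_M for a field's shift and mask, _V() to compose a field value, _G() to extract one, and _F for single-bit flags. A minimal sketch of what such a field declaration looks like, using a made-up field FOO in bits 15:8 (illustrative only, not copied from the real headers):

#define FOO_S    8                              /* field shift */
#define FOO_M    0xffU                          /* field mask, applied after the shift */
#define FOO_V(x) ((x) << FOO_S)                 /* compose a value for a register write */
#define FOO_G(x) (((x) >> FOO_S) & FOO_M)       /* extract the field from a register read */
#define FOO_F    FOO_V(1U)                      /* flag form, used when the field is one bit wide */
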
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index a047baa9fd04..252efc29321f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -46,6 +46,7 @@
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
+#include "t4_values.h"
#define VLAN_NONE 0xfff
@@ -150,8 +151,8 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
e->idx | (sync ? F_SYNC_WR : 0) |
- TID_QID(adap->sge.fw_evtq.abs_id)));
- req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
+ TID_QID_V(adap->sge.fw_evtq.abs_id)));
+ req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
req->l2t_idx = htons(e->idx);
req->vlan = htons(e->vlan);
if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
@@ -425,7 +426,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
* in the Compressed Filter Tuple.
*/
if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
- ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+ ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;
if (tp->port_shift >= 0)
ntuple |= (u64)l2t->lport << tp->port_shift;
@@ -439,9 +440,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
u32 pf = FW_VIID_PFN_G(viid);
u32 vld = FW_VIID_VIVLD_G(viid);
- ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
- V_FT_VNID_ID_PF(pf) |
- V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+ ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
+ FT_VNID_ID_PF_V(pf) |
+ FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
}
return ntuple;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ebf935a1e352..619156112b21 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -45,6 +45,7 @@
#include <net/tcp.h>
#include "cxgb4.h"
#include "t4_regs.h"
+#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
@@ -521,10 +522,12 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
u32 val;
if (q->pend_cred >= 8) {
- val = PIDX(q->pend_cred / 8);
- if (!is_t4(adap->params.chip))
- val |= DBTYPE(1);
- val |= DBPRIO(1);
+ if (is_t4(adap->params.chip))
+ val = PIDX_V(q->pend_cred / 8);
+ else
+ val = PIDX_T5_V(q->pend_cred / 8) |
+ DBTYPE_F;
+ val |= DBPRIO_F;
wmb();
/* If we don't have access to the new User Doorbell (T5+), use
@@ -532,10 +535,10 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
* mechanism.
*/
if (unlikely(q->bar2_addr == NULL)) {
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- val | QID(q->cntxt_id));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+ val | QID_V(q->cntxt_id));
} else {
- writel(val | QID(q->bar2_qid),
+ writel(val | QID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write memory Barrier will force the write to
@@ -818,7 +821,8 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
sgl->addr0 = cpu_to_be64(addr[1]);
}
- sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE_V(nfrags));
if (likely(--nfrags == 0))
return;
/*
@@ -884,7 +888,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
* doorbell mechanism; otherwise use the new BAR2 mechanism.
*/
if (unlikely(q->bar2_addr == NULL)) {
- u32 val = PIDX(n);
+ u32 val = PIDX_V(n);
unsigned long flags;
/* For T4 we need to participate in the Doorbell Recovery
@@ -892,14 +896,14 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
*/
spin_lock_irqsave(&q->db_lock, flags);
if (!q->db_disabled)
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | val);
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+ QID_V(q->cntxt_id) | val);
else
q->db_pidx_inc += n;
q->db_pidx = q->pidx;
spin_unlock_irqrestore(&q->db_lock, flags);
} else {
- u32 val = PIDX_T5(n);
+ u32 val = PIDX_T5_V(n);
/* T4 and later chips share the same PIDX field offset within
* the doorbell, but T5 and later shrank the field in order to
@@ -907,7 +911,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
* large in the first place (14 bits) so we just use the T5
* and later limits and warn if a Queue ID is too large.
*/
- WARN_ON(val & DBPRIO(1));
+ WARN_ON(val & DBPRIO_F);
/* If we're only writing a single TX Descriptor and we can use
* Inferred QID registers, we can use the Write Combining
@@ -923,7 +927,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
(q->bar2_addr + SGE_UDB_WCDOORBELL),
wr);
} else {
- writel(val | QID(q->bar2_qid),
+ writel(val | QID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_KDOORBELL);
}
@@ -1150,9 +1154,9 @@ out_free: dev_kfree_skb_any(skb);
cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
q->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
}
cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
@@ -1758,7 +1762,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
pkt = (const struct cpl_rx_pkt *)rsp;
csum_ok = pkt->csum_calc && !pkt->err_vec &&
(q->netdev->features & NETIF_F_RXCSUM);
- if ((pkt->l2info & htonl(RXF_TCP)) &&
+ if ((pkt->l2info & htonl(RXF_TCP_F)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
do_gro(rxq, si, pkt);
return 0;
@@ -1780,11 +1784,11 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.pkts++;
- if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+ if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
rxq->stats.rx_cso++;
- } else if (pkt->l2info & htonl(RXF_IP)) {
+ } else if (pkt->l2info & htonl(RXF_IP_F)) {
__sum16 c = (__force __sum16)pkt->csum;
skb->csum = csum_unfold(c);
skb->ip_summed = CHECKSUM_COMPLETE;
@@ -2001,16 +2005,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
} else
params = QINTR_TIMER_IDX(7);
- val = CIDXINC(work_done) | SEINTARM(params);
+ val = CIDXINC_V(work_done) | SEINTARM_V(params);
/* If we don't have access to the new User GTS (T5+), use the old
* doorbell mechanism; otherwise use the new BAR2 mechanism.
*/
if (unlikely(q->bar2_addr == NULL)) {
- t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS),
- val | INGRESSQID((u32)q->cntxt_id));
+ t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
+ val | INGRESSQID_V((u32)q->cntxt_id));
} else {
- writel(val | INGRESSQID(q->bar2_qid),
+ writel(val | INGRESSQID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_GTS);
wmb();
}
@@ -2056,16 +2060,16 @@ static unsigned int process_intrq(struct adapter *adap)
rspq_next(q);
}
- val = CIDXINC(credits) | SEINTARM(q->intr_params);
+ val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
/* If we don't have access to the new User GTS (T5+), use the old
* doorbell mechanism; otherwise use the new BAR2 mechanism.
*/
if (unlikely(q->bar2_addr == NULL)) {
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
- val | INGRESSQID(q->cntxt_id));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+ val | INGRESSQID_V(q->cntxt_id));
} else {
- writel(val | INGRESSQID(q->bar2_qid),
+ writel(val | INGRESSQID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_GTS);
wmb();
}
@@ -2095,7 +2099,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
{
struct adapter *adap = cookie;
- t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
+ t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
if (t4_slow_intr_handler(adap) | process_intrq(adap))
return IRQ_HANDLED;
return IRQ_NONE; /* probably shared interrupt */
@@ -2142,9 +2146,9 @@ static void sge_rx_timer_cb(unsigned long data)
}
}
- t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
- idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
- idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
+ idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
+ idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
for (i = 0; i < 2; i++) {
u32 debug0, debug11;
@@ -2188,12 +2192,12 @@ static void sge_rx_timer_cb(unsigned long data)
/* Read and save the SGE IDMA State and Queue ID information.
* We do this every time in case it changes across time ...
*/
- t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
- debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
+ debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
- t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
- debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
+ debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
@@ -2738,24 +2742,11 @@ void t4_sge_stop(struct adapter *adap)
}
/**
- * t4_sge_init - initialize SGE
+ * t4_sge_init_soft - grab core SGE values needed by SGE code
* @adap: the adapter
*
- * Performs SGE initialization needed every time after a chip reset.
- * We do not initialize any of the queues here, instead the driver
- * top-level must request them individually.
- *
- * Called in two different modes:
- *
- * 1. Perform actual hardware initialization and record hard-coded
- * parameters which were used. This gets used when we're the
- * Master PF and the Firmware Configuration File support didn't
- * work for some reason.
- *
- * 2. We're not the Master PF or initialization was performed with
- * a Firmware Configuration File. In this case we need to grab
- * any of the SGE operating parameters that we need to have in
- * order to do our job and make sure we can live with them ...
+ * We need to grab the SGE operating parameters required to do our job
+ * and make sure we can live with them.
*/
static int t4_sge_init_soft(struct adapter *adap)
@@ -2770,8 +2761,8 @@ static int t4_sge_init_soft(struct adapter *adap)
* process_responses() and that only packet data is going to the
* Free Lists.
*/
- if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
- RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+ if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
+ RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
return -EINVAL;
}
@@ -2785,7 +2776,7 @@ static int t4_sge_init_soft(struct adapter *adap)
* XXX meet our needs!
*/
#define READ_FL_BUF(x) \
- t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
+ t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
@@ -2823,99 +2814,38 @@ static int t4_sge_init_soft(struct adapter *adap)
* Retrieve our RX interrupt holdoff timer values and counter
* threshold values from the SGE parameters.
*/
- timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
- timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
- timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
+ timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
+ timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
+ timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
s->timer_val[0] = core_ticks_to_us(adap,
- TIMERVALUE0_GET(timer_value_0_and_1));
+ TIMERVALUE0_G(timer_value_0_and_1));
s->timer_val[1] = core_ticks_to_us(adap,
- TIMERVALUE1_GET(timer_value_0_and_1));
+ TIMERVALUE1_G(timer_value_0_and_1));
s->timer_val[2] = core_ticks_to_us(adap,
- TIMERVALUE2_GET(timer_value_2_and_3));
+ TIMERVALUE2_G(timer_value_2_and_3));
s->timer_val[3] = core_ticks_to_us(adap,
- TIMERVALUE3_GET(timer_value_2_and_3));
+ TIMERVALUE3_G(timer_value_2_and_3));
s->timer_val[4] = core_ticks_to_us(adap,
- TIMERVALUE4_GET(timer_value_4_and_5));
+ TIMERVALUE4_G(timer_value_4_and_5));
s->timer_val[5] = core_ticks_to_us(adap,
- TIMERVALUE5_GET(timer_value_4_and_5));
+ TIMERVALUE5_G(timer_value_4_and_5));
- ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
- s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
- s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
- s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
- s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
-
- return 0;
-}
-
-static int t4_sge_init_hard(struct adapter *adap)
-{
- struct sge *s = &adap->sge;
-
- /*
- * Set up our basic SGE mode to deliver CPL messages to our Ingress
- * Queue and Packet Date to the Free List.
- */
- t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
- RXPKTCPLMODE_MASK);
-
- /*
- * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
- * and generate an interrupt when this occurs so we can recover.
- */
- if (is_t4(adap->params.chip)) {
- t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
- V_HP_INT_THRESH(M_HP_INT_THRESH) |
- V_LP_INT_THRESH(M_LP_INT_THRESH),
- V_HP_INT_THRESH(dbfifo_int_thresh) |
- V_LP_INT_THRESH(dbfifo_int_thresh));
- } else {
- t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
- V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
- V_LP_INT_THRESH_T5(dbfifo_int_thresh));
- t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
- V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
- V_HP_INT_THRESH_T5(dbfifo_int_thresh));
- }
- t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
- F_ENABLE_DROP);
-
- /*
- * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
- * t4_fixup_host_params().
- */
- s->fl_pg_order = FL_PG_ORDER;
- if (s->fl_pg_order)
- t4_write_reg(adap,
- SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
- PAGE_SIZE << FL_PG_ORDER);
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
- FL_MTU_SMALL_BUFSIZE(adap));
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
- FL_MTU_LARGE_BUFSIZE(adap));
-
- /*
- * Note that the SGE Ingress Packet Count Interrupt Threshold and
- * Timer Holdoff values must be supplied by our caller.
- */
- t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
- THRESHOLD_0(s->counter_val[0]) |
- THRESHOLD_1(s->counter_val[1]) |
- THRESHOLD_2(s->counter_val[2]) |
- THRESHOLD_3(s->counter_val[3]));
- t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
- TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
- TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
- t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
- TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
- TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
- t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
- TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
- TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
+ ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
+ s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
+ s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
+ s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
+ s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
return 0;
}
+/**
+ * t4_sge_init - initialize SGE
+ * @adap: the adapter
+ *
+ * Perform low-level SGE code initialization needed every time after a
+ * chip reset.
+ */
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
@@ -2927,9 +2857,9 @@ int t4_sge_init(struct adapter *adap)
* Ingress Padding Boundary and Egress Status Page Size are set up by
* t4_fixup_host_params().
*/
- sge_control = t4_read_reg(adap, SGE_CONTROL);
- s->pktshift = PKTSHIFT_GET(sge_control);
- s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
+ sge_control = t4_read_reg(adap, SGE_CONTROL_A);
+ s->pktshift = PKTSHIFT_G(sge_control);
+ s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
/* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these
@@ -2937,8 +2867,8 @@ int t4_sge_init(struct adapter *adap)
* within Packed Buffer Mode is the maximum of these two
* specifications.
*/
- ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
- X_INGPADBOUNDARY_SHIFT);
+ ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
+ INGPADBOUNDARY_SHIFT_X);
if (is_t4(adap->params.chip)) {
s->fl_align = ingpadboundary;
} else {
@@ -2956,10 +2886,7 @@ int t4_sge_init(struct adapter *adap)
s->fl_align = max(ingpadboundary, ingpackboundary);
}
- if (adap->flags & USING_SOFT_PARAMS)
- ret = t4_sge_init_soft(adap);
- else
- ret = t4_sge_init_hard(adap);
+ ret = t4_sge_init_soft(adap);
if (ret < 0)
return ret;
@@ -2975,11 +2902,11 @@ int t4_sge_init(struct adapter *adap)
* buffers and a new field which only applies to Packed Mode Free List
* buffers.
*/
- sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
+ sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
if (is_t4(adap->params.chip))
- egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
+ egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
else
- egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
+ egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
s->fl_starve_thres = 2*egress_threshold + 1;
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
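
The T4-vs-T5 split that recurs in cxgb4_sync_txq_pidx(), sync_txq_pidx() and ring_fl_db() above amounts to picking the right PIDX encoding before ringing the kernel doorbell register. A condensed sketch of that pattern using the macros introduced by this patch; the helper name kdoorbell_write_delta() is hypothetical and assumes the usual cxgb4.h/t4_regs.h/t4_values.h includes:

static void kdoorbell_write_delta(struct adapter *adap, u16 qid, u16 delta)
{
	/* T5 shrank the PIDX field, so it has its own encoding macro */
	u32 val = is_t4(adap->params.chip) ? PIDX_V(delta) : PIDX_T5_V(delta);

	wmb();		/* commit queue state updates before ringing the doorbell */
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), QID_V(qid) | val);
}
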
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index c132d9030729..734d33e3f53b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
+#include "t4_values.h"
#include "t4fw_api.h"
/**
@@ -149,20 +150,20 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
*/
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
- u32 req = ENABLE | FUNCTION(adap->fn) | reg;
+ u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
if (is_t4(adap->params.chip))
- req |= F_LOCALCFG;
+ req |= LOCALCFG_F;
- t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
- *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
+ t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
+ *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
* Configuration Space read. (None of the other fields matter when
* ENABLE is 0 so a simple register write is easier than a
* read-modify-write via t4_set_reg_field().)
*/
- t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
+ t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
}
/*
@@ -187,8 +188,8 @@ static void t4_report_fw_error(struct adapter *adap)
};
u32 pcie_fw;
- pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
- if (pcie_fw & PCIE_FW_ERR)
+ pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+ if (pcie_fw & PCIE_FW_ERR_F)
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[PCIE_FW_EVAL_G(pcie_fw)]);
}
@@ -264,8 +265,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
u64 res;
int i, ms, delay_idx;
const __be64 *p = cmd;
- u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
- u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
+ u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
+ u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
if ((size & 15) || size > MBOX_LEN)
return -EINVAL;
@@ -277,9 +278,9 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
if (adap->pdev->error_state != pci_channel_io_normal)
return -EIO;
- v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+ v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
- v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+ v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
if (v != MBOX_OWNER_DRV)
return v ? -EBUSY : -ETIMEDOUT;
@@ -287,7 +288,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
for (i = 0; i < size; i += 8)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
- t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+ t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
t4_read_reg(adap, ctl_reg); /* flush write */
delay_idx = 0;
@@ -303,8 +304,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
mdelay(ms);
v = t4_read_reg(adap, ctl_reg);
- if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
- if (!(v & MBMSGVALID)) {
+ if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
+ if (!(v & MBMSGVALID_F)) {
t4_write_reg(adap, ctl_reg, 0);
continue;
}
@@ -350,27 +351,27 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
u32 mc_bist_status_rdata, mc_bist_data_pattern;
if (is_t4(adap->params.chip)) {
- mc_bist_cmd = MC_BIST_CMD;
- mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
- mc_bist_cmd_len = MC_BIST_CMD_LEN;
- mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
- mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
+ mc_bist_cmd = MC_BIST_CMD_A;
+ mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
+ mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
+ mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
+ mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
} else {
- mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
- mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
- mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
- mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
- mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+ mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
+ mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
+ mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
+ mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
+ mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
}
- if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
+ if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
return -EBUSY;
t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
t4_write_reg(adap, mc_bist_cmd_len, 64);
t4_write_reg(adap, mc_bist_data_pattern, 0xc);
- t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
- BIST_CMD_GAP(1));
- i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
+ t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
+ BIST_CMD_GAP_V(1));
+ i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
if (i)
return i;
@@ -403,31 +404,31 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
if (is_t4(adap->params.chip)) {
- edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
- edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
- edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
- edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
- idx);
- edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
+ edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
+ edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
+ edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
+ edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
idx);
+ edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
+ idx);
} else {
- edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
- edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
- edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
+ edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
+ edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
+ edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
edc_bist_cmd_data_pattern =
- EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
+ EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
edc_bist_status_rdata =
- EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+ EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
}
- if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
+ if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
return -EBUSY;
t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
t4_write_reg(adap, edc_bist_cmd_len, 64);
t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
t4_write_reg(adap, edc_bist_cmd,
- BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
- i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
+ BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
+ i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
if (i)
return i;
@@ -505,13 +506,13 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
* the address is relative to BAR0.
*/
mem_reg = t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
win));
- mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
- mem_base = GET_PCIEOFST(mem_reg) << 10;
+ mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
+ mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
if (is_t4(adap->params.chip))
mem_base -= adap->t4_bar0;
- win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
+ win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
/* Calculate our initial PCI-E Memory Window Position and Offset into
* that Window.
@@ -524,10 +525,10 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
* attempt to use the new value.)
*/
t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
pos | win_pf);
t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
/* Transfer data to/from the adapter as long as there's an integral
* number of 32-bit transfers to complete.
@@ -552,11 +553,11 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
pos += mem_aperture;
offset = 0;
t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
- win), pos | win_pf);
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
+ win), pos | win_pf);
t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
- win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
+ win));
}
}
@@ -760,14 +761,13 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
+ if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
return -EBUSY;
- cont = cont ? SF_CONT : 0;
- lock = lock ? SF_LOCK : 0;
- t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
- ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
+ t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
+ SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
+ ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
if (!ret)
- *valp = t4_read_reg(adapter, SF_DATA);
+ *valp = t4_read_reg(adapter, SF_DATA_A);
return ret;
}
@@ -788,14 +788,12 @@ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
{
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
+ if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
return -EBUSY;
- cont = cont ? SF_CONT : 0;
- lock = lock ? SF_LOCK : 0;
- t4_write_reg(adapter, SF_DATA, val);
- t4_write_reg(adapter, SF_OP, lock |
- cont | BYTECNT(byte_cnt - 1) | OP_WR);
- return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
+ t4_write_reg(adapter, SF_DATA_A, val);
+ t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
+ SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
+ return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
}
/**
@@ -854,7 +852,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
for ( ; nwords; nwords--, data++) {
ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
if (nwords == 1)
- t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
if (ret)
return ret;
if (byte_oriented)
@@ -902,7 +900,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
if (ret)
goto unlock;
- t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
/* Read the page to verify the write succeeded */
ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
@@ -918,7 +916,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
return 0;
unlock:
- t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
return ret;
}
@@ -1113,7 +1111,7 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
}
start++;
}
- t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
return ret;
}
@@ -1365,95 +1363,97 @@ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
static void pcie_intr_handler(struct adapter *adapter)
{
static const struct intr_info sysbus_intr_info[] = {
- { RNPP, "RXNP array parity error", -1, 1 },
- { RPCP, "RXPC array parity error", -1, 1 },
- { RCIP, "RXCIF array parity error", -1, 1 },
- { RCCP, "Rx completions control array parity error", -1, 1 },
- { RFTP, "RXFT array parity error", -1, 1 },
+ { RNPP_F, "RXNP array parity error", -1, 1 },
+ { RPCP_F, "RXPC array parity error", -1, 1 },
+ { RCIP_F, "RXCIF array parity error", -1, 1 },
+ { RCCP_F, "Rx completions control array parity error", -1, 1 },
+ { RFTP_F, "RXFT array parity error", -1, 1 },
{ 0 }
};
static const struct intr_info pcie_port_intr_info[] = {
- { TPCP, "TXPC array parity error", -1, 1 },
- { TNPP, "TXNP array parity error", -1, 1 },
- { TFTP, "TXFT array parity error", -1, 1 },
- { TCAP, "TXCA array parity error", -1, 1 },
- { TCIP, "TXCIF array parity error", -1, 1 },
- { RCAP, "RXCA array parity error", -1, 1 },
- { OTDD, "outbound request TLP discarded", -1, 1 },
- { RDPE, "Rx data parity error", -1, 1 },
- { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { TPCP_F, "TXPC array parity error", -1, 1 },
+ { TNPP_F, "TXNP array parity error", -1, 1 },
+ { TFTP_F, "TXFT array parity error", -1, 1 },
+ { TCAP_F, "TXCA array parity error", -1, 1 },
+ { TCIP_F, "TXCIF array parity error", -1, 1 },
+ { RCAP_F, "RXCA array parity error", -1, 1 },
+ { OTDD_F, "outbound request TLP discarded", -1, 1 },
+ { RDPE_F, "Rx data parity error", -1, 1 },
+ { TDUE_F, "Tx uncorrectable data error", -1, 1 },
{ 0 }
};
static const struct intr_info pcie_intr_info[] = {
- { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
- { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
- { MSIDATAPERR, "MSI data parity error", -1, 1 },
- { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
- { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
- { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
- { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
- { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
- { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
- { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
- { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
- { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
- { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
- { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
- { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
- { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
- { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
- { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
- { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
- { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
- { FIDPERR, "PCI FID parity error", -1, 1 },
- { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
- { MATAGPERR, "PCI MA tag parity error", -1, 1 },
- { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
- { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
- { RXWRPERR, "PCI Rx write parity error", -1, 1 },
- { RPLPERR, "PCI replay buffer parity error", -1, 1 },
- { PCIESINT, "PCI core secondary fault", -1, 1 },
- { PCIEPINT, "PCI core primary fault", -1, 1 },
- { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
+ { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
+ { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
+ { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
+ { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
+ { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
+ { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
+ { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+ { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
+ { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+ { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR_F, "PCI FID parity error", -1, 1 },
+ { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
+ { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
+ { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+ { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
+ { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
+ { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
+ { PCIESINT_F, "PCI core secondary fault", -1, 1 },
+ { PCIEPINT_F, "PCI core primary fault", -1, 1 },
+ { UNXSPLCPLERR_F, "PCI unexpected split completion error",
+ -1, 0 },
{ 0 }
};
static struct intr_info t5_pcie_intr_info[] = {
- { MSTGRPPERR, "Master Response Read Queue parity error",
+ { MSTGRPPERR_F, "Master Response Read Queue parity error",
+ -1, 1 },
+ { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
+ { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
+ { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
-1, 1 },
- { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
- { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
- { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
- { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
- { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
- { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
- { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+ { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
-1, 1 },
- { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+ { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
+ { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+ { DREQWRPERR_F, "PCI DMA channel write request parity error",
-1, 1 },
- { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
- { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
- { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
- { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
- { DREQWRPERR, "PCI DMA channel write request parity error",
+ { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+ { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR_F, "PCI FID parity error", -1, 1 },
+ { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
+ { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
+ { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+ { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
-1, 1 },
- { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
- { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
- { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
- { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
- { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
- { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
- { FIDPERR, "PCI FID parity error", -1, 1 },
- { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
- { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
- { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
- { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+ { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
-1, 1 },
- { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
- { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
- { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
- { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
- { READRSPERR, "Outbound read error", -1, 0 },
+ { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
+ { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
+ { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { READRSPERR_F, "Outbound read error", -1, 0 },
{ 0 }
};
@@ -1461,15 +1461,15 @@ static void pcie_intr_handler(struct adapter *adapter)
if (is_t4(adapter->params.chip))
fat = t4_handle_intr_status(adapter,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
- sysbus_intr_info) +
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
+ sysbus_intr_info) +
t4_handle_intr_status(adapter,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
- pcie_port_intr_info) +
- t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
+ pcie_port_intr_info) +
+ t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
pcie_intr_info);
else
- fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
t5_pcie_intr_info);
if (fat)
@@ -1483,11 +1483,11 @@ static void tp_intr_handler(struct adapter *adapter)
{
static const struct intr_info tp_intr_info[] = {
{ 0x3fffffff, "TP parity error", -1, 1 },
- { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+ { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
+ if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
t4_fatal_err(adapter);
}
@@ -1499,102 +1499,107 @@ static void sge_intr_handler(struct adapter *adapter)
u64 v;
static const struct intr_info sge_intr_info[] = {
- { ERR_CPL_EXCEED_IQE_SIZE,
+ { ERR_CPL_EXCEED_IQE_SIZE_F,
"SGE received CPL exceeding IQE size", -1, 1 },
- { ERR_INVALID_CIDX_INC,
+ { ERR_INVALID_CIDX_INC_F,
"SGE GTS CIDX increment too large", -1, 0 },
- { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
- { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
- { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
- { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
- { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+ { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+ { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
+ { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
+ { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
+ { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
- { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
0 },
- { ERR_ING_CTXT_PRIO,
+ { ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 },
- { ERR_EGR_CTXT_PRIO,
+ { ERR_EGR_CTXT_PRIO_F,
"SGE too many priority egress contexts", -1, 0 },
- { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
- { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+ { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+ { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0 }
};
- v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
- ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
+ v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
+ ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
if (v) {
dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
(unsigned long long)v);
- t4_write_reg(adapter, SGE_INT_CAUSE1, v);
- t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
+ t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
+ t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
}
- if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
+ if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
v != 0)
t4_fatal_err(adapter);
}
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+ OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+ IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
+
/*
* CIM interrupt handler.
*/
static void cim_intr_handler(struct adapter *adapter)
{
static const struct intr_info cim_intr_info[] = {
- { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
- { OBQPARERR, "CIM OBQ parity error", -1, 1 },
- { IBQPARERR, "CIM IBQ parity error", -1, 1 },
- { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
- { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
- { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
- { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+ { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
+ { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
+ { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
+ { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
+ { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
+ { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
+ { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
{ 0 }
};
static const struct intr_info cim_upintr_info[] = {
- { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
- { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
- { ILLWRINT, "CIM illegal write", -1, 1 },
- { ILLRDINT, "CIM illegal read", -1, 1 },
- { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
- { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
- { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
- { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
- { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
- { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
- { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
- { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
- { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
- { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
- { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
- { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
- { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
- { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
- { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
- { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
- { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
- { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
- { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
- { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
- { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
- { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
- { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
- { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+ { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
+ { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
+ { ILLWRINT_F, "CIM illegal write", -1, 1 },
+ { ILLRDINT_F, "CIM illegal read", -1, 1 },
+ { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
+ { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
+ { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
+ { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
+ { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
+ { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
+ { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
+ { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
+ { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
+ { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
+ { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
+ { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
+ { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
+ { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
+ { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
+ { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
+ { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
+ { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
+ { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
+ { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
+ { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
+ { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
+ { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
+ { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
{ 0 }
};
int fat;
- if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR)
+ if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
t4_report_fw_error(adapter);
- fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
+ fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
cim_intr_info) +
- t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
+ t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
cim_upintr_info);
if (fat)
t4_fatal_err(adapter);
@@ -1611,7 +1616,7 @@ static void ulprx_intr_handler(struct adapter *adapter)
{ 0 }
};
- if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
+ if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
t4_fatal_err(adapter);
}
@@ -1621,19 +1626,19 @@ static void ulprx_intr_handler(struct adapter *adapter)
static void ulptx_intr_handler(struct adapter *adapter)
{
static const struct intr_info ulptx_intr_info[] = {
- { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
0 },
{ 0xfffffff, "ULPTX parity error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
+ if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
t4_fatal_err(adapter);
}
@@ -1643,19 +1648,20 @@ static void ulptx_intr_handler(struct adapter *adapter)
static void pmtx_intr_handler(struct adapter *adapter)
{
static const struct intr_info pmtx_intr_info[] = {
- { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
- { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
- { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
- { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
- { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
- { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
- { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
- { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
- { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+ { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
+ { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
+ { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
+ { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
+ -1, 1 },
+ { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
+ { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
{ 0 }
};
- if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
+ if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
t4_fatal_err(adapter);
}
@@ -1665,16 +1671,17 @@ static void pmtx_intr_handler(struct adapter *adapter)
static void pmrx_intr_handler(struct adapter *adapter)
{
static const struct intr_info pmrx_intr_info[] = {
- { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
- { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
- { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
- { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
- { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
- { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+ { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
+ { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
+ { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
+ -1, 1 },
+ { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
+ { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
{ 0 }
};
- if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
+ if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
t4_fatal_err(adapter);
}
@@ -1684,16 +1691,16 @@ static void pmrx_intr_handler(struct adapter *adapter)
static void cplsw_intr_handler(struct adapter *adapter)
{
static const struct intr_info cplsw_intr_info[] = {
- { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
- { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
- { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
- { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
- { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
- { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+ { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
+ { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
+ { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
+ { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
+ { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
+ { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
+ if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
t4_fatal_err(adapter);
}
@@ -1703,15 +1710,15 @@ static void cplsw_intr_handler(struct adapter *adapter)
static void le_intr_handler(struct adapter *adap)
{
static const struct intr_info le_intr_info[] = {
- { LIPMISS, "LE LIP miss", -1, 0 },
- { LIP0, "LE 0 LIP error", -1, 0 },
- { PARITYERR, "LE parity error", -1, 1 },
- { UNKNOWNCMD, "LE unknown command", -1, 1 },
- { REQQPARERR, "LE request queue parity error", -1, 1 },
+ { LIPMISS_F, "LE LIP miss", -1, 0 },
+ { LIP0_F, "LE 0 LIP error", -1, 0 },
+ { PARITYERR_F, "LE parity error", -1, 1 },
+ { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+ { REQQPARERR_F, "LE request queue parity error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
+ if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
t4_fatal_err(adap);
}
@@ -1725,19 +1732,22 @@ static void mps_intr_handler(struct adapter *adapter)
{ 0 }
};
static const struct intr_info mps_tx_intr_info[] = {
- { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
- { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
- { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
- { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
- { BUBBLE, "MPS Tx underflow", -1, 1 },
- { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
- { FRMERR, "MPS Tx framing error", -1, 1 },
+ { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
+ { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+ { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
+ -1, 1 },
+ { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
+ -1, 1 },
+ { BUBBLE_F, "MPS Tx underflow", -1, 1 },
+ { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
+ { FRMERR_F, "MPS Tx framing error", -1, 1 },
{ 0 }
};
static const struct intr_info mps_trc_intr_info[] = {
- { FILTMEM, "MPS TRC filter parity error", -1, 1 },
- { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
- { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+ { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
+ { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
+ -1, 1 },
+ { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
{ 0 }
};
static const struct intr_info mps_stat_sram_intr_info[] = {
@@ -1753,37 +1763,37 @@ static void mps_intr_handler(struct adapter *adapter)
{ 0 }
};
static const struct intr_info mps_cls_intr_info[] = {
- { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
- { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
- { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+ { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
+ { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
+ { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
{ 0 }
};
int fat;
- fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
+ fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
mps_rx_intr_info) +
- t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
+ t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
mps_tx_intr_info) +
- t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
+ t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
mps_trc_intr_info) +
- t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
mps_stat_sram_intr_info) +
- t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
mps_stat_tx_intr_info) +
- t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
mps_stat_rx_intr_info) +
- t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
+ t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
mps_cls_intr_info);
- t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
- RXINT | TXINT | STATINT);
- t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
+ t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
+ t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
if (fat)
t4_fatal_err(adapter);
}
-#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
+ ECC_UE_INT_CAUSE_F)
/*
* EDC/MC interrupt handler.
@@ -1795,40 +1805,40 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
unsigned int addr, cnt_addr, v;
if (idx <= MEM_EDC1) {
- addr = EDC_REG(EDC_INT_CAUSE, idx);
- cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+ addr = EDC_REG(EDC_INT_CAUSE_A, idx);
+ cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
} else if (idx == MEM_MC) {
if (is_t4(adapter->params.chip)) {
- addr = MC_INT_CAUSE;
- cnt_addr = MC_ECC_STATUS;
+ addr = MC_INT_CAUSE_A;
+ cnt_addr = MC_ECC_STATUS_A;
} else {
- addr = MC_P_INT_CAUSE;
- cnt_addr = MC_P_ECC_STATUS;
+ addr = MC_P_INT_CAUSE_A;
+ cnt_addr = MC_P_ECC_STATUS_A;
}
} else {
- addr = MC_REG(MC_P_INT_CAUSE, 1);
- cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
+ addr = MC_REG(MC_P_INT_CAUSE_A, 1);
+ cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
}
v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
- if (v & PERR_INT_CAUSE)
+ if (v & PERR_INT_CAUSE_F)
dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
name[idx]);
- if (v & ECC_CE_INT_CAUSE) {
- u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
+ if (v & ECC_CE_INT_CAUSE_F) {
+ u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
- t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
+ t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
if (printk_ratelimit())
dev_warn(adapter->pdev_dev,
"%u %s correctable ECC data error%s\n",
cnt, name[idx], cnt > 1 ? "s" : "");
}
- if (v & ECC_UE_INT_CAUSE)
+ if (v & ECC_UE_INT_CAUSE_F)
dev_alert(adapter->pdev_dev,
"%s uncorrectable ECC data error\n", name[idx]);
t4_write_reg(adapter, addr, v);
- if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+ if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
t4_fatal_err(adapter);
}
@@ -1837,26 +1847,26 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
*/
static void ma_intr_handler(struct adapter *adap)
{
- u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
+ u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
- if (status & MEM_PERR_INT_CAUSE) {
+ if (status & MEM_PERR_INT_CAUSE_F) {
dev_alert(adap->pdev_dev,
"MA parity error, parity status %#x\n",
- t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+ t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
if (is_t5(adap->params.chip))
dev_alert(adap->pdev_dev,
"MA parity error, parity status %#x\n",
t4_read_reg(adap,
- MA_PARITY_ERROR_STATUS2));
+ MA_PARITY_ERROR_STATUS2_A));
}
- if (status & MEM_WRAP_INT_CAUSE) {
- v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
+ if (status & MEM_WRAP_INT_CAUSE_F) {
+ v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
dev_alert(adap->pdev_dev, "MA address wrap-around error by "
"client %u to address %#x\n",
- MEM_WRAP_CLIENT_NUM_GET(v),
- MEM_WRAP_ADDRESS_GET(v) << 4);
+ MEM_WRAP_CLIENT_NUM_G(v),
+ MEM_WRAP_ADDRESS_G(v) << 4);
}
- t4_write_reg(adap, MA_INT_CAUSE, status);
+ t4_write_reg(adap, MA_INT_CAUSE_A, status);
t4_fatal_err(adap);
}
@@ -1866,13 +1876,13 @@ static void ma_intr_handler(struct adapter *adap)
static void smb_intr_handler(struct adapter *adap)
{
static const struct intr_info smb_intr_info[] = {
- { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
- { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
- { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+ { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
+ { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
+ { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
+ if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
t4_fatal_err(adap);
}
@@ -1882,14 +1892,14 @@ static void smb_intr_handler(struct adapter *adap)
static void ncsi_intr_handler(struct adapter *adap)
{
static const struct intr_info ncsi_intr_info[] = {
- { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
- { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
- { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
- { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+ { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
+ { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
+ { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
+ { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
+ if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
t4_fatal_err(adap);
}
@@ -1901,23 +1911,23 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
u32 v, int_cause_reg;
if (is_t4(adap->params.chip))
- int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
+ int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
else
- int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
+ int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
v = t4_read_reg(adap, int_cause_reg);
- v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+ v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
if (!v)
return;
- if (v & TXFIFO_PRTY_ERR)
+ if (v & TXFIFO_PRTY_ERR_F)
dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
port);
- if (v & RXFIFO_PRTY_ERR)
+ if (v & RXFIFO_PRTY_ERR_F)
dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
port);
- t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
+ t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
t4_fatal_err(adap);
}
@@ -1927,19 +1937,19 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
static void pl_intr_handler(struct adapter *adap)
{
static const struct intr_info pl_intr_info[] = {
- { FATALPERR, "T4 fatal parity error", -1, 1 },
- { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+ { FATALPERR_F, "T4 fatal parity error", -1, 1 },
+ { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
{ 0 }
};
- if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
+ if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
t4_fatal_err(adap);
}
-#define PF_INTR_MASK (PFSW)
-#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
- EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
- CPL_SWITCH | SGE | ULP_TX)
+#define PF_INTR_MASK (PFSW_F)
+#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
+ EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
+ CPL_SWITCH_F | SGE_F | ULP_TX_F)
/**
* t4_slow_intr_handler - control path interrupt handler
@@ -1951,60 +1961,60 @@ static void pl_intr_handler(struct adapter *adap)
*/
int t4_slow_intr_handler(struct adapter *adapter)
{
- u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
+ u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
if (!(cause & GLBL_INTR_MASK))
return 0;
- if (cause & CIM)
+ if (cause & CIM_F)
cim_intr_handler(adapter);
- if (cause & MPS)
+ if (cause & MPS_F)
mps_intr_handler(adapter);
- if (cause & NCSI)
+ if (cause & NCSI_F)
ncsi_intr_handler(adapter);
- if (cause & PL)
+ if (cause & PL_F)
pl_intr_handler(adapter);
- if (cause & SMB)
+ if (cause & SMB_F)
smb_intr_handler(adapter);
- if (cause & XGMAC0)
+ if (cause & XGMAC0_F)
xgmac_intr_handler(adapter, 0);
- if (cause & XGMAC1)
+ if (cause & XGMAC1_F)
xgmac_intr_handler(adapter, 1);
- if (cause & XGMAC_KR0)
+ if (cause & XGMAC_KR0_F)
xgmac_intr_handler(adapter, 2);
- if (cause & XGMAC_KR1)
+ if (cause & XGMAC_KR1_F)
xgmac_intr_handler(adapter, 3);
- if (cause & PCIE)
+ if (cause & PCIE_F)
pcie_intr_handler(adapter);
- if (cause & MC)
+ if (cause & MC_F)
mem_intr_handler(adapter, MEM_MC);
- if (!is_t4(adapter->params.chip) && (cause & MC1))
+ if (!is_t4(adapter->params.chip) && (cause & MC1_S))
mem_intr_handler(adapter, MEM_MC1);
- if (cause & EDC0)
+ if (cause & EDC0_F)
mem_intr_handler(adapter, MEM_EDC0);
- if (cause & EDC1)
+ if (cause & EDC1_F)
mem_intr_handler(adapter, MEM_EDC1);
- if (cause & LE)
+ if (cause & LE_F)
le_intr_handler(adapter);
- if (cause & TP)
+ if (cause & TP_F)
tp_intr_handler(adapter);
- if (cause & MA)
+ if (cause & MA_F)
ma_intr_handler(adapter);
- if (cause & PM_TX)
+ if (cause & PM_TX_F)
pmtx_intr_handler(adapter);
- if (cause & PM_RX)
+ if (cause & PM_RX_F)
pmrx_intr_handler(adapter);
- if (cause & ULP_RX)
+ if (cause & ULP_RX_F)
ulprx_intr_handler(adapter);
- if (cause & CPL_SWITCH)
+ if (cause & CPL_SWITCH_F)
cplsw_intr_handler(adapter);
- if (cause & SGE)
+ if (cause & SGE_F)
sge_intr_handler(adapter);
- if (cause & ULP_TX)
+ if (cause & ULP_TX_F)
ulptx_intr_handler(adapter);
/* Clear the interrupts just processed for which we are the master. */
- t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
- (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
+ t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
+ (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
return 1;
}
@@ -2023,19 +2033,19 @@ int t4_slow_intr_handler(struct adapter *adapter)
*/
void t4_intr_enable(struct adapter *adapter)
{
- u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
-
- t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
- ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
- ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
- ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
- ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
- ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
- ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
- DBFIFO_HP_INT | DBFIFO_LP_INT |
- EGRESS_SIZE_ERR);
- t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
- t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
+ u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+
+ t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
+ ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
+ ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
+ ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
+ ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
+ ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
+ ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
+ EGRESS_SIZE_ERR_F);
+ t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
+ t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
/**
@@ -2048,10 +2058,10 @@ void t4_intr_enable(struct adapter *adapter)
*/
void t4_intr_disable(struct adapter *adapter)
{
- u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
+ u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
- t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
- t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
+ t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
+ t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
}
/**
@@ -2178,23 +2188,23 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6)
{
- u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
+ u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
-#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
+#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
#define STAT(x) val[STAT_IDX(x)]
#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
if (v4) {
- t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
- ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+ ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
v4->tcpOutRsts = STAT(OUT_RST);
v4->tcpInSegs = STAT64(IN_SEG);
v4->tcpOutSegs = STAT64(OUT_SEG);
v4->tcpRetransSegs = STAT64(RXT_SEG);
}
if (v6) {
- t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
- ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+ ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
v6->tcpOutRsts = STAT(OUT_RST);
v6->tcpInSegs = STAT64(IN_SEG);
v6->tcpOutSegs = STAT64(OUT_SEG);
@@ -2219,12 +2229,12 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
int i;
for (i = 0; i < NMTUS; ++i) {
- t4_write_reg(adap, TP_MTU_TABLE,
- MTUINDEX(0xff) | MTUVALUE(i));
- v = t4_read_reg(adap, TP_MTU_TABLE);
- mtus[i] = MTUVALUE_GET(v);
+ t4_write_reg(adap, TP_MTU_TABLE_A,
+ MTUINDEX_V(0xff) | MTUVALUE_V(i));
+ v = t4_read_reg(adap, TP_MTU_TABLE_A);
+ mtus[i] = MTUVALUE_G(v);
if (mtu_log)
- mtu_log[i] = MTUWIDTH_GET(v);
+ mtu_log[i] = MTUWIDTH_G(v);
}
}
@@ -2240,9 +2250,9 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val)
{
- t4_write_reg(adap, TP_PIO_ADDR, addr);
- val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
- t4_write_reg(adap, TP_PIO_DATA, val);
+ t4_write_reg(adap, TP_PIO_ADDR_A, addr);
+ val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
+ t4_write_reg(adap, TP_PIO_DATA_A, val);
}
/**
@@ -2321,8 +2331,8 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
if (!(mtu & ((1 << log2) >> 2))) /* round */
log2--;
- t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
- MTUWIDTH(log2) | MTUVALUE(mtu));
+ t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
+ MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
for (w = 0; w < NCCTRL_WIN; ++w) {
unsigned int inc;
@@ -2330,7 +2340,7 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
CC_MIN_INCR);
- t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
+ t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
(w << 16) | (beta[w] << 13) | inc);
}
}
@@ -2347,7 +2357,7 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
*/
static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
{
- u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
+ u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
if (n == 0)
return idx == 0 ? 0xf : 0;
@@ -2485,11 +2495,11 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
if (is_t4(adap->params.chip)) {
mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
} else {
mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
}
if (addr) {
@@ -2499,8 +2509,8 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
t4_write_reg(adap, mag_id_reg_h,
(addr[0] << 8) | addr[1]);
}
- t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
- addr ? MAGICEN : 0);
+ t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
+ addr ? MAGICEN_F : 0);
}
/**
@@ -2525,20 +2535,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
u32 port_cfg_reg;
if (is_t4(adap->params.chip))
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
else
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
if (!enable) {
- t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
+ t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
return 0;
}
if (map > 0xff)
return -EINVAL;
#define EPIO_REG(name) \
- (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
- T5_PORT_REG(port, MAC_PORT_EPIO_##name))
+ (is_t4(adap->params.chip) ? \
+ PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
+ T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -2550,21 +2561,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
/* write byte masks */
t4_write_reg(adap, EPIO_REG(DATA0), mask0);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
+ t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
return -ETIMEDOUT;
/* write CRC */
t4_write_reg(adap, EPIO_REG(DATA0), crc);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
+ t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
return -ETIMEDOUT;
}
#undef EPIO_REG
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
+ t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
return 0;
}
@@ -2749,9 +2760,9 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
"IDMA_FL_SEND_COMPLETION_TO_IMSG",
};
static const u32 sge_regs[] = {
- SGE_DEBUG_DATA_LOW_INDEX_2,
- SGE_DEBUG_DATA_LOW_INDEX_3,
- SGE_DEBUG_DATA_HIGH_INDEX_10,
+ SGE_DEBUG_DATA_LOW_INDEX_2_A,
+ SGE_DEBUG_DATA_LOW_INDEX_3_A,
+ SGE_DEBUG_DATA_HIGH_INDEX_10_A,
};
const char **sge_idma_decode;
int sge_idma_decode_nstates;
@@ -2818,7 +2829,7 @@ retry:
if (ret < 0) {
if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
goto retry;
- if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR)
+ if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
t4_report_fw_error(adap);
return ret;
}
@@ -2868,8 +2879,8 @@ retry:
* timeout ... and then retry if we haven't exhausted
* our retries ...
*/
- pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
- if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+ pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+ if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
if (waiting <= 0) {
if (retries-- > 0)
goto retry;
@@ -2884,9 +2895,9 @@ retry:
* report errors preferentially.
*/
if (state) {
- if (pcie_fw & PCIE_FW_ERR)
+ if (pcie_fw & PCIE_FW_ERR_F)
*state = DEV_STATE_ERR;
- else if (pcie_fw & PCIE_FW_INIT)
+ else if (pcie_fw & PCIE_FW_INIT_F)
*state = DEV_STATE_INIT;
}
@@ -2896,7 +2907,7 @@ retry:
* for our caller.
*/
if (master_mbox == PCIE_FW_MASTER_M &&
- (pcie_fw & PCIE_FW_MASTER_VLD))
+ (pcie_fw & PCIE_FW_MASTER_VLD_F))
master_mbox = PCIE_FW_MASTER_G(pcie_fw);
break;
}
@@ -2985,7 +2996,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
- c.val = htonl(PIORST | PIORSTMODE);
+ c.val = htonl(PIORST_F | PIORSTMODE_F);
c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3004,8 +3015,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
* rather than a RESET ... if it's new enough to understand that ...
*/
if (ret == 0 || force) {
- t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
- t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F,
+ t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
+ t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
PCIE_FW_HALT_F);
}
@@ -3045,7 +3056,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* doing it automatically, we need to clear the PCIE_FW.HALT
* bit.
*/
- t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0);
+ t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
/*
* If we've been given a valid mailbox, first try to get the
@@ -3055,21 +3066,21 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* hitting the chip with a hammer.
*/
if (mbox <= PCIE_FW_MASTER_M) {
- t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+ t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
msleep(100);
if (t4_fw_reset(adap, mbox,
- PIORST | PIORSTMODE) == 0)
+ PIORST_F | PIORSTMODE_F) == 0)
return 0;
}
- t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
+ t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
msleep(2000);
} else {
int ms;
- t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+ t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
- if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F))
+ if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
return 0;
msleep(100);
ms += 100;
@@ -3148,22 +3159,23 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
- t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
- HOSTPAGESIZEPF0(sge_hps) |
- HOSTPAGESIZEPF1(sge_hps) |
- HOSTPAGESIZEPF2(sge_hps) |
- HOSTPAGESIZEPF3(sge_hps) |
- HOSTPAGESIZEPF4(sge_hps) |
- HOSTPAGESIZEPF5(sge_hps) |
- HOSTPAGESIZEPF6(sge_hps) |
- HOSTPAGESIZEPF7(sge_hps));
+ t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
+ HOSTPAGESIZEPF0_V(sge_hps) |
+ HOSTPAGESIZEPF1_V(sge_hps) |
+ HOSTPAGESIZEPF2_V(sge_hps) |
+ HOSTPAGESIZEPF3_V(sge_hps) |
+ HOSTPAGESIZEPF4_V(sge_hps) |
+ HOSTPAGESIZEPF5_V(sge_hps) |
+ HOSTPAGESIZEPF6_V(sge_hps) |
+ HOSTPAGESIZEPF7_V(sge_hps));
if (is_t4(adap->params.chip)) {
- t4_set_reg_field(adap, SGE_CONTROL,
- INGPADBOUNDARY_MASK |
- EGRSTATUSPAGESIZE_MASK,
- INGPADBOUNDARY(fl_align_log - 5) |
- EGRSTATUSPAGESIZE(stat_len != 64));
+ t4_set_reg_field(adap, SGE_CONTROL_A,
+ INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+ EGRSTATUSPAGESIZE_F,
+ INGPADBOUNDARY_V(fl_align_log -
+ INGPADBOUNDARY_SHIFT_X) |
+ EGRSTATUSPAGESIZE_V(stat_len != 64));
} else {
/* T5 introduced the separation of the Free List Padding and
* Packing Boundaries. Thus, we can select a smaller Padding
@@ -3193,15 +3205,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
fl_align = 64;
fl_align_log = 6;
}
- t4_set_reg_field(adap, SGE_CONTROL,
- INGPADBOUNDARY_MASK |
- EGRSTATUSPAGESIZE_MASK,
- INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
- EGRSTATUSPAGESIZE(stat_len != 64));
+ t4_set_reg_field(adap, SGE_CONTROL_A,
+ INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+ EGRSTATUSPAGESIZE_F,
+ INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
+ EGRSTATUSPAGESIZE_V(stat_len != 64));
t4_set_reg_field(adap, SGE_CONTROL2_A,
INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
INGPACKBOUNDARY_V(fl_align_log -
- INGPACKBOUNDARY_SHIFT_X));
+ INGPACKBOUNDARY_SHIFT_X));
}
/*
* Adjust various SGE Free List Host Buffer Sizes.
@@ -3224,15 +3236,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
* Default Firmware Configuration File but we need to adjust it for
* this host's cache line size.
*/
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
- (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
+ (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
& ~(fl_align-1));
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
- (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
+ (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
& ~(fl_align-1));
- t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
+ t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
return 0;
}
@@ -3917,12 +3929,12 @@ int t4_wait_dev_ready(void __iomem *regs)
{
u32 whoami;
- whoami = readl(regs + PL_WHOAMI);
+ whoami = readl(regs + PL_WHOAMI_A);
if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
return 0;
msleep(500);
- whoami = readl(regs + PL_WHOAMI);
+ whoami = readl(regs + PL_WHOAMI_A);
return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
}
@@ -3946,7 +3958,7 @@ static int get_flash_params(struct adapter *adap)
ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
if (!ret)
ret = sf1_read(adap, 3, 0, 1, &info);
- t4_write_reg(adap, SF_OP, 0); /* unlock SF */
+ t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
if (ret)
return ret;
@@ -3969,7 +3981,7 @@ static int get_flash_params(struct adapter *adap)
return -EINVAL;
adap->params.sf_size = 1 << info;
adap->params.sf_fw_start =
- t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
+ t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
if (adap->params.sf_size < FLASH_MIN_SIZE)
dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
@@ -3993,7 +4005,7 @@ int t4_prep_adapter(struct adapter *adapter)
u32 pl_rev;
get_pci_mode(adapter, &adapter->params.pci);
- pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
+ pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
ret = get_flash_params(adapter);
if (ret < 0) {
@@ -4019,6 +4031,7 @@ int t4_prep_adapter(struct adapter *adapter)
return -EINVAL;
}
+ adapter->params.cim_la_size = CIMLA_SIZE;
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
/*
@@ -4133,7 +4146,7 @@ int t4_init_sge_params(struct adapter *adapter)
/* Extract the SGE Page Size for our PF.
*/
- hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE);
+ hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
@@ -4142,10 +4155,10 @@ int t4_init_sge_params(struct adapter *adapter)
*/
s_qpp = (QUEUESPERPAGEPF0_S +
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
- qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF);
- sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
- qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF);
- sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
+ qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
+ sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
+ qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
+ sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
return 0;
}
@@ -4161,9 +4174,9 @@ int t4_init_tp_params(struct adapter *adap)
int chan;
u32 v;
- v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
- adap->params.tp.tre = TIMERRESOLUTION_GET(v);
- adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+ v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
+ adap->params.tp.tre = TIMERRESOLUTION_G(v);
+ adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
for (chan = 0; chan < NCHAN; chan++)
@@ -4172,27 +4185,27 @@ int t4_init_tp_params(struct adapter *adap)
/* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
- t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
&adap->params.tp.vlan_pri_map, 1,
- TP_VLAN_PRI_MAP);
- t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ TP_VLAN_PRI_MAP_A);
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
&adap->params.tp.ingress_config, 1,
- TP_INGRESS_CONFIG);
+ TP_INGRESS_CONFIG_A);
/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
* for this adapter which we need frequently ...
*/
- adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
- adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
- adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
+ adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
- F_PROTOCOL);
+ PROTOCOL_F);
/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
* represents the presence of an Outer VLAN instead of a VNIC ID.
*/
- if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+ if ((adap->params.tp.ingress_config & VNIC_F) == 0)
adap->params.tp.vnic_shift = -1;
return 0;
@@ -4218,35 +4231,35 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
switch (filter_mode & sel) {
- case F_FCOE:
- field_shift += W_FT_FCOE;
+ case FCOE_F:
+ field_shift += FT_FCOE_W;
break;
- case F_PORT:
- field_shift += W_FT_PORT;
+ case PORT_F:
+ field_shift += FT_PORT_W;
break;
- case F_VNIC_ID:
- field_shift += W_FT_VNIC_ID;
+ case VNIC_ID_F:
+ field_shift += FT_VNIC_ID_W;
break;
- case F_VLAN:
- field_shift += W_FT_VLAN;
+ case VLAN_F:
+ field_shift += FT_VLAN_W;
break;
- case F_TOS:
- field_shift += W_FT_TOS;
+ case TOS_F:
+ field_shift += FT_TOS_W;
break;
- case F_PROTOCOL:
- field_shift += W_FT_PROTOCOL;
+ case PROTOCOL_F:
+ field_shift += FT_PROTOCOL_W;
break;
- case F_ETHERTYPE:
- field_shift += W_FT_ETHERTYPE;
+ case ETHERTYPE_F:
+ field_shift += FT_ETHERTYPE_W;
break;
- case F_MACMATCH:
- field_shift += W_FT_MACMATCH;
+ case MACMATCH_F:
+ field_shift += FT_MACMATCH_W;
break;
- case F_MPSHITTYPE:
- field_shift += W_FT_MPSHITTYPE;
+ case MPSHITTYPE_F:
+ field_shift += FT_MPSHITTYPE_W;
break;
- case F_FRAGMENTATION:
- field_shift += W_FT_FRAGMENTATION;
+ case FRAGMENTATION_F:
+ field_shift += FT_FRAGMENTATION_W;
break;
}
}
@@ -4311,3 +4324,157 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
}
return 0;
}
+
+/**
+ * t4_read_cimq_cfg - read CIM queue configuration
+ * @adap: the adapter
+ * @base: holds the queue base addresses in bytes
+ * @size: holds the queue sizes in bytes
+ * @thres: holds the queue full thresholds in bytes
+ *
+ * Returns the current configuration of the CIM queues, starting with
+ * the IBQs, then the OBQs.
+ */
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+{
+ unsigned int i, v;
+ int cim_num_obq = is_t4(adap->params.chip) ?
+ CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+ for (i = 0; i < CIM_NUM_IBQ; i++) {
+ t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
+ QUENUMSELECT_V(i));
+ v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+ /* value is in 256-byte units */
+ *base++ = CIMQBASE_G(v) * 256;
+ *size++ = CIMQSIZE_G(v) * 256;
+ *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
+ }
+ for (i = 0; i < cim_num_obq; i++) {
+ t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
+ QUENUMSELECT_V(i));
+ v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+ /* value is in 256-byte units */
+ *base++ = CIMQBASE_G(v) * 256;
+ *size++ = CIMQSIZE_G(v) * 256;
+ }
+}
+
+/**
+ * t4_cim_read - read a block from CIM internal address space
+ * @adap: the adapter
+ * @addr: the start address within the CIM address space
+ * @n: number of words to read
+ * @valp: where to store the result
+ *
+ * Reads a block of 4-byte words from the CIM internal address space.
+ */
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp)
+{
+ int ret = 0;
+
+ if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+ return -EBUSY;
+
+ for ( ; !ret && n--; addr += 4) {
+ t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
+ ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+ 0, 5, 2);
+ if (!ret)
+ *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
+ }
+ return ret;
+}
+
+/**
+ * t4_cim_write - write a block into CIM internal address space
+ * @adap: the adapter
+ * @addr: the start address within the CIM address space
+ * @n: number of words to write
+ * @valp: set of values to write
+ *
+ * Writes a block of 4-byte words into the CIM internal address space.
+ */
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+ const unsigned int *valp)
+{
+ int ret = 0;
+
+ if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+ return -EBUSY;
+
+ for ( ; !ret && n--; addr += 4) {
+ t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
+ t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
+ ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+ 0, 5, 2);
+ }
+ return ret;
+}
+
+static int t4_cim_write1(struct adapter *adap, unsigned int addr,
+ unsigned int val)
+{
+ return t4_cim_write(adap, addr, 1, &val);
+}
+
+/**
+ * t4_cim_read_la - read CIM LA capture buffer
+ * @adap: the adapter
+ * @la_buf: where to store the LA data
+ * @wrptr: the HW write pointer within the capture buffer
+ *
+ * Reads the contents of the CIM LA buffer with the most recent entry at
+ * the end of the returned data and with the entry at @wrptr first.
+ * We try to leave the LA in the running state we find it in.
+ */
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+{
+ int i, ret;
+ unsigned int cfg, val, idx;
+
+ ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+ if (ret)
+ return ret;
+
+ if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
+ ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
+ if (ret)
+ return ret;
+ }
+
+ ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
+ if (ret)
+ goto restart;
+
+ idx = UPDBGLAWRPTR_G(val);
+ if (wrptr)
+ *wrptr = idx;
+
+ for (i = 0; i < adap->params.cim_la_size; i++) {
+ ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
+ UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
+ if (ret)
+ break;
+ ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
+ if (ret)
+ break;
+ if (val & UPDBGLARDEN_F) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
+ if (ret)
+ break;
+ idx = (idx + 1) & UPDBGLARDPTR_M;
+ }
+restart:
+ if (cfg & UPDBGLAEN_F) {
+ int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
+ cfg & ~UPDBGLARDEN_F);
+ if (!ret)
+ ret = r;
+ }
+ return ret;
+}
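
The three CIM helpers above share one handshake: write an address to the control register, poll until the busy flag clears, then read or write the data register. Below is a small userspace sketch of that pattern against a mocked register file; the offsets and the busy bit are placeholders, not the real CIM_HOST_ACC_* layout, and the mock "hardware" completes instantly.

#include <stdio.h>
#include <stdint.h>

#define MOCK_CTRL 0			/* placeholder control register index */
#define MOCK_DATA 1			/* placeholder data register index */
#define MOCK_BUSY (1u << 31)		/* placeholder busy flag */

static uint32_t regs[2];

static uint32_t read_reg(unsigned int r)
{
	return regs[r];
}

static void write_reg(unsigned int r, uint32_t v)
{
	regs[r] = v;
	if (r == MOCK_CTRL) {
		/* pretend the hardware finished instantly: latch some data
		 * derived from the address and clear the busy flag
		 */
		regs[MOCK_DATA] = (v & ~MOCK_BUSY) ^ 0xdeadbeef;
		regs[MOCK_CTRL] &= ~MOCK_BUSY;
	}
}

/* Same shape as t4_cim_read(): refuse to start while busy, then for each
 * word write the address, wait for busy to clear, and read the data reg.
 */
static int cim_read(unsigned int addr, unsigned int n, uint32_t *valp)
{
	if (read_reg(MOCK_CTRL) & MOCK_BUSY)
		return -1;		/* interface already in use */

	for (; n--; addr += 4) {
		int tries = 5;

		write_reg(MOCK_CTRL, addr | MOCK_BUSY);
		while ((read_reg(MOCK_CTRL) & MOCK_BUSY) && --tries)
			;		/* the real code delays between polls */
		if (!tries)
			return -1;	/* timed out */
		*valp++ = read_reg(MOCK_DATA);
	}
	return 0;
}

int main(void)
{
	uint32_t buf[4];
	unsigned int i;

	if (cim_read(0x100, 4, buf) == 0)
		for (i = 0; i < 4; i++)
			printf("word %u: 0x%08x\n", i, (unsigned)buf[i]);
	return 0;
}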
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c19a90e7f7d1..f6b82da350e2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -56,6 +56,13 @@ enum {
};
enum {
+ CIM_NUM_IBQ = 6, /* # of CIM IBQs */
+ CIM_NUM_OBQ = 6, /* # of CIM OBQs */
+ CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
+ CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
+};
+
+enum {
SF_PAGE_SIZE = 256, /* serial flash page size */
SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
};
@@ -110,6 +117,18 @@ enum {
SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
};
+/* PCI-e memory window access */
+enum pcie_memwin {
+ MEMWIN_NIC = 0,
+ MEMWIN_RSVD1 = 1,
+ MEMWIN_RSVD2 = 2,
+ MEMWIN_RDMA = 3,
+ MEMWIN_RSVD4 = 4,
+ MEMWIN_FOISCSI = 5,
+ MEMWIN_CSIOSTOR = 6,
+ MEMWIN_RSVD7 = 7,
+};
+
struct sge_qstat { /* data written to SGE queue status entries */
__be32 qid;
__be16 cidx;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 0f89f68948ab..0fb975e258b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -124,6 +124,13 @@ enum CPL_error {
};
enum {
+ CPL_CONN_POLICY_AUTO = 0,
+ CPL_CONN_POLICY_ASK = 1,
+ CPL_CONN_POLICY_FILTER = 2,
+ CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
ULP_MODE_NONE = 0,
ULP_MODE_ISCSI = 2,
ULP_MODE_RDMA = 4,
@@ -160,16 +167,28 @@ union opcode_tid {
u8 opcode;
};
-#define CPL_OPCODE(x) ((x) << 24)
-#define G_CPL_OPCODE(x) (((x) >> 24) & 0xFF)
-#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
+#define CPL_OPCODE_S 24
+#define CPL_OPCODE_V(x) ((x) << CPL_OPCODE_S)
+#define CPL_OPCODE_G(x) (((x) >> CPL_OPCODE_S) & 0xFF)
+#define TID_G(x) ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24 bits */
+#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE_V(opcode) | (tid))
+
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
-#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (TID_G(be32_to_cpu(OPCODE_TID(cmd))))
/* partitioning of TID fields that also carry a queue id */
-#define GET_TID_TID(x) ((x) & 0x3fff)
-#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)
-#define TID_QID(x) ((x) << 14)
+#define TID_TID_S 0
+#define TID_TID_M 0x3fff
+#define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M)
+
+#define TID_QID_S 14
+#define TID_QID_M 0x3ff
+#define TID_QID_V(x) ((x) << TID_QID_S)
+#define TID_QID_G(x) (((x) >> TID_QID_S) & TID_QID_M)
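
To see the renamed opcode/TID accessors in action, here is a standalone sketch that packs and unpacks an opcode_tid word with the macros above. The opcode, queue id and tid are made-up values, and the byte swap GET_TID() performs on wire data is omitted:

#include <stdio.h>
#include <stdint.h>

/* Field macros copied from the hunk above (t4_msg.h) */
#define CPL_OPCODE_S    24
#define CPL_OPCODE_V(x) ((x) << CPL_OPCODE_S)
#define CPL_OPCODE_G(x) (((x) >> CPL_OPCODE_S) & 0xFF)
#define TID_G(x)        ((x) & 0xFFFFFF)

#define TID_TID_S    0
#define TID_TID_M    0x3fff
#define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M)

#define TID_QID_S    14
#define TID_QID_M    0x3ff
#define TID_QID_V(x) ((x) << TID_QID_S)
#define TID_QID_G(x) (((x) >> TID_QID_S) & TID_QID_M)

int main(void)
{
	/* Made-up opcode 0x3c, queue id 5, tid 0x123; in the driver this
	 * word is __be32 and GET_TID() byte-swaps it before extracting.
	 */
	uint32_t ot = CPL_OPCODE_V(0x3cu) | TID_QID_V(5u) | 0x123u;

	printf("opcode  = 0x%02x\n", (unsigned)CPL_OPCODE_G(ot));	/* 0x3c */
	printf("tid     = 0x%06x\n", (unsigned)TID_G(ot));
	printf("qid     = %u\n", (unsigned)TID_QID_G(TID_G(ot)));	/* 5 */
	printf("tid low = 0x%x\n", (unsigned)TID_TID_G(TID_G(ot)));	/* 0x123 */
	return 0;
}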
struct rss_header {
u8 opcode;
@@ -199,8 +218,8 @@ struct work_request_hdr {
};
/* wr_hi fields */
-#define S_WR_OP 24
-#define V_WR_OP(x) ((__u64)(x) << S_WR_OP)
+#define WR_OP_S 24
+#define WR_OP_V(x) ((__u64)(x) << WR_OP_S)
#define WR_HDR struct work_request_hdr wr
@@ -270,17 +289,42 @@ struct cpl_pass_open_req {
__be32 local_ip;
__be32 peer_ip;
__be64 opt0;
-#define NO_CONG(x) ((x) << 4)
-#define DELACK(x) ((x) << 5)
-#define DSCP(x) ((x) << 22)
-#define TCAM_BYPASS(x) ((u64)(x) << 48)
-#define NAGLE(x) ((u64)(x) << 49)
__be64 opt1;
-#define SYN_RSS_ENABLE (1 << 0)
-#define SYN_RSS_QUEUE(x) ((x) << 2)
-#define CONN_POLICY_ASK (1 << 22)
};
+/* option 0 fields */
+#define NO_CONG_S 4
+#define NO_CONG_V(x) ((x) << NO_CONG_S)
+#define NO_CONG_F NO_CONG_V(1U)
+
+#define DELACK_S 5
+#define DELACK_V(x) ((x) << DELACK_S)
+#define DELACK_F DELACK_V(1U)
+
+#define DSCP_S 22
+#define DSCP_M 0x3F
+#define DSCP_V(x) ((x) << DSCP_S)
+#define DSCP_G(x) (((x) >> DSCP_S) & DSCP_M)
+
+#define TCAM_BYPASS_S 48
+#define TCAM_BYPASS_V(x) ((__u64)(x) << TCAM_BYPASS_S)
+#define TCAM_BYPASS_F TCAM_BYPASS_V(1ULL)
+
+#define NAGLE_S 49
+#define NAGLE_V(x) ((__u64)(x) << NAGLE_S)
+#define NAGLE_F NAGLE_V(1ULL)
+
+/* option 1 fields */
+#define SYN_RSS_ENABLE_S 0
+#define SYN_RSS_ENABLE_V(x) ((x) << SYN_RSS_ENABLE_S)
+#define SYN_RSS_ENABLE_F SYN_RSS_ENABLE_V(1U)
+
+#define SYN_RSS_QUEUE_S 2
+#define SYN_RSS_QUEUE_V(x) ((x) << SYN_RSS_QUEUE_S)
+
+#define CONN_POLICY_S 22
+#define CONN_POLICY_V(x) ((x) << CONN_POLICY_S)
+
struct cpl_pass_open_req6 {
WR_HDR;
union opcode_tid ot;
@@ -304,16 +348,37 @@ struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
__be32 opt2;
-#define RX_COALESCE_VALID(x) ((x) << 11)
-#define RX_COALESCE(x) ((x) << 12)
-#define PACE(x) ((x) << 16)
-#define TX_QUEUE(x) ((x) << 23)
-#define CCTRL_ECN(x) ((x) << 27)
-#define TSTAMPS_EN(x) ((x) << 29)
-#define SACK_EN(x) ((x) << 30)
__be64 opt0;
};
+/* option 2 fields */
+#define RX_COALESCE_VALID_S 11
+#define RX_COALESCE_VALID_V(x) ((x) << RX_COALESCE_VALID_S)
+#define RX_COALESCE_VALID_F RX_COALESCE_VALID_V(1U)
+
+#define RX_COALESCE_S 12
+#define RX_COALESCE_V(x) ((x) << RX_COALESCE_S)
+
+#define PACE_S 16
+#define PACE_V(x) ((x) << PACE_S)
+
+#define TX_QUEUE_S 23
+#define TX_QUEUE_M 0x7
+#define TX_QUEUE_V(x) ((x) << TX_QUEUE_S)
+#define TX_QUEUE_G(x) (((x) >> TX_QUEUE_S) & TX_QUEUE_M)
+
+#define CCTRL_ECN_S 27
+#define CCTRL_ECN_V(x) ((x) << CCTRL_ECN_S)
+#define CCTRL_ECN_F CCTRL_ECN_V(1U)
+
+#define TSTAMPS_EN_S 29
+#define TSTAMPS_EN_V(x) ((x) << TSTAMPS_EN_S)
+#define TSTAMPS_EN_F TSTAMPS_EN_V(1U)
+
+#define SACK_EN_S 30
+#define SACK_EN_V(x) ((x) << SACK_EN_S)
+#define SACK_EN_F SACK_EN_V(1U)
+
struct cpl_t5_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
@@ -384,30 +449,61 @@ struct cpl_t5_act_open_req6 {
struct cpl_act_open_rpl {
union opcode_tid ot;
__be32 atid_status;
-#define GET_AOPEN_STATUS(x) ((x) & 0xff)
-#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff)
};
+/* cpl_act_open_rpl.atid_status fields */
+#define AOPEN_STATUS_S 0
+#define AOPEN_STATUS_M 0xFF
+#define AOPEN_STATUS_G(x) (((x) >> AOPEN_STATUS_S) & AOPEN_STATUS_M)
+
+#define AOPEN_ATID_S 8
+#define AOPEN_ATID_M 0xFFFFFF
+#define AOPEN_ATID_G(x) (((x) >> AOPEN_ATID_S) & AOPEN_ATID_M)
+
struct cpl_pass_establish {
union opcode_tid ot;
__be32 rsvd;
__be32 tos_stid;
-#define PASS_OPEN_TID(x) ((x) << 0)
-#define PASS_OPEN_TOS(x) ((x) << 24)
-#define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF)
-#define GET_POPEN_TID(x) ((x) & 0xffffff)
-#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
__be16 mac_idx;
__be16 tcp_opt;
-#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
-#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1)
-#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
-#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
-#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
__be32 snd_isn;
__be32 rcv_isn;
};
+/* cpl_pass_establish.tos_stid fields */
+#define PASS_OPEN_TID_S 0
+#define PASS_OPEN_TID_M 0xFFFFFF
+#define PASS_OPEN_TID_V(x) ((x) << PASS_OPEN_TID_S)
+#define PASS_OPEN_TID_G(x) (((x) >> PASS_OPEN_TID_S) & PASS_OPEN_TID_M)
+
+#define PASS_OPEN_TOS_S 24
+#define PASS_OPEN_TOS_M 0xFF
+#define PASS_OPEN_TOS_V(x) ((x) << PASS_OPEN_TOS_S)
+#define PASS_OPEN_TOS_G(x) (((x) >> PASS_OPEN_TOS_S) & PASS_OPEN_TOS_M)
+
+/* cpl_pass_establish.tcp_opt fields (also applies to cpl_act_establish) */
+#define TCPOPT_WSCALE_OK_S 5
+#define TCPOPT_WSCALE_OK_M 0x1
+#define TCPOPT_WSCALE_OK_G(x) \
+ (((x) >> TCPOPT_WSCALE_OK_S) & TCPOPT_WSCALE_OK_M)
+
+#define TCPOPT_SACK_S 6
+#define TCPOPT_SACK_M 0x1
+#define TCPOPT_SACK_G(x) (((x) >> TCPOPT_SACK_S) & TCPOPT_SACK_M)
+
+#define TCPOPT_TSTAMP_S 7
+#define TCPOPT_TSTAMP_M 0x1
+#define TCPOPT_TSTAMP_G(x) (((x) >> TCPOPT_TSTAMP_S) & TCPOPT_TSTAMP_M)
+
+#define TCPOPT_SND_WSCALE_S 8
+#define TCPOPT_SND_WSCALE_M 0xF
+#define TCPOPT_SND_WSCALE_G(x) \
+ (((x) >> TCPOPT_SND_WSCALE_S) & TCPOPT_SND_WSCALE_M)
+
+#define TCPOPT_MSS_S 12
+#define TCPOPT_MSS_M 0xF
+#define TCPOPT_MSS_G(x) (((x) >> TCPOPT_MSS_S) & TCPOPT_MSS_M)
+
struct cpl_act_establish {
union opcode_tid ot;
__be32 rsvd;
@@ -422,24 +518,39 @@ struct cpl_get_tcb {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
-#define QUEUENO(x) ((x) << 0)
-#define REPLY_CHAN(x) ((x) << 14)
-#define NO_REPLY(x) ((x) << 15)
__be16 cookie;
};
+/* cpl_get_tcb.reply_ctrl fields */
+#define QUEUENO_S 0
+#define QUEUENO_V(x) ((x) << QUEUENO_S)
+
+#define REPLY_CHAN_S 14
+#define REPLY_CHAN_V(x) ((x) << REPLY_CHAN_S)
+#define REPLY_CHAN_F REPLY_CHAN_V(1U)
+
+#define NO_REPLY_S 15
+#define NO_REPLY_V(x) ((x) << NO_REPLY_S)
+#define NO_REPLY_F NO_REPLY_V(1U)
+
struct cpl_set_tcb_field {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
__be16 word_cookie;
-#define TCB_WORD(x) ((x) << 0)
-#define TCB_COOKIE(x) ((x) << 5)
-#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
__be64 mask;
__be64 val;
};
+/* cpl_set_tcb_field.word_cookie fields */
+#define TCB_WORD_S 0
+#define TCB_WORD(x) ((x) << TCB_WORD_S)
+
+#define TCB_COOKIE_S 5
+#define TCB_COOKIE_M 0x7
+#define TCB_COOKIE_V(x) ((x) << TCB_COOKIE_S)
+#define TCB_COOKIE_G(x) (((x) >> TCB_COOKIE_S) & TCB_COOKIE_M)
+
struct cpl_set_tcb_rpl {
union opcode_tid ot;
__be16 rsvd;
@@ -466,10 +577,14 @@ struct cpl_close_listsvr_req {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
-#define LISTSVR_IPV6(x) ((x) << 14)
__be16 rsvd;
};
+/* additional cpl_close_listsvr_req.reply_ctrl field */
+#define LISTSVR_IPV6_S 14
+#define LISTSVR_IPV6_V(x) ((x) << LISTSVR_IPV6_S)
+#define LISTSVR_IPV6_F LISTSVR_IPV6_V(1U)
+
struct cpl_close_listsvr_rpl {
union opcode_tid ot;
u8 rsvd[3];
@@ -565,6 +680,34 @@ struct cpl_tx_pkt_lso_core {
/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
};
+/* cpl_tx_pkt_lso_core.lso_ctrl fields */
+#define LSO_TCPHDR_LEN_S 0
+#define LSO_TCPHDR_LEN_V(x) ((x) << LSO_TCPHDR_LEN_S)
+
+#define LSO_IPHDR_LEN_S 4
+#define LSO_IPHDR_LEN_V(x) ((x) << LSO_IPHDR_LEN_S)
+
+#define LSO_ETHHDR_LEN_S 16
+#define LSO_ETHHDR_LEN_V(x) ((x) << LSO_ETHHDR_LEN_S)
+
+#define LSO_IPV6_S 20
+#define LSO_IPV6_V(x) ((x) << LSO_IPV6_S)
+#define LSO_IPV6_F LSO_IPV6_V(1U)
+
+#define LSO_LAST_SLICE_S 22
+#define LSO_LAST_SLICE_V(x) ((x) << LSO_LAST_SLICE_S)
+#define LSO_LAST_SLICE_F LSO_LAST_SLICE_V(1U)
+
+#define LSO_FIRST_SLICE_S 23
+#define LSO_FIRST_SLICE_V(x) ((x) << LSO_FIRST_SLICE_S)
+#define LSO_FIRST_SLICE_F LSO_FIRST_SLICE_V(1U)
+
+#define LSO_OPCODE_S 24
+#define LSO_OPCODE_V(x) ((x) << LSO_OPCODE_S)
+
+#define LSO_T5_XFER_SIZE_S 0
+#define LSO_T5_XFER_SIZE_V(x) ((x) << LSO_T5_XFER_SIZE_S)
+
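A short sketch of how an lso_ctrl word could be assembled from the field macros above. The opcode and header-length values are placeholders, and the real driver derives them from the skb and byte-swaps the result before storing it in cpl_tx_pkt_lso_core.lso_ctrl:

#include <stdio.h>
#include <stdint.h>

/* Field macros copied from the hunk above (t4_msg.h) */
#define LSO_TCPHDR_LEN_S     0
#define LSO_TCPHDR_LEN_V(x)  ((x) << LSO_TCPHDR_LEN_S)

#define LSO_IPHDR_LEN_S      4
#define LSO_IPHDR_LEN_V(x)   ((x) << LSO_IPHDR_LEN_S)

#define LSO_ETHHDR_LEN_S     16
#define LSO_ETHHDR_LEN_V(x)  ((x) << LSO_ETHHDR_LEN_S)

#define LSO_LAST_SLICE_S     22
#define LSO_LAST_SLICE_V(x)  ((x) << LSO_LAST_SLICE_S)
#define LSO_LAST_SLICE_F     LSO_LAST_SLICE_V(1U)

#define LSO_FIRST_SLICE_S    23
#define LSO_FIRST_SLICE_V(x) ((x) << LSO_FIRST_SLICE_S)
#define LSO_FIRST_SLICE_F    LSO_FIRST_SLICE_V(1U)

#define LSO_OPCODE_S         24
#define LSO_OPCODE_V(x)      ((x) << LSO_OPCODE_S)

int main(void)
{
	/* Placeholder opcode and header-length values, for illustration
	 * only; they are not taken from the hardware documentation.
	 */
	uint32_t lso_ctrl = LSO_OPCODE_V(0x1du) |
			    LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
			    LSO_ETHHDR_LEN_V(4u) |
			    LSO_IPHDR_LEN_V(5u) |
			    LSO_TCPHDR_LEN_V(5u);

	printf("lso_ctrl = 0x%08x\n", (unsigned)lso_ctrl);
	return 0;
}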
struct cpl_tx_pkt_lso {
WR_HDR;
struct cpl_tx_pkt_lso_core c;
@@ -574,8 +717,6 @@ struct cpl_tx_pkt_lso {
struct cpl_iscsi_hdr {
union opcode_tid ot;
__be16 pdu_len_ddp;
-#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
-#define ISCSI_DDP (1 << 15)
__be16 len;
__be32 seq;
__be16 urg;
@@ -583,6 +724,16 @@ struct cpl_iscsi_hdr {
u8 status;
};
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define ISCSI_PDU_LEN_S 0
+#define ISCSI_PDU_LEN_M 0x7FFF
+#define ISCSI_PDU_LEN_V(x) ((x) << ISCSI_PDU_LEN_S)
+#define ISCSI_PDU_LEN_G(x) (((x) >> ISCSI_PDU_LEN_S) & ISCSI_PDU_LEN_M)
+
+#define ISCSI_DDP_S 15
+#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
+#define ISCSI_DDP_F ISCSI_DDP_V(1U)
+
struct cpl_rx_data {
union opcode_tid ot;
__be16 rsvd;
@@ -639,49 +790,61 @@ struct cpl_rx_pkt {
__be16 vlan;
__be16 len;
__be32 l2info;
-#define RXF_UDP (1 << 22)
-#define RXF_TCP (1 << 23)
-#define RXF_IP (1 << 24)
-#define RXF_IP6 (1 << 25)
__be16 hdr_len;
__be16 err_vec;
};
+#define RXF_UDP_S 22
+#define RXF_UDP_V(x) ((x) << RXF_UDP_S)
+#define RXF_UDP_F RXF_UDP_V(1U)
+
+#define RXF_TCP_S 23
+#define RXF_TCP_V(x) ((x) << RXF_TCP_S)
+#define RXF_TCP_F RXF_TCP_V(1U)
+
+#define RXF_IP_S 24
+#define RXF_IP_V(x) ((x) << RXF_IP_S)
+#define RXF_IP_F RXF_IP_V(1U)
+
+#define RXF_IP6_S 25
+#define RXF_IP6_V(x) ((x) << RXF_IP6_S)
+#define RXF_IP6_F RXF_IP6_V(1U)
+
/* rx_pkt.l2info fields */
-#define S_RX_ETHHDR_LEN 0
-#define M_RX_ETHHDR_LEN 0x1F
-#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
-#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
-
-#define S_RX_T5_ETHHDR_LEN 0
-#define M_RX_T5_ETHHDR_LEN 0x3F
-#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN)
-#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN)
-
-#define S_RX_MACIDX 8
-#define M_RX_MACIDX 0x1FF
-#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
-#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
-
-#define S_RXF_SYN 21
-#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
-#define F_RXF_SYN V_RXF_SYN(1U)
-
-#define S_RX_CHAN 28
-#define M_RX_CHAN 0xF
-#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
-#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+#define RX_ETHHDR_LEN_S 0
+#define RX_ETHHDR_LEN_M 0x1F
+#define RX_ETHHDR_LEN_V(x) ((x) << RX_ETHHDR_LEN_S)
+#define RX_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_ETHHDR_LEN_M)
+
+#define RX_T5_ETHHDR_LEN_S 0
+#define RX_T5_ETHHDR_LEN_M 0x3F
+#define RX_T5_ETHHDR_LEN_V(x) ((x) << RX_T5_ETHHDR_LEN_S)
+#define RX_T5_ETHHDR_LEN_G(x) (((x) >> RX_T5_ETHHDR_LEN_S) & RX_T5_ETHHDR_LEN_M)
+
+#define RX_MACIDX_S 8
+#define RX_MACIDX_M 0x1FF
+#define RX_MACIDX_V(x) ((x) << RX_MACIDX_S)
+#define RX_MACIDX_G(x) (((x) >> RX_MACIDX_S) & RX_MACIDX_M)
+
+#define RXF_SYN_S 21
+#define RXF_SYN_V(x) ((x) << RXF_SYN_S)
+#define RXF_SYN_F RXF_SYN_V(1U)
+
+#define RX_CHAN_S 28
+#define RX_CHAN_M 0xF
+#define RX_CHAN_V(x) ((x) << RX_CHAN_S)
+#define RX_CHAN_G(x) (((x) >> RX_CHAN_S) & RX_CHAN_M)
/* rx_pkt.hdr_len fields */
-#define S_RX_TCPHDR_LEN 0
-#define M_RX_TCPHDR_LEN 0x3F
-#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
-#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
+#define RX_TCPHDR_LEN_S 0
+#define RX_TCPHDR_LEN_M 0x3F
+#define RX_TCPHDR_LEN_V(x) ((x) << RX_TCPHDR_LEN_S)
+#define RX_TCPHDR_LEN_G(x) (((x) >> RX_TCPHDR_LEN_S) & RX_TCPHDR_LEN_M)
-#define S_RX_IPHDR_LEN 6
-#define M_RX_IPHDR_LEN 0x3FF
-#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
-#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+#define RX_IPHDR_LEN_S 6
+#define RX_IPHDR_LEN_M 0x3FF
+#define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S)
+#define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M)
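
A minimal decode of the renamed rx_pkt.l2info fields, applying the masks and shifts defined above to a made-up value; in the driver the field is __be32 and is byte-swapped before these extractors are used:

#include <stdio.h>
#include <stdint.h>

/* Field macros copied from the hunk above (t4_msg.h) */
#define RX_ETHHDR_LEN_S    0
#define RX_ETHHDR_LEN_M    0x1F
#define RX_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_ETHHDR_LEN_M)

#define RX_MACIDX_S    8
#define RX_MACIDX_M    0x1FF
#define RX_MACIDX_V(x) ((x) << RX_MACIDX_S)
#define RX_MACIDX_G(x) (((x) >> RX_MACIDX_S) & RX_MACIDX_M)

#define RXF_TCP_S    23
#define RXF_TCP_V(x) ((x) << RXF_TCP_S)
#define RXF_TCP_F    RXF_TCP_V(1U)

#define RX_CHAN_S    28
#define RX_CHAN_M    0xF
#define RX_CHAN_V(x) ((x) << RX_CHAN_S)
#define RX_CHAN_G(x) (((x) >> RX_CHAN_S) & RX_CHAN_M)

int main(void)
{
	/* Made-up l2info word: channel 1, MAC index 2, TCP flag set,
	 * Ethernet header length 14.
	 */
	uint32_t l2info = RX_CHAN_V(1u) | RX_MACIDX_V(2u) |
			  RXF_TCP_F | 14u;

	printf("chan    = %u\n", (unsigned)RX_CHAN_G(l2info));		/* 1 */
	printf("macidx  = %u\n", (unsigned)RX_MACIDX_G(l2info));	/* 2 */
	printf("is tcp  = %u\n", !!(l2info & RXF_TCP_F));		/* 1 */
	printf("eth len = %u\n", (unsigned)RX_ETHHDR_LEN_G(l2info));	/* 14 */
	return 0;
}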
struct cpl_trace_pkt {
u8 opcode;
@@ -730,14 +893,22 @@ struct cpl_l2t_write_req {
WR_HDR;
union opcode_tid ot;
__be16 params;
-#define L2T_W_INFO(x) ((x) << 2)
-#define L2T_W_PORT(x) ((x) << 8)
-#define L2T_W_NOREPLY(x) ((x) << 15)
__be16 l2t_idx;
__be16 vlan;
u8 dst_mac[6];
};
+/* cpl_l2t_write_req.params fields */
+#define L2T_W_INFO_S 2
+#define L2T_W_INFO_V(x) ((x) << L2T_W_INFO_S)
+
+#define L2T_W_PORT_S 8
+#define L2T_W_PORT_V(x) ((x) << L2T_W_PORT_S)
+
+#define L2T_W_NOREPLY_S 15
+#define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S)
+#define L2T_W_NOREPLY_F L2T_W_NOREPLY_V(1U)
+
struct cpl_l2t_write_rpl {
union opcode_tid ot;
u8 status;
@@ -752,11 +923,15 @@ struct cpl_rdma_terminate {
struct cpl_sge_egr_update {
__be32 opcode_qid;
-#define EGR_QID(x) ((x) & 0x1FFFF)
__be16 cidx;
__be16 pidx;
};
+/* cpl_sge_egr_update.opcode_qid fields */
+#define EGR_QID_S 0
+#define EGR_QID_M 0x1FFFF
+#define EGR_QID_G(x) (((x) >> EGR_QID_S) & EGR_QID_M)
+
/* cpl_fw*.type values */
enum {
FW_TYPE_CMD_RPL = 0,
@@ -849,22 +1024,30 @@ struct ulptx_sge_pair {
struct ulptx_sgl {
__be32 cmd_nsge;
-#define ULPTX_NSGE(x) ((x) << 0)
-#define ULPTX_MORE (1U << 23)
__be32 len0;
__be64 addr0;
struct ulptx_sge_pair sge[0];
};
+#define ULPTX_NSGE_S 0
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
+
+#define ULPTX_MORE_S 23
+#define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S)
+#define ULPTX_MORE_F ULPTX_MORE_V(1U)
+
struct ulp_mem_io {
WR_HDR;
__be32 cmd;
__be32 len16; /* command length */
__be32 dlen; /* data length in 32-byte units */
__be32 lock_addr;
-#define ULP_MEMIO_LOCK(x) ((x) << 31)
};
+#define ULP_MEMIO_LOCK_S 31
+#define ULP_MEMIO_LOCK_V(x) ((x) << ULP_MEMIO_LOCK_S)
+#define ULP_MEMIO_LOCK_F ULP_MEMIO_LOCK_V(1U)
+
/* additional ulp_mem_io.cmd fields */
#define ULP_MEMIO_ORDER_S 23
#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S)
@@ -874,13 +1057,9 @@ struct ulp_mem_io {
#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S)
#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U)
-#define S_T5_ULP_MEMIO_IMM 23
-#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
-#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U)
-
-#define S_T5_ULP_MEMIO_ORDER 22
-#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
-#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
+#define T5_ULP_MEMIO_ORDER_S 22
+#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
+#define T5_ULP_MEMIO_ORDER_F T5_ULP_MEMIO_ORDER_V(1U)
/* ulp_mem_io.lock_addr fields */
#define ULP_MEMIO_ADDR_S 0
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 9e4f95a91fb4..ddfb5b846045 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -153,6 +153,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index d7bd34ee65bd..7ce55f9be9d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -63,460 +63,779 @@
#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
-#define SGE_PF_KDOORBELL 0x0
-#define QID_MASK 0xffff8000U
-#define QID_SHIFT 15
-#define QID(x) ((x) << QID_SHIFT)
-#define DBPRIO(x) ((x) << 14)
-#define DBTYPE(x) ((x) << 13)
-#define PIDX_MASK 0x00003fffU
-#define PIDX_SHIFT 0
-#define PIDX(x) ((x) << PIDX_SHIFT)
-#define PIDX_SHIFT_T5 0
-#define PIDX_T5(x) ((x) << PIDX_SHIFT_T5)
-
-
-#define SGE_TIMERREGS 6
-#define SGE_PF_GTS 0x4
-#define INGRESSQID_MASK 0xffff0000U
-#define INGRESSQID_SHIFT 16
-#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT)
-#define TIMERREG_MASK 0x0000e000U
-#define TIMERREG_SHIFT 13
-#define TIMERREG(x) ((x) << TIMERREG_SHIFT)
-#define SEINTARM_MASK 0x00001000U
-#define SEINTARM_SHIFT 12
-#define SEINTARM(x) ((x) << SEINTARM_SHIFT)
-#define CIDXINC_MASK 0x00000fffU
-#define CIDXINC_SHIFT 0
-#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
-
-#define X_RXPKTCPLMODE_SPLIT 1
-#define X_INGPADBOUNDARY_SHIFT 5
-
-#define SGE_CONTROL 0x1008
-#define SGE_CONTROL2_A 0x1124
-#define DCASYSTYPE 0x00080000U
-#define RXPKTCPLMODE_MASK 0x00040000U
-#define RXPKTCPLMODE_SHIFT 18
-#define RXPKTCPLMODE(x) ((x) << RXPKTCPLMODE_SHIFT)
-#define EGRSTATUSPAGESIZE_MASK 0x00020000U
-#define EGRSTATUSPAGESIZE_SHIFT 17
-#define EGRSTATUSPAGESIZE(x) ((x) << EGRSTATUSPAGESIZE_SHIFT)
-#define PKTSHIFT_MASK 0x00001c00U
-#define PKTSHIFT_SHIFT 10
-#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
-#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
-#define INGPCIEBOUNDARY_32B_X 0
-#define INGPCIEBOUNDARY_MASK 0x00000380U
-#define INGPCIEBOUNDARY_SHIFT 7
-#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
-#define INGPADBOUNDARY_MASK 0x00000070U
-#define INGPADBOUNDARY_SHIFT 4
-#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
-#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
- >> INGPADBOUNDARY_SHIFT)
-#define INGPACKBOUNDARY_16B_X 0
-#define INGPACKBOUNDARY_SHIFT_X 5
+#define SGE_PF_KDOORBELL_A 0x0
+
+#define QID_S 15
+#define QID_V(x) ((x) << QID_S)
+
+#define DBPRIO_S 14
+#define DBPRIO_V(x) ((x) << DBPRIO_S)
+#define DBPRIO_F DBPRIO_V(1U)
+
+#define PIDX_S 0
+#define PIDX_V(x) ((x) << PIDX_S)
+
+#define SGE_VF_KDOORBELL_A 0x0
+
+#define DBTYPE_S 13
+#define DBTYPE_V(x) ((x) << DBTYPE_S)
+#define DBTYPE_F DBTYPE_V(1U)
+
+#define PIDX_T5_S 0
+#define PIDX_T5_M 0x1fffU
+#define PIDX_T5_V(x) ((x) << PIDX_T5_S)
+#define PIDX_T5_G(x) (((x) >> PIDX_T5_S) & PIDX_T5_M)
+
+#define SGE_PF_GTS_A 0x4
+
+#define INGRESSQID_S 16
+#define INGRESSQID_V(x) ((x) << INGRESSQID_S)
+
+#define TIMERREG_S 13
+#define TIMERREG_V(x) ((x) << TIMERREG_S)
+
+#define SEINTARM_S 12
+#define SEINTARM_V(x) ((x) << SEINTARM_S)
+
+#define CIDXINC_S 0
+#define CIDXINC_M 0xfffU
+#define CIDXINC_V(x) ((x) << CIDXINC_S)
+
+#define SGE_CONTROL_A 0x1008
+#define SGE_CONTROL2_A 0x1124
+
+#define RXPKTCPLMODE_S 18
+#define RXPKTCPLMODE_V(x) ((x) << RXPKTCPLMODE_S)
+#define RXPKTCPLMODE_F RXPKTCPLMODE_V(1U)
+
+#define EGRSTATUSPAGESIZE_S 17
+#define EGRSTATUSPAGESIZE_V(x) ((x) << EGRSTATUSPAGESIZE_S)
+#define EGRSTATUSPAGESIZE_F EGRSTATUSPAGESIZE_V(1U)
+
+#define PKTSHIFT_S 10
+#define PKTSHIFT_M 0x7U
+#define PKTSHIFT_V(x) ((x) << PKTSHIFT_S)
+#define PKTSHIFT_G(x) (((x) >> PKTSHIFT_S) & PKTSHIFT_M)
+
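The _S/_M/_V/_G quadruples introduced throughout this header follow one idiom: V(M) builds the field mask at its position, so a read-modify-write of a single field is (v & ~FIELD_V(FIELD_M)) | FIELD_V(new). A tiny sketch using the PKTSHIFT field above and a made-up SGE_CONTROL_A value:

#include <stdio.h>
#include <stdint.h>

/* Field macros copied from the hunk above (t4_regs.h) */
#define PKTSHIFT_S    10
#define PKTSHIFT_M    0x7U
#define PKTSHIFT_V(x) ((x) << PKTSHIFT_S)
#define PKTSHIFT_G(x) (((x) >> PKTSHIFT_S) & PKTSHIFT_M)

int main(void)
{
	/* Made-up SGE_CONTROL_A contents; the driver would obtain this
	 * with t4_read_reg() and write it back with t4_write_reg().
	 */
	uint32_t v = 0x00041234;

	printf("old pktshift = %u\n", (unsigned)PKTSHIFT_G(v));

	/* clear the field with V(M), then OR in the new value */
	v = (v & ~PKTSHIFT_V(PKTSHIFT_M)) | PKTSHIFT_V(2U);

	printf("new pktshift = %u\n", (unsigned)PKTSHIFT_G(v));
	return 0;
}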
+#define INGPCIEBOUNDARY_S 7
+#define INGPCIEBOUNDARY_V(x) ((x) << INGPCIEBOUNDARY_S)
+
+#define INGPADBOUNDARY_S 4
+#define INGPADBOUNDARY_M 0x7U
+#define INGPADBOUNDARY_V(x) ((x) << INGPADBOUNDARY_S)
+#define INGPADBOUNDARY_G(x) (((x) >> INGPADBOUNDARY_S) & INGPADBOUNDARY_M)
+
+#define EGRPCIEBOUNDARY_S 1
+#define EGRPCIEBOUNDARY_V(x) ((x) << EGRPCIEBOUNDARY_S)
#define INGPACKBOUNDARY_S 16
#define INGPACKBOUNDARY_M 0x7U
#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
& INGPACKBOUNDARY_M)
-#define EGRPCIEBOUNDARY_MASK 0x0000000eU
-#define EGRPCIEBOUNDARY_SHIFT 1
-#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
-#define GLOBALENABLE 0x00000001U
-#define SGE_HOST_PAGE_SIZE 0x100c
+#define GLOBALENABLE_S 0
+#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
+#define GLOBALENABLE_F GLOBALENABLE_V(1U)
+
+#define SGE_HOST_PAGE_SIZE_A 0x100c
+
+#define HOSTPAGESIZEPF7_S 28
+#define HOSTPAGESIZEPF7_M 0xfU
+#define HOSTPAGESIZEPF7_V(x) ((x) << HOSTPAGESIZEPF7_S)
+#define HOSTPAGESIZEPF7_G(x) (((x) >> HOSTPAGESIZEPF7_S) & HOSTPAGESIZEPF7_M)
+
+#define HOSTPAGESIZEPF6_S 24
+#define HOSTPAGESIZEPF6_M 0xfU
+#define HOSTPAGESIZEPF6_V(x) ((x) << HOSTPAGESIZEPF6_S)
+#define HOSTPAGESIZEPF6_G(x) (((x) >> HOSTPAGESIZEPF6_S) & HOSTPAGESIZEPF6_M)
+
+#define HOSTPAGESIZEPF5_S 20
+#define HOSTPAGESIZEPF5_M 0xfU
+#define HOSTPAGESIZEPF5_V(x) ((x) << HOSTPAGESIZEPF5_S)
+#define HOSTPAGESIZEPF5_G(x) (((x) >> HOSTPAGESIZEPF5_S) & HOSTPAGESIZEPF5_M)
+
+#define HOSTPAGESIZEPF4_S 16
+#define HOSTPAGESIZEPF4_M 0xfU
+#define HOSTPAGESIZEPF4_V(x) ((x) << HOSTPAGESIZEPF4_S)
+#define HOSTPAGESIZEPF4_G(x) (((x) >> HOSTPAGESIZEPF4_S) & HOSTPAGESIZEPF4_M)
+
+#define HOSTPAGESIZEPF3_S 12
+#define HOSTPAGESIZEPF3_M 0xfU
+#define HOSTPAGESIZEPF3_V(x) ((x) << HOSTPAGESIZEPF3_S)
+#define HOSTPAGESIZEPF3_G(x) (((x) >> HOSTPAGESIZEPF3_S) & HOSTPAGESIZEPF3_M)
+
+#define HOSTPAGESIZEPF2_S 8
+#define HOSTPAGESIZEPF2_M 0xfU
+#define HOSTPAGESIZEPF2_V(x) ((x) << HOSTPAGESIZEPF2_S)
+#define HOSTPAGESIZEPF2_G(x) (((x) >> HOSTPAGESIZEPF2_S) & HOSTPAGESIZEPF2_M)
+
+#define HOSTPAGESIZEPF1_S 4
+#define HOSTPAGESIZEPF1_M 0xfU
+#define HOSTPAGESIZEPF1_V(x) ((x) << HOSTPAGESIZEPF1_S)
+#define HOSTPAGESIZEPF1_G(x) (((x) >> HOSTPAGESIZEPF1_S) & HOSTPAGESIZEPF1_M)
+
+#define HOSTPAGESIZEPF0_S 0
+#define HOSTPAGESIZEPF0_M 0xfU
+#define HOSTPAGESIZEPF0_V(x) ((x) << HOSTPAGESIZEPF0_S)
+#define HOSTPAGESIZEPF0_G(x) (((x) >> HOSTPAGESIZEPF0_S) & HOSTPAGESIZEPF0_M)
+
+#define SGE_EGRESS_QUEUES_PER_PAGE_PF_A 0x1010
+#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
-#define HOSTPAGESIZEPF7_MASK 0x0000000fU
-#define HOSTPAGESIZEPF7_SHIFT 28
-#define HOSTPAGESIZEPF7(x) ((x) << HOSTPAGESIZEPF7_SHIFT)
+#define QUEUESPERPAGEPF1_S 4
-#define HOSTPAGESIZEPF6_MASK 0x0000000fU
-#define HOSTPAGESIZEPF6_SHIFT 24
-#define HOSTPAGESIZEPF6(x) ((x) << HOSTPAGESIZEPF6_SHIFT)
+#define QUEUESPERPAGEPF0_S 0
+#define QUEUESPERPAGEPF0_M 0xfU
+#define QUEUESPERPAGEPF0_V(x) ((x) << QUEUESPERPAGEPF0_S)
+#define QUEUESPERPAGEPF0_G(x) (((x) >> QUEUESPERPAGEPF0_S) & QUEUESPERPAGEPF0_M)
-#define HOSTPAGESIZEPF5_MASK 0x0000000fU
-#define HOSTPAGESIZEPF5_SHIFT 20
-#define HOSTPAGESIZEPF5(x) ((x) << HOSTPAGESIZEPF5_SHIFT)
+#define SGE_INT_CAUSE1_A 0x1024
+#define SGE_INT_CAUSE2_A 0x1030
+#define SGE_INT_CAUSE3_A 0x103c
+
+#define ERR_FLM_DBP_S 31
+#define ERR_FLM_DBP_V(x) ((x) << ERR_FLM_DBP_S)
+#define ERR_FLM_DBP_F ERR_FLM_DBP_V(1U)
+
+#define ERR_FLM_IDMA1_S 30
+#define ERR_FLM_IDMA1_V(x) ((x) << ERR_FLM_IDMA1_S)
+#define ERR_FLM_IDMA1_F ERR_FLM_IDMA1_V(1U)
+
+#define ERR_FLM_IDMA0_S 29
+#define ERR_FLM_IDMA0_V(x) ((x) << ERR_FLM_IDMA0_S)
+#define ERR_FLM_IDMA0_F ERR_FLM_IDMA0_V(1U)
+
+#define ERR_FLM_HINT_S 28
+#define ERR_FLM_HINT_V(x) ((x) << ERR_FLM_HINT_S)
+#define ERR_FLM_HINT_F ERR_FLM_HINT_V(1U)
+
+#define ERR_PCIE_ERROR3_S 27
+#define ERR_PCIE_ERROR3_V(x) ((x) << ERR_PCIE_ERROR3_S)
+#define ERR_PCIE_ERROR3_F ERR_PCIE_ERROR3_V(1U)
+
+#define ERR_PCIE_ERROR2_S 26
+#define ERR_PCIE_ERROR2_V(x) ((x) << ERR_PCIE_ERROR2_S)
+#define ERR_PCIE_ERROR2_F ERR_PCIE_ERROR2_V(1U)
+
+#define ERR_PCIE_ERROR1_S 25
+#define ERR_PCIE_ERROR1_V(x) ((x) << ERR_PCIE_ERROR1_S)
+#define ERR_PCIE_ERROR1_F ERR_PCIE_ERROR1_V(1U)
+
+#define ERR_PCIE_ERROR0_S 24
+#define ERR_PCIE_ERROR0_V(x) ((x) << ERR_PCIE_ERROR0_S)
+#define ERR_PCIE_ERROR0_F ERR_PCIE_ERROR0_V(1U)
+
+#define ERR_CPL_EXCEED_IQE_SIZE_S 22
+#define ERR_CPL_EXCEED_IQE_SIZE_V(x) ((x) << ERR_CPL_EXCEED_IQE_SIZE_S)
+#define ERR_CPL_EXCEED_IQE_SIZE_F ERR_CPL_EXCEED_IQE_SIZE_V(1U)
+
+#define ERR_INVALID_CIDX_INC_S 21
+#define ERR_INVALID_CIDX_INC_V(x) ((x) << ERR_INVALID_CIDX_INC_S)
+#define ERR_INVALID_CIDX_INC_F ERR_INVALID_CIDX_INC_V(1U)
+
+#define ERR_CPL_OPCODE_0_S 19
+#define ERR_CPL_OPCODE_0_V(x) ((x) << ERR_CPL_OPCODE_0_S)
+#define ERR_CPL_OPCODE_0_F ERR_CPL_OPCODE_0_V(1U)
+
+#define ERR_DROPPED_DB_S 18
+#define ERR_DROPPED_DB_V(x) ((x) << ERR_DROPPED_DB_S)
+#define ERR_DROPPED_DB_F ERR_DROPPED_DB_V(1U)
+
+#define ERR_DATA_CPL_ON_HIGH_QID1_S 17
+#define ERR_DATA_CPL_ON_HIGH_QID1_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID1_S)
+#define ERR_DATA_CPL_ON_HIGH_QID1_F ERR_DATA_CPL_ON_HIGH_QID1_V(1U)
+
+#define ERR_DATA_CPL_ON_HIGH_QID0_S 16
+#define ERR_DATA_CPL_ON_HIGH_QID0_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID0_S)
+#define ERR_DATA_CPL_ON_HIGH_QID0_F ERR_DATA_CPL_ON_HIGH_QID0_V(1U)
+
+#define ERR_BAD_DB_PIDX3_S 15
+#define ERR_BAD_DB_PIDX3_V(x) ((x) << ERR_BAD_DB_PIDX3_S)
+#define ERR_BAD_DB_PIDX3_F ERR_BAD_DB_PIDX3_V(1U)
+
+#define ERR_BAD_DB_PIDX2_S 14
+#define ERR_BAD_DB_PIDX2_V(x) ((x) << ERR_BAD_DB_PIDX2_S)
+#define ERR_BAD_DB_PIDX2_F ERR_BAD_DB_PIDX2_V(1U)
+
+#define ERR_BAD_DB_PIDX1_S 13
+#define ERR_BAD_DB_PIDX1_V(x) ((x) << ERR_BAD_DB_PIDX1_S)
+#define ERR_BAD_DB_PIDX1_F ERR_BAD_DB_PIDX1_V(1U)
+
+#define ERR_BAD_DB_PIDX0_S 12
+#define ERR_BAD_DB_PIDX0_V(x) ((x) << ERR_BAD_DB_PIDX0_S)
+#define ERR_BAD_DB_PIDX0_F ERR_BAD_DB_PIDX0_V(1U)
+
+#define ERR_ING_CTXT_PRIO_S 10
+#define ERR_ING_CTXT_PRIO_V(x) ((x) << ERR_ING_CTXT_PRIO_S)
+#define ERR_ING_CTXT_PRIO_F ERR_ING_CTXT_PRIO_V(1U)
+
+#define ERR_EGR_CTXT_PRIO_S 9
+#define ERR_EGR_CTXT_PRIO_V(x) ((x) << ERR_EGR_CTXT_PRIO_S)
+#define ERR_EGR_CTXT_PRIO_F ERR_EGR_CTXT_PRIO_V(1U)
+
+#define DBFIFO_HP_INT_S 8
+#define DBFIFO_HP_INT_V(x) ((x) << DBFIFO_HP_INT_S)
+#define DBFIFO_HP_INT_F DBFIFO_HP_INT_V(1U)
+
+#define DBFIFO_LP_INT_S 7
+#define DBFIFO_LP_INT_V(x) ((x) << DBFIFO_LP_INT_S)
+#define DBFIFO_LP_INT_F DBFIFO_LP_INT_V(1U)
+
+#define INGRESS_SIZE_ERR_S 5
+#define INGRESS_SIZE_ERR_V(x) ((x) << INGRESS_SIZE_ERR_S)
+#define INGRESS_SIZE_ERR_F INGRESS_SIZE_ERR_V(1U)
+
+#define EGRESS_SIZE_ERR_S 4
+#define EGRESS_SIZE_ERR_V(x) ((x) << EGRESS_SIZE_ERR_S)
+#define EGRESS_SIZE_ERR_F EGRESS_SIZE_ERR_V(1U)
+
+#define SGE_INT_ENABLE3_A 0x1040
+#define SGE_FL_BUFFER_SIZE0_A 0x1044
+#define SGE_FL_BUFFER_SIZE1_A 0x1048
+#define SGE_FL_BUFFER_SIZE2_A 0x104c
+#define SGE_FL_BUFFER_SIZE3_A 0x1050
+#define SGE_FL_BUFFER_SIZE4_A 0x1054
+#define SGE_FL_BUFFER_SIZE5_A 0x1058
+#define SGE_FL_BUFFER_SIZE6_A 0x105c
+#define SGE_FL_BUFFER_SIZE7_A 0x1060
+#define SGE_FL_BUFFER_SIZE8_A 0x1064
+
+#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
+
+#define THRESHOLD_0_S 24
+#define THRESHOLD_0_M 0x3fU
+#define THRESHOLD_0_V(x) ((x) << THRESHOLD_0_S)
+#define THRESHOLD_0_G(x) (((x) >> THRESHOLD_0_S) & THRESHOLD_0_M)
+
+#define THRESHOLD_1_S 16
+#define THRESHOLD_1_M 0x3fU
+#define THRESHOLD_1_V(x) ((x) << THRESHOLD_1_S)
+#define THRESHOLD_1_G(x) (((x) >> THRESHOLD_1_S) & THRESHOLD_1_M)
+
+#define THRESHOLD_2_S 8
+#define THRESHOLD_2_M 0x3fU
+#define THRESHOLD_2_V(x) ((x) << THRESHOLD_2_S)
+#define THRESHOLD_2_G(x) (((x) >> THRESHOLD_2_S) & THRESHOLD_2_M)
+
+#define THRESHOLD_3_S 0
+#define THRESHOLD_3_M 0x3fU
+#define THRESHOLD_3_V(x) ((x) << THRESHOLD_3_S)
+#define THRESHOLD_3_G(x) (((x) >> THRESHOLD_3_S) & THRESHOLD_3_M)
+
+#define SGE_CONM_CTRL_A 0x1094
+
+#define EGRTHRESHOLD_S 8
+#define EGRTHRESHOLD_M 0x3fU
+#define EGRTHRESHOLD_V(x) ((x) << EGRTHRESHOLD_S)
+#define EGRTHRESHOLD_G(x) (((x) >> EGRTHRESHOLD_S) & EGRTHRESHOLD_M)
+
+#define EGRTHRESHOLDPACKING_S 14
+#define EGRTHRESHOLDPACKING_M 0x3fU
+#define EGRTHRESHOLDPACKING_V(x) ((x) << EGRTHRESHOLDPACKING_S)
+#define EGRTHRESHOLDPACKING_G(x) \
+ (((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
+
+#define SGE_TIMESTAMP_LO_A 0x1098
+#define SGE_TIMESTAMP_HI_A 0x109c
+
+#define TSOP_S 28
+#define TSOP_M 0x3U
+#define TSOP_V(x) ((x) << TSOP_S)
+#define TSOP_G(x) (((x) >> TSOP_S) & TSOP_M)
+
+#define TSVAL_S 0
+#define TSVAL_M 0xfffffffU
+#define TSVAL_V(x) ((x) << TSVAL_S)
+#define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
+
+#define SGE_DBFIFO_STATUS_A 0x10a4
+
+#define HP_INT_THRESH_S 28
+#define HP_INT_THRESH_M 0xfU
+#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
+
+#define LP_INT_THRESH_S 12
+#define LP_INT_THRESH_M 0xfU
+#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
+
+#define SGE_DOORBELL_CONTROL_A 0x10a8
+
+#define NOCOALESCE_S 26
+#define NOCOALESCE_V(x) ((x) << NOCOALESCE_S)
+#define NOCOALESCE_F NOCOALESCE_V(1U)
+
+#define ENABLE_DROP_S 13
+#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
+#define ENABLE_DROP_F ENABLE_DROP_V(1U)
+
+#define SGE_TIMER_VALUE_0_AND_1_A 0x10b8
+
+#define TIMERVALUE0_S 16
+#define TIMERVALUE0_M 0xffffU
+#define TIMERVALUE0_V(x) ((x) << TIMERVALUE0_S)
+#define TIMERVALUE0_G(x) (((x) >> TIMERVALUE0_S) & TIMERVALUE0_M)
+
+#define TIMERVALUE1_S 0
+#define TIMERVALUE1_M 0xffffU
+#define TIMERVALUE1_V(x) ((x) << TIMERVALUE1_S)
+#define TIMERVALUE1_G(x) (((x) >> TIMERVALUE1_S) & TIMERVALUE1_M)
+
+#define SGE_TIMER_VALUE_2_AND_3_A 0x10bc
+
+#define TIMERVALUE2_S 16
+#define TIMERVALUE2_M 0xffffU
+#define TIMERVALUE2_V(x) ((x) << TIMERVALUE2_S)
+#define TIMERVALUE2_G(x) (((x) >> TIMERVALUE2_S) & TIMERVALUE2_M)
+
+#define TIMERVALUE3_S 0
+#define TIMERVALUE3_M 0xffffU
+#define TIMERVALUE3_V(x) ((x) << TIMERVALUE3_S)
+#define TIMERVALUE3_G(x) (((x) >> TIMERVALUE3_S) & TIMERVALUE3_M)
+
+#define SGE_TIMER_VALUE_4_AND_5_A 0x10c0
+
+#define TIMERVALUE4_S 16
+#define TIMERVALUE4_M 0xffffU
+#define TIMERVALUE4_V(x) ((x) << TIMERVALUE4_S)
+#define TIMERVALUE4_G(x) (((x) >> TIMERVALUE4_S) & TIMERVALUE4_M)
-#define HOSTPAGESIZEPF4_MASK 0x0000000fU
-#define HOSTPAGESIZEPF4_SHIFT 16
-#define HOSTPAGESIZEPF4(x) ((x) << HOSTPAGESIZEPF4_SHIFT)
+#define TIMERVALUE5_S 0
+#define TIMERVALUE5_M 0xffffU
+#define TIMERVALUE5_V(x) ((x) << TIMERVALUE5_S)
+#define TIMERVALUE5_G(x) (((x) >> TIMERVALUE5_S) & TIMERVALUE5_M)
-#define HOSTPAGESIZEPF3_MASK 0x0000000fU
-#define HOSTPAGESIZEPF3_SHIFT 12
-#define HOSTPAGESIZEPF3(x) ((x) << HOSTPAGESIZEPF3_SHIFT)
+#define SGE_DEBUG_INDEX_A 0x10cc
+#define SGE_DEBUG_DATA_HIGH_A 0x10d0
+#define SGE_DEBUG_DATA_LOW_A 0x10d4
-#define HOSTPAGESIZEPF2_MASK 0x0000000fU
-#define HOSTPAGESIZEPF2_SHIFT 8
-#define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT)
+#define SGE_DEBUG_DATA_LOW_INDEX_2_A 0x12c8
+#define SGE_DEBUG_DATA_LOW_INDEX_3_A 0x12cc
+#define SGE_DEBUG_DATA_HIGH_INDEX_10_A 0x12a8
-#define HOSTPAGESIZEPF1_M 0x0000000fU
-#define HOSTPAGESIZEPF1_S 4
-#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_S)
+#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
+#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
-#define HOSTPAGESIZEPF0_M 0x0000000fU
-#define HOSTPAGESIZEPF0_S 0
-#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_S)
+#define HP_INT_THRESH_S 28
+#define HP_INT_THRESH_M 0xfU
+#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
-#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
-#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
+#define HP_COUNT_S 16
+#define HP_COUNT_M 0x7ffU
+#define HP_COUNT_G(x) (((x) >> HP_COUNT_S) & HP_COUNT_M)
-#define QUEUESPERPAGEPF1_S 4
+#define LP_INT_THRESH_S 12
+#define LP_INT_THRESH_M 0xfU
+#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
-#define QUEUESPERPAGEPF0_S 0
-#define QUEUESPERPAGEPF0_MASK 0x0000000fU
-#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+#define LP_COUNT_S 0
+#define LP_COUNT_M 0x7ffU
+#define LP_COUNT_G(x) (((x) >> LP_COUNT_S) & LP_COUNT_M)
-#define QUEUESPERPAGEPF0 0
-#define QUEUESPERPAGEPF1 4
+#define LP_INT_THRESH_T5_S 18
+#define LP_INT_THRESH_T5_M 0xfffU
+#define LP_INT_THRESH_T5_V(x) ((x) << LP_INT_THRESH_T5_S)
-/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
- * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
- * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
- * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64. For Ingress Queues,
- * we have a Going To Sleep register at offsets 8x+4.
- *
- * As noted above, we have many instances of the Simple Doorbell and Going To
- * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
- * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
- * avoid buffering of the writes to the Simple Doorbell and we want to use a
- * non-contiguous offset for the Going To Sleep writes in order to avoid
- * possible combining between them.
- */
-#define SGE_UDB_SIZE 128
-#define SGE_UDB_KDOORBELL 8
-#define SGE_UDB_GTS 20
-#define SGE_UDB_WCDOORBELL 64
-
-#define SGE_INT_CAUSE1 0x1024
-#define SGE_INT_CAUSE2 0x1030
-#define SGE_INT_CAUSE3 0x103c
-#define ERR_FLM_DBP 0x80000000U
-#define ERR_FLM_IDMA1 0x40000000U
-#define ERR_FLM_IDMA0 0x20000000U
-#define ERR_FLM_HINT 0x10000000U
-#define ERR_PCIE_ERROR3 0x08000000U
-#define ERR_PCIE_ERROR2 0x04000000U
-#define ERR_PCIE_ERROR1 0x02000000U
-#define ERR_PCIE_ERROR0 0x01000000U
-#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U
-#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U
-#define ERR_INVALID_CIDX_INC 0x00200000U
-#define ERR_ITP_TIME_PAUSED 0x00100000U
-#define ERR_CPL_OPCODE_0 0x00080000U
-#define ERR_DROPPED_DB 0x00040000U
-#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U
-#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U
-#define ERR_BAD_DB_PIDX3 0x00008000U
-#define ERR_BAD_DB_PIDX2 0x00004000U
-#define ERR_BAD_DB_PIDX1 0x00002000U
-#define ERR_BAD_DB_PIDX0 0x00001000U
-#define ERR_ING_PCIE_CHAN 0x00000800U
-#define ERR_ING_CTXT_PRIO 0x00000400U
-#define ERR_EGR_CTXT_PRIO 0x00000200U
-#define DBFIFO_HP_INT 0x00000100U
-#define DBFIFO_LP_INT 0x00000080U
-#define REG_ADDRESS_ERR 0x00000040U
-#define INGRESS_SIZE_ERR 0x00000020U
-#define EGRESS_SIZE_ERR 0x00000010U
-#define ERR_INV_CTXT3 0x00000008U
-#define ERR_INV_CTXT2 0x00000004U
-#define ERR_INV_CTXT1 0x00000002U
-#define ERR_INV_CTXT0 0x00000001U
-
-#define SGE_INT_ENABLE3 0x1040
-#define SGE_FL_BUFFER_SIZE0 0x1044
-#define SGE_FL_BUFFER_SIZE1 0x1048
-#define SGE_FL_BUFFER_SIZE2 0x104c
-#define SGE_FL_BUFFER_SIZE3 0x1050
-#define SGE_FL_BUFFER_SIZE4 0x1054
-#define SGE_FL_BUFFER_SIZE5 0x1058
-#define SGE_FL_BUFFER_SIZE6 0x105c
-#define SGE_FL_BUFFER_SIZE7 0x1060
-#define SGE_FL_BUFFER_SIZE8 0x1064
-
-#define SGE_INGRESS_RX_THRESHOLD 0x10a0
-#define THRESHOLD_0_MASK 0x3f000000U
-#define THRESHOLD_0_SHIFT 24
-#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT)
-#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT)
-#define THRESHOLD_1_MASK 0x003f0000U
-#define THRESHOLD_1_SHIFT 16
-#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT)
-#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT)
-#define THRESHOLD_2_MASK 0x00003f00U
-#define THRESHOLD_2_SHIFT 8
-#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT)
-#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT)
-#define THRESHOLD_3_MASK 0x0000003fU
-#define THRESHOLD_3_SHIFT 0
-#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT)
-#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
-
-#define SGE_CONM_CTRL 0x1094
-#define EGRTHRESHOLD_MASK 0x00003f00U
-#define EGRTHRESHOLDshift 8
-#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
-#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
-
-#define EGRTHRESHOLDPACKING_MASK 0x3fU
-#define EGRTHRESHOLDPACKING_SHIFT 14
-#define EGRTHRESHOLDPACKING(x) ((x) << EGRTHRESHOLDPACKING_SHIFT)
-#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
- EGRTHRESHOLDPACKING_MASK)
-
-#define SGE_DBFIFO_STATUS 0x10a4
-#define HP_INT_THRESH_SHIFT 28
-#define HP_INT_THRESH_MASK 0xfU
-#define HP_INT_THRESH(x) ((x) << HP_INT_THRESH_SHIFT)
-#define LP_INT_THRESH_SHIFT 12
-#define LP_INT_THRESH_MASK 0xfU
-#define LP_INT_THRESH(x) ((x) << LP_INT_THRESH_SHIFT)
-
-#define SGE_DOORBELL_CONTROL 0x10a8
-#define ENABLE_DROP (1 << 13)
-
-#define S_NOCOALESCE 26
-#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
-#define F_NOCOALESCE V_NOCOALESCE(1U)
-
-#define SGE_TIMESTAMP_LO 0x1098
-#define SGE_TIMESTAMP_HI 0x109c
-#define S_TSVAL 0
-#define M_TSVAL 0xfffffffU
-#define GET_TSVAL(x) (((x) >> S_TSVAL) & M_TSVAL)
-
-#define SGE_TIMER_VALUE_0_AND_1 0x10b8
-#define TIMERVALUE0_MASK 0xffff0000U
-#define TIMERVALUE0_SHIFT 16
-#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT)
-#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT)
-#define TIMERVALUE1_MASK 0x0000ffffU
-#define TIMERVALUE1_SHIFT 0
-#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT)
-#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
-
-#define SGE_TIMER_VALUE_2_AND_3 0x10bc
-#define TIMERVALUE2_MASK 0xffff0000U
-#define TIMERVALUE2_SHIFT 16
-#define TIMERVALUE2(x) ((x) << TIMERVALUE2_SHIFT)
-#define TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT)
-#define TIMERVALUE3_MASK 0x0000ffffU
-#define TIMERVALUE3_SHIFT 0
-#define TIMERVALUE3(x) ((x) << TIMERVALUE3_SHIFT)
-#define TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT)
-
-#define SGE_TIMER_VALUE_4_AND_5 0x10c0
-#define TIMERVALUE4_MASK 0xffff0000U
-#define TIMERVALUE4_SHIFT 16
-#define TIMERVALUE4(x) ((x) << TIMERVALUE4_SHIFT)
-#define TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT)
-#define TIMERVALUE5_MASK 0x0000ffffU
-#define TIMERVALUE5_SHIFT 0
-#define TIMERVALUE5(x) ((x) << TIMERVALUE5_SHIFT)
-#define TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT)
-
-#define SGE_DEBUG_INDEX 0x10cc
-#define SGE_DEBUG_DATA_HIGH 0x10d0
-#define SGE_DEBUG_DATA_LOW 0x10d4
-#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
-#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
-#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
-#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
-#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
+#define LP_COUNT_T5_S 0
+#define LP_COUNT_T5_M 0x3ffffU
+#define LP_COUNT_T5_G(x) (((x) >> LP_COUNT_T5_S) & LP_COUNT_T5_M)
+
+#define SGE_DOORBELL_CONTROL_A 0x10a8
+
+#define SGE_STAT_TOTAL_A 0x10e4
+#define SGE_STAT_MATCH_A 0x10e8
+#define SGE_STAT_CFG_A 0x10ec
+
+#define STATSOURCE_T5_S 9
+#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
+
+#define SGE_DBFIFO_STATUS2_A 0x1118
+
+#define HP_INT_THRESH_T5_S 10
+#define HP_INT_THRESH_T5_M 0xfU
+#define HP_INT_THRESH_T5_V(x) ((x) << HP_INT_THRESH_T5_S)
+
+#define HP_COUNT_T5_S 0
+#define HP_COUNT_T5_M 0x3ffU
+#define HP_COUNT_T5_G(x) (((x) >> HP_COUNT_T5_S) & HP_COUNT_T5_M)
+
+#define ENABLE_DROP_S 13
+#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
+#define ENABLE_DROP_F ENABLE_DROP_V(1U)
+
+#define DROPPED_DB_S 0
+#define DROPPED_DB_V(x) ((x) << DROPPED_DB_S)
+#define DROPPED_DB_F DROPPED_DB_V(1U)
+
+#define SGE_CTXT_CMD_A 0x11fc
+#define SGE_DBQ_CTXT_BADDR_A 0x1084
+
+/* registers for module PCIE */
+#define PCIE_PF_CFG_A 0x40
+
+#define AIVEC_S 4
+#define AIVEC_M 0x3ffU
+#define AIVEC_V(x) ((x) << AIVEC_S)
+
+#define PCIE_PF_CLI_A 0x44
+#define PCIE_INT_CAUSE_A 0x3004
+
+#define UNXSPLCPLERR_S 29
+#define UNXSPLCPLERR_V(x) ((x) << UNXSPLCPLERR_S)
+#define UNXSPLCPLERR_F UNXSPLCPLERR_V(1U)
+
+#define PCIEPINT_S 28
+#define PCIEPINT_V(x) ((x) << PCIEPINT_S)
+#define PCIEPINT_F PCIEPINT_V(1U)
+
+#define PCIESINT_S 27
+#define PCIESINT_V(x) ((x) << PCIESINT_S)
+#define PCIESINT_F PCIESINT_V(1U)
+
+#define RPLPERR_S 26
+#define RPLPERR_V(x) ((x) << RPLPERR_S)
+#define RPLPERR_F RPLPERR_V(1U)
+
+#define RXWRPERR_S 25
+#define RXWRPERR_V(x) ((x) << RXWRPERR_S)
+#define RXWRPERR_F RXWRPERR_V(1U)
+
+#define RXCPLPERR_S 24
+#define RXCPLPERR_V(x) ((x) << RXCPLPERR_S)
+#define RXCPLPERR_F RXCPLPERR_V(1U)
+
+#define PIOTAGPERR_S 23
+#define PIOTAGPERR_V(x) ((x) << PIOTAGPERR_S)
+#define PIOTAGPERR_F PIOTAGPERR_V(1U)
+
+#define MATAGPERR_S 22
+#define MATAGPERR_V(x) ((x) << MATAGPERR_S)
+#define MATAGPERR_F MATAGPERR_V(1U)
+
+#define INTXCLRPERR_S 21
+#define INTXCLRPERR_V(x) ((x) << INTXCLRPERR_S)
+#define INTXCLRPERR_F INTXCLRPERR_V(1U)
+
+#define FIDPERR_S 20
+#define FIDPERR_V(x) ((x) << FIDPERR_S)
+#define FIDPERR_F FIDPERR_V(1U)
+
+#define CFGSNPPERR_S 19
+#define CFGSNPPERR_V(x) ((x) << CFGSNPPERR_S)
+#define CFGSNPPERR_F CFGSNPPERR_V(1U)
+
+#define HRSPPERR_S 18
+#define HRSPPERR_V(x) ((x) << HRSPPERR_S)
+#define HRSPPERR_F HRSPPERR_V(1U)
+
+#define HREQPERR_S 17
+#define HREQPERR_V(x) ((x) << HREQPERR_S)
+#define HREQPERR_F HREQPERR_V(1U)
+
+#define HCNTPERR_S 16
+#define HCNTPERR_V(x) ((x) << HCNTPERR_S)
+#define HCNTPERR_F HCNTPERR_V(1U)
+
+#define DRSPPERR_S 15
+#define DRSPPERR_V(x) ((x) << DRSPPERR_S)
+#define DRSPPERR_F DRSPPERR_V(1U)
+
+#define DREQPERR_S 14
+#define DREQPERR_V(x) ((x) << DREQPERR_S)
+#define DREQPERR_F DREQPERR_V(1U)
+
+#define DCNTPERR_S 13
+#define DCNTPERR_V(x) ((x) << DCNTPERR_S)
+#define DCNTPERR_F DCNTPERR_V(1U)
+
+#define CRSPPERR_S 12
+#define CRSPPERR_V(x) ((x) << CRSPPERR_S)
+#define CRSPPERR_F CRSPPERR_V(1U)
+
+#define CREQPERR_S 11
+#define CREQPERR_V(x) ((x) << CREQPERR_S)
+#define CREQPERR_F CREQPERR_V(1U)
+
+#define CCNTPERR_S 10
+#define CCNTPERR_V(x) ((x) << CCNTPERR_S)
+#define CCNTPERR_F CCNTPERR_V(1U)
+
+#define TARTAGPERR_S 9
+#define TARTAGPERR_V(x) ((x) << TARTAGPERR_S)
+#define TARTAGPERR_F TARTAGPERR_V(1U)
+
+#define PIOREQPERR_S 8
+#define PIOREQPERR_V(x) ((x) << PIOREQPERR_S)
+#define PIOREQPERR_F PIOREQPERR_V(1U)
+
+#define PIOCPLPERR_S 7
+#define PIOCPLPERR_V(x) ((x) << PIOCPLPERR_S)
+#define PIOCPLPERR_F PIOCPLPERR_V(1U)
+
+#define MSIXDIPERR_S 6
+#define MSIXDIPERR_V(x) ((x) << MSIXDIPERR_S)
+#define MSIXDIPERR_F MSIXDIPERR_V(1U)
+
+#define MSIXDATAPERR_S 5
+#define MSIXDATAPERR_V(x) ((x) << MSIXDATAPERR_S)
+#define MSIXDATAPERR_F MSIXDATAPERR_V(1U)
+
+#define MSIXADDRHPERR_S 4
+#define MSIXADDRHPERR_V(x) ((x) << MSIXADDRHPERR_S)
+#define MSIXADDRHPERR_F MSIXADDRHPERR_V(1U)
+
+#define MSIXADDRLPERR_S 3
+#define MSIXADDRLPERR_V(x) ((x) << MSIXADDRLPERR_S)
+#define MSIXADDRLPERR_F MSIXADDRLPERR_V(1U)
+
+#define MSIDATAPERR_S 2
+#define MSIDATAPERR_V(x) ((x) << MSIDATAPERR_S)
+#define MSIDATAPERR_F MSIDATAPERR_V(1U)
+
+#define MSIADDRHPERR_S 1
+#define MSIADDRHPERR_V(x) ((x) << MSIADDRHPERR_S)
+#define MSIADDRHPERR_F MSIADDRHPERR_V(1U)
+
+#define MSIADDRLPERR_S 0
+#define MSIADDRLPERR_V(x) ((x) << MSIADDRLPERR_S)
+#define MSIADDRLPERR_F MSIADDRLPERR_V(1U)
+
+#define READRSPERR_S 29
+#define READRSPERR_V(x) ((x) << READRSPERR_S)
+#define READRSPERR_F READRSPERR_V(1U)
+
+#define TRGT1GRPPERR_S 28
+#define TRGT1GRPPERR_V(x) ((x) << TRGT1GRPPERR_S)
+#define TRGT1GRPPERR_F TRGT1GRPPERR_V(1U)
+
+#define IPSOTPERR_S 27
+#define IPSOTPERR_V(x) ((x) << IPSOTPERR_S)
+#define IPSOTPERR_F IPSOTPERR_V(1U)
+
+#define IPRETRYPERR_S 26
+#define IPRETRYPERR_V(x) ((x) << IPRETRYPERR_S)
+#define IPRETRYPERR_F IPRETRYPERR_V(1U)
+
+#define IPRXDATAGRPPERR_S 25
+#define IPRXDATAGRPPERR_V(x) ((x) << IPRXDATAGRPPERR_S)
+#define IPRXDATAGRPPERR_F IPRXDATAGRPPERR_V(1U)
+
+#define IPRXHDRGRPPERR_S 24
+#define IPRXHDRGRPPERR_V(x) ((x) << IPRXHDRGRPPERR_S)
+#define IPRXHDRGRPPERR_F IPRXHDRGRPPERR_V(1U)
+
+#define MAGRPPERR_S 22
+#define MAGRPPERR_V(x) ((x) << MAGRPPERR_S)
+#define MAGRPPERR_F MAGRPPERR_V(1U)
+
+#define VFIDPERR_S 21
+#define VFIDPERR_V(x) ((x) << VFIDPERR_S)
+#define VFIDPERR_F VFIDPERR_V(1U)
+
+#define HREQWRPERR_S 16
+#define HREQWRPERR_V(x) ((x) << HREQWRPERR_S)
+#define HREQWRPERR_F HREQWRPERR_V(1U)
+
+#define DREQWRPERR_S 13
+#define DREQWRPERR_V(x) ((x) << DREQWRPERR_S)
+#define DREQWRPERR_F DREQWRPERR_V(1U)
+
+#define CREQRDPERR_S 11
+#define CREQRDPERR_V(x) ((x) << CREQRDPERR_S)
+#define CREQRDPERR_F CREQRDPERR_V(1U)
+
+#define MSTTAGQPERR_S 10
+#define MSTTAGQPERR_V(x) ((x) << MSTTAGQPERR_S)
+#define MSTTAGQPERR_F MSTTAGQPERR_V(1U)
+
+#define PIOREQGRPPERR_S 8
+#define PIOREQGRPPERR_V(x) ((x) << PIOREQGRPPERR_S)
+#define PIOREQGRPPERR_F PIOREQGRPPERR_V(1U)
+
+#define PIOCPLGRPPERR_S 7
+#define PIOCPLGRPPERR_V(x) ((x) << PIOCPLGRPPERR_S)
+#define PIOCPLGRPPERR_F PIOCPLGRPPERR_V(1U)
+
+#define MSIXSTIPERR_S 2
+#define MSIXSTIPERR_V(x) ((x) << MSIXSTIPERR_S)
+#define MSIXSTIPERR_F MSIXSTIPERR_V(1U)
+
+#define MSTTIMEOUTPERR_S 1
+#define MSTTIMEOUTPERR_V(x) ((x) << MSTTIMEOUTPERR_S)
+#define MSTTIMEOUTPERR_F MSTTIMEOUTPERR_V(1U)
+
+#define MSTGRPPERR_S 0
+#define MSTGRPPERR_V(x) ((x) << MSTGRPPERR_S)
+#define MSTGRPPERR_F MSTGRPPERR_V(1U)
+
+#define PCIE_NONFAT_ERR_A 0x3010
+#define PCIE_CFG_SPACE_REQ_A 0x3060
+#define PCIE_CFG_SPACE_DATA_A 0x3064
+#define PCIE_MEM_ACCESS_BASE_WIN_A 0x3068
+
+#define PCIEOFST_S 10
+#define PCIEOFST_M 0x3fffffU
+#define PCIEOFST_G(x) (((x) >> PCIEOFST_S) & PCIEOFST_M)
+
+#define BIR_S 8
+#define BIR_M 0x3U
+#define BIR_V(x) ((x) << BIR_S)
+#define BIR_G(x) (((x) >> BIR_S) & BIR_M)
+
+#define WINDOW_S 0
+#define WINDOW_M 0xffU
+#define WINDOW_V(x) ((x) << WINDOW_S)
+#define WINDOW_G(x) (((x) >> WINDOW_S) & WINDOW_M)
+
+#define PCIE_MEM_ACCESS_OFFSET_A 0x306c
+
+#define ENABLE_S 30
+#define ENABLE_V(x) ((x) << ENABLE_S)
+#define ENABLE_F ENABLE_V(1U)
+
+#define LOCALCFG_S 28
+#define LOCALCFG_V(x) ((x) << LOCALCFG_S)
+#define LOCALCFG_F LOCALCFG_V(1U)
+
+#define FUNCTION_S 12
+#define FUNCTION_V(x) ((x) << FUNCTION_S)
+
+#define REGISTER_S 0
+#define REGISTER_V(x) ((x) << REGISTER_S)
+
+#define PFNUM_S 0
+#define PFNUM_V(x) ((x) << PFNUM_S)
-#define S_HP_INT_THRESH 28
-#define M_HP_INT_THRESH 0xfU
-#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
-#define S_LP_INT_THRESH_T5 18
-#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
-#define M_LP_COUNT_T5 0x3ffffU
-#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
-#define M_HP_COUNT 0x7ffU
-#define S_HP_COUNT 16
-#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
-#define S_LP_INT_THRESH 12
-#define M_LP_INT_THRESH 0xfU
-#define M_LP_INT_THRESH_T5 0xfffU
-#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
-#define M_LP_COUNT 0x7ffU
-#define S_LP_COUNT 0
-#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
-#define A_SGE_DBFIFO_STATUS 0x10a4
-
-#define SGE_STAT_TOTAL 0x10e4
-#define SGE_STAT_MATCH 0x10e8
-
-#define SGE_STAT_CFG 0x10ec
-#define S_STATSOURCE_T5 9
-#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
-
-#define SGE_DBFIFO_STATUS2 0x1118
-#define M_HP_COUNT_T5 0x3ffU
-#define G_HP_COUNT_T5(x) ((x) & M_HP_COUNT_T5)
-#define S_HP_INT_THRESH_T5 10
-#define M_HP_INT_THRESH_T5 0xfU
-#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
-
-#define S_ENABLE_DROP 13
-#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
-#define F_ENABLE_DROP V_ENABLE_DROP(1U)
-#define S_DROPPED_DB 0
-#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
-#define F_DROPPED_DB V_DROPPED_DB(1U)
-#define A_SGE_DOORBELL_CONTROL 0x10a8
-
-#define A_SGE_CTXT_CMD 0x11fc
-#define A_SGE_DBQ_CTXT_BADDR 0x1084
-
-#define PCIE_PF_CFG 0x40
-#define AIVEC(x) ((x) << 4)
-#define AIVEC_MASK 0x3ffU
-
-#define PCIE_PF_CLI 0x44
-#define PCIE_INT_CAUSE 0x3004
-#define UNXSPLCPLERR 0x20000000U
-#define PCIEPINT 0x10000000U
-#define PCIESINT 0x08000000U
-#define RPLPERR 0x04000000U
-#define RXWRPERR 0x02000000U
-#define RXCPLPERR 0x01000000U
-#define PIOTAGPERR 0x00800000U
-#define MATAGPERR 0x00400000U
-#define INTXCLRPERR 0x00200000U
-#define FIDPERR 0x00100000U
-#define CFGSNPPERR 0x00080000U
-#define HRSPPERR 0x00040000U
-#define HREQPERR 0x00020000U
-#define HCNTPERR 0x00010000U
-#define DRSPPERR 0x00008000U
-#define DREQPERR 0x00004000U
-#define DCNTPERR 0x00002000U
-#define CRSPPERR 0x00001000U
-#define CREQPERR 0x00000800U
-#define CCNTPERR 0x00000400U
-#define TARTAGPERR 0x00000200U
-#define PIOREQPERR 0x00000100U
-#define PIOCPLPERR 0x00000080U
-#define MSIXDIPERR 0x00000040U
-#define MSIXDATAPERR 0x00000020U
-#define MSIXADDRHPERR 0x00000010U
-#define MSIXADDRLPERR 0x00000008U
-#define MSIDATAPERR 0x00000004U
-#define MSIADDRHPERR 0x00000002U
-#define MSIADDRLPERR 0x00000001U
-
-#define READRSPERR 0x20000000U
-#define TRGT1GRPPERR 0x10000000U
-#define IPSOTPERR 0x08000000U
-#define IPRXDATAGRPPERR 0x02000000U
-#define IPRXHDRGRPPERR 0x01000000U
-#define MAGRPPERR 0x00400000U
-#define VFIDPERR 0x00200000U
-#define HREQWRPERR 0x00010000U
-#define DREQWRPERR 0x00002000U
-#define MSTTAGQPERR 0x00000400U
-#define PIOREQGRPPERR 0x00000100U
-#define PIOCPLGRPPERR 0x00000080U
-#define MSIXSTIPERR 0x00000004U
-#define MSTTIMEOUTPERR 0x00000002U
-#define MSTGRPPERR 0x00000001U
-
-#define PCIE_NONFAT_ERR 0x3010
-#define PCIE_CFG_SPACE_REQ 0x3060
-#define PCIE_CFG_SPACE_DATA 0x3064
-#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
-#define S_PCIEOFST 10
-#define M_PCIEOFST 0x3fffffU
-#define GET_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
-#define PCIEOFST_MASK 0xfffffc00U
-#define BIR_MASK 0x00000300U
-#define BIR_SHIFT 8
-#define BIR(x) ((x) << BIR_SHIFT)
-#define WINDOW_MASK 0x000000ffU
-#define WINDOW_SHIFT 0
-#define WINDOW(x) ((x) << WINDOW_SHIFT)
-#define GET_WINDOW(x) (((x) >> WINDOW_SHIFT) & WINDOW_MASK)
-#define PCIE_MEM_ACCESS_OFFSET 0x306c
-#define ENABLE (1U << 30)
-#define FUNCTION(x) ((x) << 12)
-#define F_LOCALCFG (1U << 28)
-
-#define S_PFNUM 0
-#define V_PFNUM(x) ((x) << S_PFNUM)
-
-#define PCIE_FW 0x30b8
-#define PCIE_FW_ERR 0x80000000U
-#define PCIE_FW_INIT 0x40000000U
-#define PCIE_FW_HALT 0x20000000U
-#define PCIE_FW_MASTER_VLD 0x00008000U
-#define PCIE_FW_MASTER(x) ((x) << 12)
-#define PCIE_FW_MASTER_MASK 0x7
-#define PCIE_FW_MASTER_GET(x) (((x) >> 12) & PCIE_FW_MASTER_MASK)
-
-#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
-#define RNPP 0x80000000U
-#define RPCP 0x20000000U
-#define RCIP 0x08000000U
-#define RCCP 0x04000000U
-#define RFTP 0x00800000U
-#define PTRP 0x00100000U
-
-#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
-#define TPCP 0x40000000U
-#define TNPP 0x20000000U
-#define TFTP 0x10000000U
-#define TCAP 0x08000000U
-#define TCIP 0x04000000U
-#define RCAP 0x02000000U
-#define PLUP 0x00800000U
-#define PLDN 0x00400000U
-#define OTDD 0x00200000U
-#define GTRP 0x00100000U
-#define RDPE 0x00040000U
-#define TDCE 0x00020000U
-#define TDUE 0x00010000U
-
-#define MC_INT_CAUSE 0x7518
-#define MC_P_INT_CAUSE 0x41318
-#define ECC_UE_INT_CAUSE 0x00000004U
-#define ECC_CE_INT_CAUSE 0x00000002U
-#define PERR_INT_CAUSE 0x00000001U
-
-#define MC_ECC_STATUS 0x751c
-#define MC_P_ECC_STATUS 0x4131c
-#define ECC_CECNT_MASK 0xffff0000U
-#define ECC_CECNT_SHIFT 16
-#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT)
-#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
-#define ECC_UECNT_MASK 0x0000ffffU
-#define ECC_UECNT_SHIFT 0
-#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT)
-#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
-
-#define MC_BIST_CMD 0x7600
-#define START_BIST 0x80000000U
-#define BIST_CMD_GAP_MASK 0x0000ff00U
-#define BIST_CMD_GAP_SHIFT 8
-#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT)
-#define BIST_OPCODE_MASK 0x00000003U
-#define BIST_OPCODE_SHIFT 0
-#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT)
-
-#define MC_BIST_CMD_ADDR 0x7604
-#define MC_BIST_CMD_LEN 0x7608
-#define MC_BIST_DATA_PATTERN 0x760c
-#define BIST_DATA_TYPE_MASK 0x0000000fU
-#define BIST_DATA_TYPE_SHIFT 0
-#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT)
-
-#define MC_BIST_STATUS_RDATA 0x7688
+#define PCIE_FW_A 0x30b8
+#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
+
+#define RNPP_S 31
+#define RNPP_V(x) ((x) << RNPP_S)
+#define RNPP_F RNPP_V(1U)
+
+#define RPCP_S 29
+#define RPCP_V(x) ((x) << RPCP_S)
+#define RPCP_F RPCP_V(1U)
+
+#define RCIP_S 27
+#define RCIP_V(x) ((x) << RCIP_S)
+#define RCIP_F RCIP_V(1U)
+
+#define RCCP_S 26
+#define RCCP_V(x) ((x) << RCCP_S)
+#define RCCP_F RCCP_V(1U)
+
+#define RFTP_S 23
+#define RFTP_V(x) ((x) << RFTP_S)
+#define RFTP_F RFTP_V(1U)
+
+#define PTRP_S 20
+#define PTRP_V(x) ((x) << PTRP_S)
+#define PTRP_F PTRP_V(1U)
+
+#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A 0x59a4
+
+#define TPCP_S 30
+#define TPCP_V(x) ((x) << TPCP_S)
+#define TPCP_F TPCP_V(1U)
+
+#define TNPP_S 29
+#define TNPP_V(x) ((x) << TNPP_S)
+#define TNPP_F TNPP_V(1U)
+
+#define TFTP_S 28
+#define TFTP_V(x) ((x) << TFTP_S)
+#define TFTP_F TFTP_V(1U)
+
+#define TCAP_S 27
+#define TCAP_V(x) ((x) << TCAP_S)
+#define TCAP_F TCAP_V(1U)
+
+#define TCIP_S 26
+#define TCIP_V(x) ((x) << TCIP_S)
+#define TCIP_F TCIP_V(1U)
+
+#define RCAP_S 25
+#define RCAP_V(x) ((x) << RCAP_S)
+#define RCAP_F RCAP_V(1U)
+
+#define PLUP_S 23
+#define PLUP_V(x) ((x) << PLUP_S)
+#define PLUP_F PLUP_V(1U)
+
+#define PLDN_S 22
+#define PLDN_V(x) ((x) << PLDN_S)
+#define PLDN_F PLDN_V(1U)
+
+#define OTDD_S 21
+#define OTDD_V(x) ((x) << OTDD_S)
+#define OTDD_F OTDD_V(1U)
+
+#define GTRP_S 20
+#define GTRP_V(x) ((x) << GTRP_S)
+#define GTRP_F GTRP_V(1U)
+
+#define RDPE_S 18
+#define RDPE_V(x) ((x) << RDPE_S)
+#define RDPE_F RDPE_V(1U)
+
+#define TDCE_S 17
+#define TDCE_V(x) ((x) << TDCE_S)
+#define TDCE_F TDCE_V(1U)
+
+#define TDUE_S 16
+#define TDUE_V(x) ((x) << TDUE_S)
+#define TDUE_F TDUE_V(1U)
+
+/* registers for module MC */
+#define MC_INT_CAUSE_A 0x7518
+#define MC_P_INT_CAUSE_A 0x41318
+
+#define ECC_UE_INT_CAUSE_S 2
+#define ECC_UE_INT_CAUSE_V(x) ((x) << ECC_UE_INT_CAUSE_S)
+#define ECC_UE_INT_CAUSE_F ECC_UE_INT_CAUSE_V(1U)
+
+#define ECC_CE_INT_CAUSE_S 1
+#define ECC_CE_INT_CAUSE_V(x) ((x) << ECC_CE_INT_CAUSE_S)
+#define ECC_CE_INT_CAUSE_F ECC_CE_INT_CAUSE_V(1U)
+
+#define PERR_INT_CAUSE_S 0
+#define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S)
+#define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U)
+
+#define MC_ECC_STATUS_A 0x751c
+#define MC_P_ECC_STATUS_A 0x4131c
+
+#define ECC_CECNT_S 16
+#define ECC_CECNT_M 0xffffU
+#define ECC_CECNT_V(x) ((x) << ECC_CECNT_S)
+#define ECC_CECNT_G(x) (((x) >> ECC_CECNT_S) & ECC_CECNT_M)
+
+#define ECC_UECNT_S 0
+#define ECC_UECNT_M 0xffffU
+#define ECC_UECNT_V(x) ((x) << ECC_UECNT_S)
+#define ECC_UECNT_G(x) (((x) >> ECC_UECNT_S) & ECC_UECNT_M)
+
+#define MC_BIST_CMD_A 0x7600
+
+#define START_BIST_S 31
+#define START_BIST_V(x) ((x) << START_BIST_S)
+#define START_BIST_F START_BIST_V(1U)
+
+#define BIST_CMD_GAP_S 8
+#define BIST_CMD_GAP_V(x) ((x) << BIST_CMD_GAP_S)
+
+#define BIST_OPCODE_S 0
+#define BIST_OPCODE_V(x) ((x) << BIST_OPCODE_S)
+
+#define MC_BIST_CMD_ADDR_A 0x7604
+#define MC_BIST_CMD_LEN_A 0x7608
+#define MC_BIST_DATA_PATTERN_A 0x760c
+
+#define MC_BIST_STATUS_RDATA_A 0x7688
+
+/* registers for module MA */
#define MA_EDRAM0_BAR_A 0x77c0
#define EDRAM0_SIZE_S 0
@@ -574,263 +893,547 @@
#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S)
#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U)
-#define MA_INT_CAUSE 0x77e0
-#define MEM_PERR_INT_CAUSE 0x00000002U
-#define MEM_WRAP_INT_CAUSE 0x00000001U
-
-#define MA_INT_WRAP_STATUS 0x77e4
-#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U
-#define MEM_WRAP_ADDRESS_SHIFT 4
-#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT)
-#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU
-#define MEM_WRAP_CLIENT_NUM_SHIFT 0
-#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
-#define MA_PCIE_FW 0x30b8
-#define MA_PARITY_ERROR_STATUS 0x77f4
-#define MA_PARITY_ERROR_STATUS2 0x7804
-
-#define EDC_0_BASE_ADDR 0x7900
-
-#define EDC_BIST_CMD 0x7904
-#define EDC_BIST_CMD_ADDR 0x7908
-#define EDC_BIST_CMD_LEN 0x790c
-#define EDC_BIST_DATA_PATTERN 0x7910
-#define EDC_BIST_STATUS_RDATA 0x7928
-#define EDC_INT_CAUSE 0x7978
-#define ECC_UE_PAR 0x00000020U
-#define ECC_CE_PAR 0x00000010U
-#define PERR_PAR_CAUSE 0x00000008U
-
-#define EDC_ECC_STATUS 0x797c
-
-#define EDC_1_BASE_ADDR 0x7980
-
-#define CIM_BOOT_CFG 0x7b00
-#define BOOTADDR_MASK 0xffffff00U
-#define UPCRST 0x1U
-
-#define CIM_PF_MAILBOX_DATA 0x240
-#define CIM_PF_MAILBOX_CTRL 0x280
-#define MBMSGVALID 0x00000008U
-#define MBINTREQ 0x00000004U
-#define MBOWNER_MASK 0x00000003U
-#define MBOWNER_SHIFT 0
-#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
-#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
-
-#define CIM_PF_HOST_INT_ENABLE 0x288
-#define MBMSGRDYINTEN(x) ((x) << 19)
-
-#define CIM_PF_HOST_INT_CAUSE 0x28c
-#define MBMSGRDYINT 0x00080000U
-
-#define CIM_HOST_INT_CAUSE 0x7b2c
-#define TIEQOUTPARERRINT 0x00100000U
-#define TIEQINPARERRINT 0x00080000U
-#define MBHOSTPARERR 0x00040000U
-#define MBUPPARERR 0x00020000U
-#define IBQPARERR 0x0001f800U
-#define IBQTP0PARERR 0x00010000U
-#define IBQTP1PARERR 0x00008000U
-#define IBQULPPARERR 0x00004000U
-#define IBQSGELOPARERR 0x00002000U
-#define IBQSGEHIPARERR 0x00001000U
-#define IBQNCSIPARERR 0x00000800U
-#define OBQPARERR 0x000007e0U
-#define OBQULP0PARERR 0x00000400U
-#define OBQULP1PARERR 0x00000200U
-#define OBQULP2PARERR 0x00000100U
-#define OBQULP3PARERR 0x00000080U
-#define OBQSGEPARERR 0x00000040U
-#define OBQNCSIPARERR 0x00000020U
-#define PREFDROPINT 0x00000002U
-#define UPACCNONZERO 0x00000001U
-
-#define CIM_HOST_UPACC_INT_CAUSE 0x7b34
-#define EEPROMWRINT 0x40000000U
-#define TIMEOUTMAINT 0x20000000U
-#define TIMEOUTINT 0x10000000U
-#define RSPOVRLOOKUPINT 0x08000000U
-#define REQOVRLOOKUPINT 0x04000000U
-#define BLKWRPLINT 0x02000000U
-#define BLKRDPLINT 0x01000000U
-#define SGLWRPLINT 0x00800000U
-#define SGLRDPLINT 0x00400000U
-#define BLKWRCTLINT 0x00200000U
-#define BLKRDCTLINT 0x00100000U
-#define SGLWRCTLINT 0x00080000U
-#define SGLRDCTLINT 0x00040000U
-#define BLKWREEPROMINT 0x00020000U
-#define BLKRDEEPROMINT 0x00010000U
-#define SGLWREEPROMINT 0x00008000U
-#define SGLRDEEPROMINT 0x00004000U
-#define BLKWRFLASHINT 0x00002000U
-#define BLKRDFLASHINT 0x00001000U
-#define SGLWRFLASHINT 0x00000800U
-#define SGLRDFLASHINT 0x00000400U
-#define BLKWRBOOTINT 0x00000200U
-#define BLKRDBOOTINT 0x00000100U
-#define SGLWRBOOTINT 0x00000080U
-#define SGLRDBOOTINT 0x00000040U
-#define ILLWRBEINT 0x00000020U
-#define ILLRDBEINT 0x00000010U
-#define ILLRDINT 0x00000008U
-#define ILLWRINT 0x00000004U
-#define ILLTRANSINT 0x00000002U
-#define RSVDSPACEINT 0x00000001U
-
-#define TP_OUT_CONFIG 0x7d04
-#define VLANEXTENABLE_MASK 0x0000f000U
-#define VLANEXTENABLE_SHIFT 12
-
-#define TP_GLOBAL_CONFIG 0x7d08
-#define FIVETUPLELOOKUP_SHIFT 17
-#define FIVETUPLELOOKUP_MASK 0x00060000U
-#define FIVETUPLELOOKUP(x) ((x) << FIVETUPLELOOKUP_SHIFT)
-#define FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \
- FIVETUPLELOOKUP_SHIFT)
-
-#define TP_PARA_REG2 0x7d68
-#define MAXRXDATA_MASK 0xffff0000U
-#define MAXRXDATA_SHIFT 16
-#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT)
-
-#define TP_TIMER_RESOLUTION 0x7d90
-#define TIMERRESOLUTION_MASK 0x00ff0000U
-#define TIMERRESOLUTION_SHIFT 16
-#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
-#define DELAYEDACKRESOLUTION_MASK 0x000000ffU
-#define DELAYEDACKRESOLUTION_SHIFT 0
-#define DELAYEDACKRESOLUTION_GET(x) \
- (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT)
-
-#define TP_SHIFT_CNT 0x7dc0
-#define SYNSHIFTMAX_SHIFT 24
-#define SYNSHIFTMAX_MASK 0xff000000U
-#define SYNSHIFTMAX(x) ((x) << SYNSHIFTMAX_SHIFT)
-#define SYNSHIFTMAX_GET(x) (((x) & SYNSHIFTMAX_MASK) >> \
- SYNSHIFTMAX_SHIFT)
-#define RXTSHIFTMAXR1_SHIFT 20
-#define RXTSHIFTMAXR1_MASK 0x00f00000U
-#define RXTSHIFTMAXR1(x) ((x) << RXTSHIFTMAXR1_SHIFT)
-#define RXTSHIFTMAXR1_GET(x) (((x) & RXTSHIFTMAXR1_MASK) >> \
- RXTSHIFTMAXR1_SHIFT)
-#define RXTSHIFTMAXR2_SHIFT 16
-#define RXTSHIFTMAXR2_MASK 0x000f0000U
-#define RXTSHIFTMAXR2(x) ((x) << RXTSHIFTMAXR2_SHIFT)
-#define RXTSHIFTMAXR2_GET(x) (((x) & RXTSHIFTMAXR2_MASK) >> \
- RXTSHIFTMAXR2_SHIFT)
-#define PERSHIFTBACKOFFMAX_SHIFT 12
-#define PERSHIFTBACKOFFMAX_MASK 0x0000f000U
-#define PERSHIFTBACKOFFMAX(x) ((x) << PERSHIFTBACKOFFMAX_SHIFT)
-#define PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \
- PERSHIFTBACKOFFMAX_SHIFT)
-#define PERSHIFTMAX_SHIFT 8
-#define PERSHIFTMAX_MASK 0x00000f00U
-#define PERSHIFTMAX(x) ((x) << PERSHIFTMAX_SHIFT)
-#define PERSHIFTMAX_GET(x) (((x) & PERSHIFTMAX_MASK) >> \
- PERSHIFTMAX_SHIFT)
-#define KEEPALIVEMAXR1_SHIFT 4
-#define KEEPALIVEMAXR1_MASK 0x000000f0U
-#define KEEPALIVEMAXR1(x) ((x) << KEEPALIVEMAXR1_SHIFT)
-#define KEEPALIVEMAXR1_GET(x) (((x) & KEEPALIVEMAXR1_MASK) >> \
- KEEPALIVEMAXR1_SHIFT)
-#define KEEPALIVEMAXR2_SHIFT 0
-#define KEEPALIVEMAXR2_MASK 0x0000000fU
-#define KEEPALIVEMAXR2(x) ((x) << KEEPALIVEMAXR2_SHIFT)
-#define KEEPALIVEMAXR2_GET(x) (((x) & KEEPALIVEMAXR2_MASK) >> \
- KEEPALIVEMAXR2_SHIFT)
-
-#define TP_CCTRL_TABLE 0x7ddc
-#define TP_MTU_TABLE 0x7de4
-#define MTUINDEX_MASK 0xff000000U
-#define MTUINDEX_SHIFT 24
-#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT)
-#define MTUWIDTH_MASK 0x000f0000U
-#define MTUWIDTH_SHIFT 16
-#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT)
-#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT)
-#define MTUVALUE_MASK 0x00003fffU
-#define MTUVALUE_SHIFT 0
-#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT)
-#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT)
-
-#define TP_RSS_LKP_TABLE 0x7dec
-#define LKPTBLROWVLD 0x80000000U
-#define LKPTBLQUEUE1_MASK 0x000ffc00U
-#define LKPTBLQUEUE1_SHIFT 10
-#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT)
-#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT)
-#define LKPTBLQUEUE0_MASK 0x000003ffU
-#define LKPTBLQUEUE0_SHIFT 0
-#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT)
-#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT)
-
-#define TP_PIO_ADDR 0x7e40
-#define TP_PIO_DATA 0x7e44
-#define TP_MIB_INDEX 0x7e50
-#define TP_MIB_DATA 0x7e54
-#define TP_INT_CAUSE 0x7e74
-#define FLMTXFLSTEMPTY 0x40000000U
-
-#define TP_VLAN_PRI_MAP 0x140
-#define FRAGMENTATION_SHIFT 9
-#define FRAGMENTATION_MASK 0x00000200U
-#define MPSHITTYPE_MASK 0x00000100U
-#define MACMATCH_MASK 0x00000080U
-#define ETHERTYPE_MASK 0x00000040U
-#define PROTOCOL_MASK 0x00000020U
-#define TOS_MASK 0x00000010U
-#define VLAN_MASK 0x00000008U
-#define VNIC_ID_MASK 0x00000004U
-#define PORT_MASK 0x00000002U
-#define FCOE_SHIFT 0
-#define FCOE_MASK 0x00000001U
-
-#define TP_INGRESS_CONFIG 0x141
-#define VNIC 0x00000800U
-#define CSUM_HAS_PSEUDO_HDR 0x00000400U
-#define RM_OVLAN 0x00000200U
-#define LOOKUPEVERYPKT 0x00000100U
-
-#define TP_MIB_MAC_IN_ERR_0 0x0
-#define TP_MIB_TCP_OUT_RST 0xc
-#define TP_MIB_TCP_IN_SEG_HI 0x10
-#define TP_MIB_TCP_IN_SEG_LO 0x11
-#define TP_MIB_TCP_OUT_SEG_HI 0x12
-#define TP_MIB_TCP_OUT_SEG_LO 0x13
-#define TP_MIB_TCP_RXT_SEG_HI 0x14
-#define TP_MIB_TCP_RXT_SEG_LO 0x15
-#define TP_MIB_TNL_CNG_DROP_0 0x18
-#define TP_MIB_TCP_V6IN_ERR_0 0x28
-#define TP_MIB_TCP_V6OUT_RST 0x2c
-#define TP_MIB_OFD_ARP_DROP 0x36
-#define TP_MIB_TNL_DROP_0 0x44
-#define TP_MIB_OFD_VLN_DROP_0 0x58
-
-#define ULP_TX_INT_CAUSE 0x8dcc
-#define PBL_BOUND_ERR_CH3 0x80000000U
-#define PBL_BOUND_ERR_CH2 0x40000000U
-#define PBL_BOUND_ERR_CH1 0x20000000U
-#define PBL_BOUND_ERR_CH0 0x10000000U
-
-#define PM_RX_INT_CAUSE 0x8fdc
-#define ZERO_E_CMD_ERROR 0x00400000U
-#define PMRX_FRAMING_ERROR 0x003ffff0U
-#define OCSPI_PAR_ERROR 0x00000008U
-#define DB_OPTIONS_PAR_ERROR 0x00000004U
-#define IESPI_PAR_ERROR 0x00000002U
-#define E_PCMD_PAR_ERROR 0x00000001U
-
-#define PM_TX_INT_CAUSE 0x8ffc
-#define PCMD_LEN_OVFL0 0x80000000U
-#define PCMD_LEN_OVFL1 0x40000000U
-#define PCMD_LEN_OVFL2 0x20000000U
-#define ZERO_C_CMD_ERROR 0x10000000U
-#define PMTX_FRAMING_ERROR 0x0ffffff0U
-#define OESPI_PAR_ERROR 0x00000008U
-#define ICSPI_PAR_ERROR 0x00000002U
-#define C_PCMD_PAR_ERROR 0x00000001U
+#define MA_INT_CAUSE_A 0x77e0
+
+#define MEM_PERR_INT_CAUSE_S 1
+#define MEM_PERR_INT_CAUSE_V(x) ((x) << MEM_PERR_INT_CAUSE_S)
+#define MEM_PERR_INT_CAUSE_F MEM_PERR_INT_CAUSE_V(1U)
+
+#define MEM_WRAP_INT_CAUSE_S 0
+#define MEM_WRAP_INT_CAUSE_V(x) ((x) << MEM_WRAP_INT_CAUSE_S)
+#define MEM_WRAP_INT_CAUSE_F MEM_WRAP_INT_CAUSE_V(1U)
+
+#define MA_INT_WRAP_STATUS_A 0x77e4
+
+#define MEM_WRAP_ADDRESS_S 4
+#define MEM_WRAP_ADDRESS_M 0xfffffffU
+#define MEM_WRAP_ADDRESS_G(x) (((x) >> MEM_WRAP_ADDRESS_S) & MEM_WRAP_ADDRESS_M)
+
+#define MEM_WRAP_CLIENT_NUM_S 0
+#define MEM_WRAP_CLIENT_NUM_M 0xfU
+#define MEM_WRAP_CLIENT_NUM_G(x) \
+ (((x) >> MEM_WRAP_CLIENT_NUM_S) & MEM_WRAP_CLIENT_NUM_M)
+
+#define MA_PARITY_ERROR_STATUS_A 0x77f4
+#define MA_PARITY_ERROR_STATUS1_A 0x77f4
+#define MA_PARITY_ERROR_STATUS2_A 0x7804
+
+/* registers for module EDC_0 */
+#define EDC_0_BASE_ADDR 0x7900
+
+#define EDC_BIST_CMD_A 0x7904
+#define EDC_BIST_CMD_ADDR_A 0x7908
+#define EDC_BIST_CMD_LEN_A 0x790c
+#define EDC_BIST_DATA_PATTERN_A 0x7910
+#define EDC_BIST_STATUS_RDATA_A 0x7928
+#define EDC_INT_CAUSE_A 0x7978
+
+#define ECC_UE_PAR_S 5
+#define ECC_UE_PAR_V(x) ((x) << ECC_UE_PAR_S)
+#define ECC_UE_PAR_F ECC_UE_PAR_V(1U)
+
+#define ECC_CE_PAR_S 4
+#define ECC_CE_PAR_V(x) ((x) << ECC_CE_PAR_S)
+#define ECC_CE_PAR_F ECC_CE_PAR_V(1U)
+
+#define PERR_PAR_CAUSE_S 3
+#define PERR_PAR_CAUSE_V(x) ((x) << PERR_PAR_CAUSE_S)
+#define PERR_PAR_CAUSE_F PERR_PAR_CAUSE_V(1U)
+
+#define EDC_ECC_STATUS_A 0x797c
+
+/* registers for module EDC_1 */
+#define EDC_1_BASE_ADDR 0x7980
+
+/* registers for module CIM */
+#define CIM_BOOT_CFG_A 0x7b00
+
+#define BOOTADDR_M 0xffffff00U
+
+#define UPCRST_S 0
+#define UPCRST_V(x) ((x) << UPCRST_S)
+#define UPCRST_F UPCRST_V(1U)
+
+#define CIM_PF_MAILBOX_DATA_A 0x240
+#define CIM_PF_MAILBOX_CTRL_A 0x280
+
+#define MBMSGVALID_S 3
+#define MBMSGVALID_V(x) ((x) << MBMSGVALID_S)
+#define MBMSGVALID_F MBMSGVALID_V(1U)
+
+#define MBINTREQ_S 2
+#define MBINTREQ_V(x) ((x) << MBINTREQ_S)
+#define MBINTREQ_F MBINTREQ_V(1U)
+
+#define MBOWNER_S 0
+#define MBOWNER_M 0x3U
+#define MBOWNER_V(x) ((x) << MBOWNER_S)
+#define MBOWNER_G(x) (((x) >> MBOWNER_S) & MBOWNER_M)
+
+#define CIM_PF_HOST_INT_ENABLE_A 0x288
+
+#define MBMSGRDYINTEN_S 19
+#define MBMSGRDYINTEN_V(x) ((x) << MBMSGRDYINTEN_S)
+#define MBMSGRDYINTEN_F MBMSGRDYINTEN_V(1U)
+
+#define CIM_PF_HOST_INT_CAUSE_A 0x28c
+
+#define MBMSGRDYINT_S 19
+#define MBMSGRDYINT_V(x) ((x) << MBMSGRDYINT_S)
+#define MBMSGRDYINT_F MBMSGRDYINT_V(1U)
+
+#define CIM_HOST_INT_CAUSE_A 0x7b2c
+
+#define TIEQOUTPARERRINT_S 20
+#define TIEQOUTPARERRINT_V(x) ((x) << TIEQOUTPARERRINT_S)
+#define TIEQOUTPARERRINT_F TIEQOUTPARERRINT_V(1U)
+
+#define TIEQINPARERRINT_S 19
+#define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S)
+#define TIEQINPARERRINT_F TIEQINPARERRINT_V(1U)
+
+#define PREFDROPINT_S 1
+#define PREFDROPINT_V(x) ((x) << PREFDROPINT_S)
+#define PREFDROPINT_F PREFDROPINT_V(1U)
+
+#define UPACCNONZERO_S 0
+#define UPACCNONZERO_V(x) ((x) << UPACCNONZERO_S)
+#define UPACCNONZERO_F UPACCNONZERO_V(1U)
+
+#define MBHOSTPARERR_S 18
+#define MBHOSTPARERR_V(x) ((x) << MBHOSTPARERR_S)
+#define MBHOSTPARERR_F MBHOSTPARERR_V(1U)
+
+#define MBUPPARERR_S 17
+#define MBUPPARERR_V(x) ((x) << MBUPPARERR_S)
+#define MBUPPARERR_F MBUPPARERR_V(1U)
+
+#define IBQTP0PARERR_S 16
+#define IBQTP0PARERR_V(x) ((x) << IBQTP0PARERR_S)
+#define IBQTP0PARERR_F IBQTP0PARERR_V(1U)
+
+#define IBQTP1PARERR_S 15
+#define IBQTP1PARERR_V(x) ((x) << IBQTP1PARERR_S)
+#define IBQTP1PARERR_F IBQTP1PARERR_V(1U)
+
+#define IBQULPPARERR_S 14
+#define IBQULPPARERR_V(x) ((x) << IBQULPPARERR_S)
+#define IBQULPPARERR_F IBQULPPARERR_V(1U)
+
+#define IBQSGELOPARERR_S 13
+#define IBQSGELOPARERR_V(x) ((x) << IBQSGELOPARERR_S)
+#define IBQSGELOPARERR_F IBQSGELOPARERR_V(1U)
+
+#define IBQSGEHIPARERR_S 12
+#define IBQSGEHIPARERR_V(x) ((x) << IBQSGEHIPARERR_S)
+#define IBQSGEHIPARERR_F IBQSGEHIPARERR_V(1U)
+
+#define IBQNCSIPARERR_S 11
+#define IBQNCSIPARERR_V(x) ((x) << IBQNCSIPARERR_S)
+#define IBQNCSIPARERR_F IBQNCSIPARERR_V(1U)
+
+#define OBQULP0PARERR_S 10
+#define OBQULP0PARERR_V(x) ((x) << OBQULP0PARERR_S)
+#define OBQULP0PARERR_F OBQULP0PARERR_V(1U)
+
+#define OBQULP1PARERR_S 9
+#define OBQULP1PARERR_V(x) ((x) << OBQULP1PARERR_S)
+#define OBQULP1PARERR_F OBQULP1PARERR_V(1U)
+
+#define OBQULP2PARERR_S 8
+#define OBQULP2PARERR_V(x) ((x) << OBQULP2PARERR_S)
+#define OBQULP2PARERR_F OBQULP2PARERR_V(1U)
+
+#define OBQULP3PARERR_S 7
+#define OBQULP3PARERR_V(x) ((x) << OBQULP3PARERR_S)
+#define OBQULP3PARERR_F OBQULP3PARERR_V(1U)
+
+#define OBQSGEPARERR_S 6
+#define OBQSGEPARERR_V(x) ((x) << OBQSGEPARERR_S)
+#define OBQSGEPARERR_F OBQSGEPARERR_V(1U)
+
+#define OBQNCSIPARERR_S 5
+#define OBQNCSIPARERR_V(x) ((x) << OBQNCSIPARERR_S)
+#define OBQNCSIPARERR_F OBQNCSIPARERR_V(1U)
+
+#define CIM_HOST_UPACC_INT_CAUSE_A 0x7b34
+
+#define EEPROMWRINT_S 30
+#define EEPROMWRINT_V(x) ((x) << EEPROMWRINT_S)
+#define EEPROMWRINT_F EEPROMWRINT_V(1U)
+
+#define TIMEOUTMAINT_S 29
+#define TIMEOUTMAINT_V(x) ((x) << TIMEOUTMAINT_S)
+#define TIMEOUTMAINT_F TIMEOUTMAINT_V(1U)
+
+#define TIMEOUTINT_S 28
+#define TIMEOUTINT_V(x) ((x) << TIMEOUTINT_S)
+#define TIMEOUTINT_F TIMEOUTINT_V(1U)
+
+#define RSPOVRLOOKUPINT_S 27
+#define RSPOVRLOOKUPINT_V(x) ((x) << RSPOVRLOOKUPINT_S)
+#define RSPOVRLOOKUPINT_F RSPOVRLOOKUPINT_V(1U)
+
+#define REQOVRLOOKUPINT_S 26
+#define REQOVRLOOKUPINT_V(x) ((x) << REQOVRLOOKUPINT_S)
+#define REQOVRLOOKUPINT_F REQOVRLOOKUPINT_V(1U)
+
+#define BLKWRPLINT_S 25
+#define BLKWRPLINT_V(x) ((x) << BLKWRPLINT_S)
+#define BLKWRPLINT_F BLKWRPLINT_V(1U)
+
+#define BLKRDPLINT_S 24
+#define BLKRDPLINT_V(x) ((x) << BLKRDPLINT_S)
+#define BLKRDPLINT_F BLKRDPLINT_V(1U)
+
+#define SGLWRPLINT_S 23
+#define SGLWRPLINT_V(x) ((x) << SGLWRPLINT_S)
+#define SGLWRPLINT_F SGLWRPLINT_V(1U)
+
+#define SGLRDPLINT_S 22
+#define SGLRDPLINT_V(x) ((x) << SGLRDPLINT_S)
+#define SGLRDPLINT_F SGLRDPLINT_V(1U)
+
+#define BLKWRCTLINT_S 21
+#define BLKWRCTLINT_V(x) ((x) << BLKWRCTLINT_S)
+#define BLKWRCTLINT_F BLKWRCTLINT_V(1U)
+
+#define BLKRDCTLINT_S 20
+#define BLKRDCTLINT_V(x) ((x) << BLKRDCTLINT_S)
+#define BLKRDCTLINT_F BLKRDCTLINT_V(1U)
+
+#define SGLWRCTLINT_S 19
+#define SGLWRCTLINT_V(x) ((x) << SGLWRCTLINT_S)
+#define SGLWRCTLINT_F SGLWRCTLINT_V(1U)
+
+#define SGLRDCTLINT_S 18
+#define SGLRDCTLINT_V(x) ((x) << SGLRDCTLINT_S)
+#define SGLRDCTLINT_F SGLRDCTLINT_V(1U)
+
+#define BLKWREEPROMINT_S 17
+#define BLKWREEPROMINT_V(x) ((x) << BLKWREEPROMINT_S)
+#define BLKWREEPROMINT_F BLKWREEPROMINT_V(1U)
+
+#define BLKRDEEPROMINT_S 16
+#define BLKRDEEPROMINT_V(x) ((x) << BLKRDEEPROMINT_S)
+#define BLKRDEEPROMINT_F BLKRDEEPROMINT_V(1U)
+
+#define SGLWREEPROMINT_S 15
+#define SGLWREEPROMINT_V(x) ((x) << SGLWREEPROMINT_S)
+#define SGLWREEPROMINT_F SGLWREEPROMINT_V(1U)
+
+#define SGLRDEEPROMINT_S 14
+#define SGLRDEEPROMINT_V(x) ((x) << SGLRDEEPROMINT_S)
+#define SGLRDEEPROMINT_F SGLRDEEPROMINT_V(1U)
+
+#define BLKWRFLASHINT_S 13
+#define BLKWRFLASHINT_V(x) ((x) << BLKWRFLASHINT_S)
+#define BLKWRFLASHINT_F BLKWRFLASHINT_V(1U)
+
+#define BLKRDFLASHINT_S 12
+#define BLKRDFLASHINT_V(x) ((x) << BLKRDFLASHINT_S)
+#define BLKRDFLASHINT_F BLKRDFLASHINT_V(1U)
+
+#define SGLWRFLASHINT_S 11
+#define SGLWRFLASHINT_V(x) ((x) << SGLWRFLASHINT_S)
+#define SGLWRFLASHINT_F SGLWRFLASHINT_V(1U)
+
+#define SGLRDFLASHINT_S 10
+#define SGLRDFLASHINT_V(x) ((x) << SGLRDFLASHINT_S)
+#define SGLRDFLASHINT_F SGLRDFLASHINT_V(1U)
+
+#define BLKWRBOOTINT_S 9
+#define BLKWRBOOTINT_V(x) ((x) << BLKWRBOOTINT_S)
+#define BLKWRBOOTINT_F BLKWRBOOTINT_V(1U)
+
+#define BLKRDBOOTINT_S 8
+#define BLKRDBOOTINT_V(x) ((x) << BLKRDBOOTINT_S)
+#define BLKRDBOOTINT_F BLKRDBOOTINT_V(1U)
+
+#define SGLWRBOOTINT_S 7
+#define SGLWRBOOTINT_V(x) ((x) << SGLWRBOOTINT_S)
+#define SGLWRBOOTINT_F SGLWRBOOTINT_V(1U)
+
+#define SGLRDBOOTINT_S 6
+#define SGLRDBOOTINT_V(x) ((x) << SGLRDBOOTINT_S)
+#define SGLRDBOOTINT_F SGLRDBOOTINT_V(1U)
+
+#define ILLWRBEINT_S 5
+#define ILLWRBEINT_V(x) ((x) << ILLWRBEINT_S)
+#define ILLWRBEINT_F ILLWRBEINT_V(1U)
+
+#define ILLRDBEINT_S 4
+#define ILLRDBEINT_V(x) ((x) << ILLRDBEINT_S)
+#define ILLRDBEINT_F ILLRDBEINT_V(1U)
+
+#define ILLRDINT_S 3
+#define ILLRDINT_V(x) ((x) << ILLRDINT_S)
+#define ILLRDINT_F ILLRDINT_V(1U)
+
+#define ILLWRINT_S 2
+#define ILLWRINT_V(x) ((x) << ILLWRINT_S)
+#define ILLWRINT_F ILLWRINT_V(1U)
+
+#define ILLTRANSINT_S 1
+#define ILLTRANSINT_V(x) ((x) << ILLTRANSINT_S)
+#define ILLTRANSINT_F ILLTRANSINT_V(1U)
+
+#define RSVDSPACEINT_S 0
+#define RSVDSPACEINT_V(x) ((x) << RSVDSPACEINT_S)
+#define RSVDSPACEINT_F RSVDSPACEINT_V(1U)
+
+/* registers for module TP */
+#define TP_OUT_CONFIG_A 0x7d04
+#define TP_GLOBAL_CONFIG_A 0x7d08
+
+#define FIVETUPLELOOKUP_S 17
+#define FIVETUPLELOOKUP_M 0x3U
+#define FIVETUPLELOOKUP_V(x) ((x) << FIVETUPLELOOKUP_S)
+#define FIVETUPLELOOKUP_G(x) (((x) >> FIVETUPLELOOKUP_S) & FIVETUPLELOOKUP_M)
+
+#define TP_PARA_REG2_A 0x7d68
+
+#define MAXRXDATA_S 16
+#define MAXRXDATA_M 0xffffU
+#define MAXRXDATA_G(x) (((x) >> MAXRXDATA_S) & MAXRXDATA_M)
+
+#define TP_TIMER_RESOLUTION_A 0x7d90
+
+#define TIMERRESOLUTION_S 16
+#define TIMERRESOLUTION_M 0xffU
+#define TIMERRESOLUTION_G(x) (((x) >> TIMERRESOLUTION_S) & TIMERRESOLUTION_M)
+
+#define DELAYEDACKRESOLUTION_S 0
+#define DELAYEDACKRESOLUTION_M 0xffU
+#define DELAYEDACKRESOLUTION_G(x) \
+ (((x) >> DELAYEDACKRESOLUTION_S) & DELAYEDACKRESOLUTION_M)
+
+#define TP_SHIFT_CNT_A 0x7dc0
+
+#define SYNSHIFTMAX_S 24
+#define SYNSHIFTMAX_M 0xffU
+#define SYNSHIFTMAX_V(x) ((x) << SYNSHIFTMAX_S)
+#define SYNSHIFTMAX_G(x) (((x) >> SYNSHIFTMAX_S) & SYNSHIFTMAX_M)
+
+#define RXTSHIFTMAXR1_S 20
+#define RXTSHIFTMAXR1_M 0xfU
+#define RXTSHIFTMAXR1_V(x) ((x) << RXTSHIFTMAXR1_S)
+#define RXTSHIFTMAXR1_G(x) (((x) >> RXTSHIFTMAXR1_S) & RXTSHIFTMAXR1_M)
+
+#define RXTSHIFTMAXR2_S 16
+#define RXTSHIFTMAXR2_M 0xfU
+#define RXTSHIFTMAXR2_V(x) ((x) << RXTSHIFTMAXR2_S)
+#define RXTSHIFTMAXR2_G(x) (((x) >> RXTSHIFTMAXR2_S) & RXTSHIFTMAXR2_M)
+
+#define PERSHIFTBACKOFFMAX_S 12
+#define PERSHIFTBACKOFFMAX_M 0xfU
+#define PERSHIFTBACKOFFMAX_V(x) ((x) << PERSHIFTBACKOFFMAX_S)
+#define PERSHIFTBACKOFFMAX_G(x) \
+ (((x) >> PERSHIFTBACKOFFMAX_S) & PERSHIFTBACKOFFMAX_M)
+
+#define PERSHIFTMAX_S 8
+#define PERSHIFTMAX_M 0xfU
+#define PERSHIFTMAX_V(x) ((x) << PERSHIFTMAX_S)
+#define PERSHIFTMAX_G(x) (((x) >> PERSHIFTMAX_S) & PERSHIFTMAX_M)
+
+#define KEEPALIVEMAXR1_S 4
+#define KEEPALIVEMAXR1_M 0xfU
+#define KEEPALIVEMAXR1_V(x) ((x) << KEEPALIVEMAXR1_S)
+#define KEEPALIVEMAXR1_G(x) (((x) >> KEEPALIVEMAXR1_S) & KEEPALIVEMAXR1_M)
+
+#define KEEPALIVEMAXR2_S 0
+#define KEEPALIVEMAXR2_M 0xfU
+#define KEEPALIVEMAXR2_V(x) ((x) << KEEPALIVEMAXR2_S)
+#define KEEPALIVEMAXR2_G(x) (((x) >> KEEPALIVEMAXR2_S) & KEEPALIVEMAXR2_M)
+
+#define TP_CCTRL_TABLE_A 0x7ddc
+#define TP_MTU_TABLE_A 0x7de4
+
+#define MTUINDEX_S 24
+#define MTUINDEX_V(x) ((x) << MTUINDEX_S)
+
+#define MTUWIDTH_S 16
+#define MTUWIDTH_M 0xfU
+#define MTUWIDTH_V(x) ((x) << MTUWIDTH_S)
+#define MTUWIDTH_G(x) (((x) >> MTUWIDTH_S) & MTUWIDTH_M)
+
+#define MTUVALUE_S 0
+#define MTUVALUE_M 0x3fffU
+#define MTUVALUE_V(x) ((x) << MTUVALUE_S)
+#define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
+
+#define TP_RSS_LKP_TABLE_A 0x7dec
+
+#define LKPTBLROWVLD_S 31
+#define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
+#define LKPTBLROWVLD_F LKPTBLROWVLD_V(1U)
+
+#define LKPTBLQUEUE1_S 10
+#define LKPTBLQUEUE1_M 0x3ffU
+#define LKPTBLQUEUE1_G(x) (((x) >> LKPTBLQUEUE1_S) & LKPTBLQUEUE1_M)
+
+#define LKPTBLQUEUE0_S 0
+#define LKPTBLQUEUE0_M 0x3ffU
+#define LKPTBLQUEUE0_G(x) (((x) >> LKPTBLQUEUE0_S) & LKPTBLQUEUE0_M)
+
+#define TP_PIO_ADDR_A 0x7e40
+#define TP_PIO_DATA_A 0x7e44
+#define TP_MIB_INDEX_A 0x7e50
+#define TP_MIB_DATA_A 0x7e54
+#define TP_INT_CAUSE_A 0x7e74
+
+#define FLMTXFLSTEMPTY_S 30
+#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
+#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U)
+
+#define TP_VLAN_PRI_MAP_A 0x140
+
+#define FRAGMENTATION_S 9
+#define FRAGMENTATION_V(x) ((x) << FRAGMENTATION_S)
+#define FRAGMENTATION_F FRAGMENTATION_V(1U)
+
+#define MPSHITTYPE_S 8
+#define MPSHITTYPE_V(x) ((x) << MPSHITTYPE_S)
+#define MPSHITTYPE_F MPSHITTYPE_V(1U)
+
+#define MACMATCH_S 7
+#define MACMATCH_V(x) ((x) << MACMATCH_S)
+#define MACMATCH_F MACMATCH_V(1U)
+
+#define ETHERTYPE_S 6
+#define ETHERTYPE_V(x) ((x) << ETHERTYPE_S)
+#define ETHERTYPE_F ETHERTYPE_V(1U)
+
+#define PROTOCOL_S 5
+#define PROTOCOL_V(x) ((x) << PROTOCOL_S)
+#define PROTOCOL_F PROTOCOL_V(1U)
+
+#define TOS_S 4
+#define TOS_V(x) ((x) << TOS_S)
+#define TOS_F TOS_V(1U)
+
+#define VLAN_S 3
+#define VLAN_V(x) ((x) << VLAN_S)
+#define VLAN_F VLAN_V(1U)
+
+#define VNIC_ID_S 2
+#define VNIC_ID_V(x) ((x) << VNIC_ID_S)
+#define VNIC_ID_F VNIC_ID_V(1U)
+
+#define PORT_S 1
+#define PORT_V(x) ((x) << PORT_S)
+#define PORT_F PORT_V(1U)
+
+#define FCOE_S 0
+#define FCOE_V(x) ((x) << FCOE_S)
+#define FCOE_F FCOE_V(1U)
+
+#define FILTERMODE_S 15
+#define FILTERMODE_V(x) ((x) << FILTERMODE_S)
+#define FILTERMODE_F FILTERMODE_V(1U)
+
+#define FCOEMASK_S 14
+#define FCOEMASK_V(x) ((x) << FCOEMASK_S)
+#define FCOEMASK_F FCOEMASK_V(1U)
+
+#define TP_INGRESS_CONFIG_A 0x141
+
+#define VNIC_S 11
+#define VNIC_V(x) ((x) << VNIC_S)
+#define VNIC_F VNIC_V(1U)
+
+#define CSUM_HAS_PSEUDO_HDR_S 10
+#define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S)
+#define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U)
+
+#define TP_MIB_MAC_IN_ERR_0_A 0x0
+#define TP_MIB_TCP_OUT_RST_A 0xc
+#define TP_MIB_TCP_IN_SEG_HI_A 0x10
+#define TP_MIB_TCP_IN_SEG_LO_A 0x11
+#define TP_MIB_TCP_OUT_SEG_HI_A 0x12
+#define TP_MIB_TCP_OUT_SEG_LO_A 0x13
+#define TP_MIB_TCP_RXT_SEG_HI_A 0x14
+#define TP_MIB_TCP_RXT_SEG_LO_A 0x15
+#define TP_MIB_TNL_CNG_DROP_0_A 0x18
+#define TP_MIB_TCP_V6IN_ERR_0_A 0x28
+#define TP_MIB_TCP_V6OUT_RST_A 0x2c
+#define TP_MIB_OFD_ARP_DROP_A 0x36
+#define TP_MIB_TNL_DROP_0_A 0x44
+#define TP_MIB_OFD_VLN_DROP_0_A 0x58
+
+#define ULP_TX_INT_CAUSE_A 0x8dcc
+
+#define PBL_BOUND_ERR_CH3_S 31
+#define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
+#define PBL_BOUND_ERR_CH3_F PBL_BOUND_ERR_CH3_V(1U)
+
+#define PBL_BOUND_ERR_CH2_S 30
+#define PBL_BOUND_ERR_CH2_V(x) ((x) << PBL_BOUND_ERR_CH2_S)
+#define PBL_BOUND_ERR_CH2_F PBL_BOUND_ERR_CH2_V(1U)
+
+#define PBL_BOUND_ERR_CH1_S 29
+#define PBL_BOUND_ERR_CH1_V(x) ((x) << PBL_BOUND_ERR_CH1_S)
+#define PBL_BOUND_ERR_CH1_F PBL_BOUND_ERR_CH1_V(1U)
+
+#define PBL_BOUND_ERR_CH0_S 28
+#define PBL_BOUND_ERR_CH0_V(x) ((x) << PBL_BOUND_ERR_CH0_S)
+#define PBL_BOUND_ERR_CH0_F PBL_BOUND_ERR_CH0_V(1U)
+
+#define PM_RX_INT_CAUSE_A 0x8fdc
+
+#define PMRX_FRAMING_ERROR_F 0x003ffff0U
+
+#define ZERO_E_CMD_ERROR_S 22
+#define ZERO_E_CMD_ERROR_V(x) ((x) << ZERO_E_CMD_ERROR_S)
+#define ZERO_E_CMD_ERROR_F ZERO_E_CMD_ERROR_V(1U)
+
+#define OCSPI_PAR_ERROR_S 3
+#define OCSPI_PAR_ERROR_V(x) ((x) << OCSPI_PAR_ERROR_S)
+#define OCSPI_PAR_ERROR_F OCSPI_PAR_ERROR_V(1U)
+
+#define DB_OPTIONS_PAR_ERROR_S 2
+#define DB_OPTIONS_PAR_ERROR_V(x) ((x) << DB_OPTIONS_PAR_ERROR_S)
+#define DB_OPTIONS_PAR_ERROR_F DB_OPTIONS_PAR_ERROR_V(1U)
+
+#define IESPI_PAR_ERROR_S 1
+#define IESPI_PAR_ERROR_V(x) ((x) << IESPI_PAR_ERROR_S)
+#define IESPI_PAR_ERROR_F IESPI_PAR_ERROR_V(1U)
+
+#define PMRX_E_PCMD_PAR_ERROR_S 0
+#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
+#define PMRX_E_PCMD_PAR_ERROR_F PMRX_E_PCMD_PAR_ERROR_V(1U)
+
+#define PM_TX_INT_CAUSE_A 0x8ffc
+
+#define PCMD_LEN_OVFL0_S 31
+#define PCMD_LEN_OVFL0_V(x) ((x) << PCMD_LEN_OVFL0_S)
+#define PCMD_LEN_OVFL0_F PCMD_LEN_OVFL0_V(1U)
+
+#define PCMD_LEN_OVFL1_S 30
+#define PCMD_LEN_OVFL1_V(x) ((x) << PCMD_LEN_OVFL1_S)
+#define PCMD_LEN_OVFL1_F PCMD_LEN_OVFL1_V(1U)
+
+#define PCMD_LEN_OVFL2_S 29
+#define PCMD_LEN_OVFL2_V(x) ((x) << PCMD_LEN_OVFL2_S)
+#define PCMD_LEN_OVFL2_F PCMD_LEN_OVFL2_V(1U)
+
+#define ZERO_C_CMD_ERROR_S 28
+#define ZERO_C_CMD_ERROR_V(x) ((x) << ZERO_C_CMD_ERROR_S)
+#define ZERO_C_CMD_ERROR_F ZERO_C_CMD_ERROR_V(1U)
+
+#define PMTX_FRAMING_ERROR_F 0x0ffffff0U
+
+#define OESPI_PAR_ERROR_S 3
+#define OESPI_PAR_ERROR_V(x) ((x) << OESPI_PAR_ERROR_S)
+#define OESPI_PAR_ERROR_F OESPI_PAR_ERROR_V(1U)
+
+#define ICSPI_PAR_ERROR_S 1
+#define ICSPI_PAR_ERROR_V(x) ((x) << ICSPI_PAR_ERROR_S)
+#define ICSPI_PAR_ERROR_F ICSPI_PAR_ERROR_V(1U)
+
+#define PMTX_C_PCMD_PAR_ERROR_S 0
+#define PMTX_C_PCMD_PAR_ERROR_V(x) ((x) << PMTX_C_PCMD_PAR_ERROR_S)
+#define PMTX_C_PCMD_PAR_ERROR_F PMTX_C_PCMD_PAR_ERROR_V(1U)
#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
@@ -959,41 +1562,57 @@
#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
-#define MAC_PORT_CFG2 0x818
#define MAC_PORT_MAGIC_MACID_LO 0x824
#define MAC_PORT_MAGIC_MACID_HI 0x828
-#define MAC_PORT_EPIO_DATA0 0x8c0
-#define MAC_PORT_EPIO_DATA1 0x8c4
-#define MAC_PORT_EPIO_DATA2 0x8c8
-#define MAC_PORT_EPIO_DATA3 0x8cc
-#define MAC_PORT_EPIO_OP 0x8d0
-
-#define MPS_CMN_CTL 0x9000
-#define NUMPORTS_MASK 0x00000003U
-#define NUMPORTS_SHIFT 0
-#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT)
-
-#define MPS_INT_CAUSE 0x9008
-#define STATINT 0x00000020U
-#define TXINT 0x00000010U
-#define RXINT 0x00000008U
-#define TRCINT 0x00000004U
-#define CLSINT 0x00000002U
-#define PLINT 0x00000001U
-
-#define MPS_TX_INT_CAUSE 0x9408
-#define PORTERR 0x00010000U
-#define FRMERR 0x00008000U
-#define SECNTERR 0x00004000U
-#define BUBBLE 0x00002000U
-#define TXDESCFIFO 0x00001e00U
-#define TXDATAFIFO 0x000001e0U
-#define NCSIFIFO 0x00000010U
-#define TPFIFO 0x0000000fU
-
-#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
-#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
-#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
+
+#define MAC_PORT_EPIO_DATA0_A 0x8c0
+#define MAC_PORT_EPIO_DATA1_A 0x8c4
+#define MAC_PORT_EPIO_DATA2_A 0x8c8
+#define MAC_PORT_EPIO_DATA3_A 0x8cc
+#define MAC_PORT_EPIO_OP_A 0x8d0
+
+#define MAC_PORT_CFG2_A 0x818
+
+#define MPS_CMN_CTL_A 0x9000
+
+#define NUMPORTS_S 0
+#define NUMPORTS_M 0x3U
+#define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M)
+
+#define MPS_INT_CAUSE_A 0x9008
+#define MPS_TX_INT_CAUSE_A 0x9408
+
+#define FRMERR_S 15
+#define FRMERR_V(x) ((x) << FRMERR_S)
+#define FRMERR_F FRMERR_V(1U)
+
+#define SECNTERR_S 14
+#define SECNTERR_V(x) ((x) << SECNTERR_S)
+#define SECNTERR_F SECNTERR_V(1U)
+
+#define BUBBLE_S 13
+#define BUBBLE_V(x) ((x) << BUBBLE_S)
+#define BUBBLE_F BUBBLE_V(1U)
+
+#define TXDESCFIFO_S 9
+#define TXDESCFIFO_M 0xfU
+#define TXDESCFIFO_V(x) ((x) << TXDESCFIFO_S)
+
+#define TXDATAFIFO_S 5
+#define TXDATAFIFO_M 0xfU
+#define TXDATAFIFO_V(x) ((x) << TXDATAFIFO_S)
+
+#define NCSIFIFO_S 4
+#define NCSIFIFO_V(x) ((x) << NCSIFIFO_S)
+#define NCSIFIFO_F NCSIFIFO_V(1U)
+
+#define TPFIFO_S 0
+#define TPFIFO_M 0xfU
+#define TPFIFO_V(x) ((x) << TPFIFO_S)
+
+#define MPS_STAT_PERR_INT_CAUSE_SRAM_A 0x9614
+#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A 0x9620
+#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A 0x962c
#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
@@ -1027,294 +1646,445 @@
#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
-#define MPS_TRC_CFG 0x9800
-#define TRCFIFOEMPTY 0x00000010U
-#define TRCIGNOREDROPINPUT 0x00000008U
-#define TRCKEEPDUPLICATES 0x00000004U
-#define TRCEN 0x00000002U
-#define TRCMULTIFILTER 0x00000001U
-
-#define MPS_TRC_RSS_CONTROL 0x9808
-#define MPS_T5_TRC_RSS_CONTROL 0xa00c
-#define RSSCONTROL_MASK 0x00ff0000U
-#define RSSCONTROL_SHIFT 16
-#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
-#define QUEUENUMBER_MASK 0x0000ffffU
-#define QUEUENUMBER_SHIFT 0
-#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT)
-
-#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
-#define TFINVERTMATCH 0x01000000U
-#define TFPKTTOOLARGE 0x00800000U
-#define TFEN 0x00400000U
-#define TFPORT_MASK 0x003c0000U
-#define TFPORT_SHIFT 18
-#define TFPORT(x) ((x) << TFPORT_SHIFT)
-#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
-#define TFDROP 0x00020000U
-#define TFSOPEOPERR 0x00010000U
-#define TFLENGTH_MASK 0x00001f00U
-#define TFLENGTH_SHIFT 8
-#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT)
-#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
-#define TFOFFSET_MASK 0x0000001fU
-#define TFOFFSET_SHIFT 0
-#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT)
-#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
-
-#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
-#define TFMINPKTSIZE_MASK 0x01ff0000U
-#define TFMINPKTSIZE_SHIFT 16
-#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT)
-#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
-#define TFCAPTUREMAX_MASK 0x00003fffU
-#define TFCAPTUREMAX_SHIFT 0
-#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT)
-#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
-
-#define MPS_TRC_INT_CAUSE 0x985c
-#define MISCPERR 0x00000100U
-#define PKTFIFO 0x000000f0U
-#define FILTMEM 0x0000000fU
-
-#define MPS_TRC_FILTER0_MATCH 0x9c00
-#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
-#define MPS_TRC_FILTER1_MATCH 0x9d00
-#define MPS_CLS_INT_CAUSE 0xd028
-#define PLERRENB 0x00000008U
-#define HASHSRAM 0x00000004U
-#define MATCHTCAM 0x00000002U
-#define MATCHSRAM 0x00000001U
-
-#define MPS_RX_PERR_INT_CAUSE 0x11074
-
-#define CPL_INTR_CAUSE 0x19054
-#define CIM_OP_MAP_PERR 0x00000020U
-#define CIM_OVFL_ERROR 0x00000010U
-#define TP_FRAMING_ERROR 0x00000008U
-#define SGE_FRAMING_ERROR 0x00000004U
-#define CIM_FRAMING_ERROR 0x00000002U
-#define ZERO_SWITCH_ERROR 0x00000001U
-
-#define SMB_INT_CAUSE 0x19090
-#define MSTTXFIFOPARINT 0x00200000U
-#define MSTRXFIFOPARINT 0x00100000U
-#define SLVFIFOPARINT 0x00080000U
-
-#define ULP_RX_INT_CAUSE 0x19158
-#define ULP_RX_ISCSI_TAGMASK 0x19164
-#define ULP_RX_ISCSI_PSZ 0x19168
-#define HPZ3_MASK 0x0f000000U
-#define HPZ3_SHIFT 24
-#define HPZ3(x) ((x) << HPZ3_SHIFT)
-#define HPZ2_MASK 0x000f0000U
-#define HPZ2_SHIFT 16
-#define HPZ2(x) ((x) << HPZ2_SHIFT)
-#define HPZ1_MASK 0x00000f00U
-#define HPZ1_SHIFT 8
-#define HPZ1(x) ((x) << HPZ1_SHIFT)
-#define HPZ0_MASK 0x0000000fU
-#define HPZ0_SHIFT 0
-#define HPZ0(x) ((x) << HPZ0_SHIFT)
-
-#define ULP_RX_TDDP_PSZ 0x19178
-
-#define SF_DATA 0x193f8
-#define SF_OP 0x193fc
-#define SF_BUSY 0x80000000U
-#define SF_LOCK 0x00000010U
-#define SF_CONT 0x00000008U
-#define BYTECNT_MASK 0x00000006U
-#define BYTECNT_SHIFT 1
-#define BYTECNT(x) ((x) << BYTECNT_SHIFT)
-#define OP_WR 0x00000001U
-
-#define PL_PF_INT_CAUSE 0x3c0
-#define PFSW 0x00000008U
-#define PFSGE 0x00000004U
-#define PFCIM 0x00000002U
-#define PFMPS 0x00000001U
-
-#define PL_PF_INT_ENABLE 0x3c4
-#define PL_PF_CTL 0x3c8
-#define SWINT 0x00000001U
-
-#define PL_WHOAMI 0x19400
-#define SOURCEPF_MASK 0x00000700U
-#define SOURCEPF_SHIFT 8
-#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT)
-#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
-#define ISVF 0x00000080U
-#define VFID_MASK 0x0000007fU
-#define VFID_SHIFT 0
-#define VFID(x) ((x) << VFID_SHIFT)
-#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT)
-
-#define PL_INT_CAUSE 0x1940c
-#define ULP_TX 0x08000000U
-#define SGE 0x04000000U
-#define HMA 0x02000000U
-#define CPL_SWITCH 0x01000000U
-#define ULP_RX 0x00800000U
-#define PM_RX 0x00400000U
-#define PM_TX 0x00200000U
-#define MA 0x00100000U
-#define TP 0x00080000U
-#define LE 0x00040000U
-#define EDC1 0x00020000U
-#define EDC0 0x00010000U
-#define MC 0x00008000U
-#define PCIE 0x00004000U
-#define PMU 0x00002000U
-#define XGMAC_KR1 0x00001000U
-#define XGMAC_KR0 0x00000800U
-#define XGMAC1 0x00000400U
-#define XGMAC0 0x00000200U
-#define SMB 0x00000100U
-#define SF 0x00000080U
-#define PL 0x00000040U
-#define NCSI 0x00000020U
-#define MPS 0x00000010U
-#define MI 0x00000008U
-#define DBG 0x00000004U
-#define I2CM 0x00000002U
-#define CIM 0x00000001U
-
-#define MC1 0x31
-#define PL_INT_ENABLE 0x19410
-#define PL_INT_MAP0 0x19414
-#define PL_RST 0x19428
-#define PIORST 0x00000002U
-#define PIORSTMODE 0x00000001U
-
-#define PL_PL_INT_CAUSE 0x19430
-#define FATALPERR 0x00000010U
-#define PERRVFID 0x00000001U
-
-#define PL_REV 0x1943c
-
-#define S_REV 0
-#define M_REV 0xfU
-#define V_REV(x) ((x) << S_REV)
-#define G_REV(x) (((x) >> S_REV) & M_REV)
-
-#define LE_DB_CONFIG 0x19c04
-#define HASHEN 0x00100000U
-
-#define LE_DB_SERVER_INDEX 0x19c18
-#define LE_DB_ACT_CNT_IPV4 0x19c20
-#define LE_DB_ACT_CNT_IPV6 0x19c24
-
-#define LE_DB_INT_CAUSE 0x19c3c
-#define REQQPARERR 0x00010000U
-#define UNKNOWNCMD 0x00008000U
-#define PARITYERR 0x00000040U
-#define LIPMISS 0x00000020U
-#define LIP0 0x00000010U
-
-#define LE_DB_TID_HASHBASE 0x19df8
-
-#define NCSI_INT_CAUSE 0x1a0d8
-#define CIM_DM_PRTY_ERR 0x00000100U
-#define MPS_DM_PRTY_ERR 0x00000080U
-#define TXFIFO_PRTY_ERR 0x00000002U
-#define RXFIFO_PRTY_ERR 0x00000001U
-
-#define XGMAC_PORT_CFG2 0x1018
-#define PATEN 0x00040000U
-#define MAGICEN 0x00020000U
-#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
-#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+#define MPS_TRC_CFG_A 0x9800
+
+#define TRCFIFOEMPTY_S 4
+#define TRCFIFOEMPTY_V(x) ((x) << TRCFIFOEMPTY_S)
+#define TRCFIFOEMPTY_F TRCFIFOEMPTY_V(1U)
+
+#define TRCIGNOREDROPINPUT_S 3
+#define TRCIGNOREDROPINPUT_V(x) ((x) << TRCIGNOREDROPINPUT_S)
+#define TRCIGNOREDROPINPUT_F TRCIGNOREDROPINPUT_V(1U)
+
+#define TRCKEEPDUPLICATES_S 2
+#define TRCKEEPDUPLICATES_V(x) ((x) << TRCKEEPDUPLICATES_S)
+#define TRCKEEPDUPLICATES_F TRCKEEPDUPLICATES_V(1U)
+
+#define TRCEN_S 1
+#define TRCEN_V(x) ((x) << TRCEN_S)
+#define TRCEN_F TRCEN_V(1U)
+
+#define TRCMULTIFILTER_S 0
+#define TRCMULTIFILTER_V(x) ((x) << TRCMULTIFILTER_S)
+#define TRCMULTIFILTER_F TRCMULTIFILTER_V(1U)
+
+#define MPS_TRC_RSS_CONTROL_A 0x9808
+#define MPS_T5_TRC_RSS_CONTROL_A 0xa00c
+
+#define RSSCONTROL_S 16
+#define RSSCONTROL_V(x) ((x) << RSSCONTROL_S)
+
+#define QUEUENUMBER_S 0
+#define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
+
+#define MPS_TRC_INT_CAUSE_A 0x985c
+
+#define MISCPERR_S 8
+#define MISCPERR_V(x) ((x) << MISCPERR_S)
+#define MISCPERR_F MISCPERR_V(1U)
+
+#define PKTFIFO_S 4
+#define PKTFIFO_M 0xfU
+#define PKTFIFO_V(x) ((x) << PKTFIFO_S)
+
+#define FILTMEM_S 0
+#define FILTMEM_M 0xfU
+#define FILTMEM_V(x) ((x) << FILTMEM_S)
+
+#define MPS_CLS_INT_CAUSE_A 0xd028
+
+#define HASHSRAM_S 2
+#define HASHSRAM_V(x) ((x) << HASHSRAM_S)
+#define HASHSRAM_F HASHSRAM_V(1U)
+
+#define MATCHTCAM_S 1
+#define MATCHTCAM_V(x) ((x) << MATCHTCAM_S)
+#define MATCHTCAM_F MATCHTCAM_V(1U)
+
+#define MATCHSRAM_S 0
+#define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
+#define MATCHSRAM_F MATCHSRAM_V(1U)
+
+#define MPS_RX_PERR_INT_CAUSE_A 0x11074
+
+#define MPS_CLS_TCAM_Y_L_A 0xf000
+#define MPS_CLS_TCAM_X_L_A 0xf008
+
+#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
+
+#define MPS_CLS_TCAM_X_L(idx) (MPS_CLS_TCAM_X_L_A + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
+
+#define MPS_CLS_SRAM_L_A 0xe000
+#define MPS_CLS_SRAM_H_A 0xe004
+
+#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
+
+#define MPS_CLS_SRAM_H(idx) (MPS_CLS_SRAM_H_A + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_H_INSTANCES 336
+
+#define MULTILISTEN0_S 25
+
+#define REPLICATE_S 11
+#define REPLICATE_V(x) ((x) << REPLICATE_S)
+#define REPLICATE_F REPLICATE_V(1U)
+
+#define PF_S 8
+#define PF_M 0x7U
+#define PF_G(x) (((x) >> PF_S) & PF_M)
+
+#define VF_VALID_S 7
+#define VF_VALID_V(x) ((x) << VF_VALID_S)
+#define VF_VALID_F VF_VALID_V(1U)
+
+#define VF_S 0
+#define VF_M 0x7fU
+#define VF_G(x) (((x) >> VF_S) & VF_M)
-#define XGMAC_PORT_EPIO_DATA0 0x10c0
-#define XGMAC_PORT_EPIO_DATA1 0x10c4
-#define XGMAC_PORT_EPIO_DATA2 0x10c8
-#define XGMAC_PORT_EPIO_DATA3 0x10cc
-#define XGMAC_PORT_EPIO_OP 0x10d0
-#define EPIOWR 0x00000100U
-#define ADDRESS_MASK 0x000000ffU
-#define ADDRESS_SHIFT 0
-#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
+#define SRAM_PRIO3_S 22
+#define SRAM_PRIO3_M 0x7U
+#define SRAM_PRIO3_G(x) (((x) >> SRAM_PRIO3_S) & SRAM_PRIO3_M)
-#define MAC_PORT_INT_CAUSE 0x8dc
-#define XGMAC_PORT_INT_CAUSE 0x10dc
+#define SRAM_PRIO2_S 19
+#define SRAM_PRIO2_M 0x7U
+#define SRAM_PRIO2_G(x) (((x) >> SRAM_PRIO2_S) & SRAM_PRIO2_M)
-#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
+#define SRAM_PRIO1_S 16
+#define SRAM_PRIO1_M 0x7U
+#define SRAM_PRIO1_G(x) (((x) >> SRAM_PRIO1_S) & SRAM_PRIO1_M)
-#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34
+#define SRAM_PRIO0_S 13
+#define SRAM_PRIO0_M 0x7U
+#define SRAM_PRIO0_G(x) (((x) >> SRAM_PRIO0_S) & SRAM_PRIO0_M)
-#define S_TX_MOD_QUEUE_REQ_MAP 0
-#define M_TX_MOD_QUEUE_REQ_MAP 0xffffU
-#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+#define SRAM_VLD_S 12
+#define SRAM_VLD_V(x) ((x) << SRAM_VLD_S)
+#define SRAM_VLD_F SRAM_VLD_V(1U)
-#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30
+#define PORTMAP_S 0
+#define PORTMAP_M 0xfU
+#define PORTMAP_G(x) (((x) >> PORTMAP_S) & PORTMAP_M)
-#define S_TX_MODQ_WEIGHT3 24
-#define M_TX_MODQ_WEIGHT3 0xffU
-#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3)
+#define CPL_INTR_CAUSE_A 0x19054
-#define S_TX_MODQ_WEIGHT2 16
-#define M_TX_MODQ_WEIGHT2 0xffU
-#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2)
+#define CIM_OP_MAP_PERR_S 5
+#define CIM_OP_MAP_PERR_V(x) ((x) << CIM_OP_MAP_PERR_S)
+#define CIM_OP_MAP_PERR_F CIM_OP_MAP_PERR_V(1U)
-#define S_TX_MODQ_WEIGHT1 8
-#define M_TX_MODQ_WEIGHT1 0xffU
-#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
+#define CIM_OVFL_ERROR_S 4
+#define CIM_OVFL_ERROR_V(x) ((x) << CIM_OVFL_ERROR_S)
+#define CIM_OVFL_ERROR_F CIM_OVFL_ERROR_V(1U)
-#define S_TX_MODQ_WEIGHT0 0
-#define M_TX_MODQ_WEIGHT0 0xffU
-#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
+#define TP_FRAMING_ERROR_S 3
+#define TP_FRAMING_ERROR_V(x) ((x) << TP_FRAMING_ERROR_S)
+#define TP_FRAMING_ERROR_F TP_FRAMING_ERROR_V(1U)
-#define A_TP_TX_SCHED_HDR 0x23
+#define SGE_FRAMING_ERROR_S 2
+#define SGE_FRAMING_ERROR_V(x) ((x) << SGE_FRAMING_ERROR_S)
+#define SGE_FRAMING_ERROR_F SGE_FRAMING_ERROR_V(1U)
-#define A_TP_TX_SCHED_FIFO 0x24
+#define CIM_FRAMING_ERROR_S 1
+#define CIM_FRAMING_ERROR_V(x) ((x) << CIM_FRAMING_ERROR_S)
+#define CIM_FRAMING_ERROR_F CIM_FRAMING_ERROR_V(1U)
-#define A_TP_TX_SCHED_PCMD 0x25
+#define ZERO_SWITCH_ERROR_S 0
+#define ZERO_SWITCH_ERROR_V(x) ((x) << ZERO_SWITCH_ERROR_S)
+#define ZERO_SWITCH_ERROR_F ZERO_SWITCH_ERROR_V(1U)
-#define S_VNIC 11
-#define V_VNIC(x) ((x) << S_VNIC)
-#define F_VNIC V_VNIC(1U)
+#define SMB_INT_CAUSE_A 0x19090
-#define S_FRAGMENTATION 9
-#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
-#define F_FRAGMENTATION V_FRAGMENTATION(1U)
+#define MSTTXFIFOPARINT_S 21
+#define MSTTXFIFOPARINT_V(x) ((x) << MSTTXFIFOPARINT_S)
+#define MSTTXFIFOPARINT_F MSTTXFIFOPARINT_V(1U)
-#define S_MPSHITTYPE 8
-#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
-#define F_MPSHITTYPE V_MPSHITTYPE(1U)
+#define MSTRXFIFOPARINT_S 20
+#define MSTRXFIFOPARINT_V(x) ((x) << MSTRXFIFOPARINT_S)
+#define MSTRXFIFOPARINT_F MSTRXFIFOPARINT_V(1U)
-#define S_MACMATCH 7
-#define V_MACMATCH(x) ((x) << S_MACMATCH)
-#define F_MACMATCH V_MACMATCH(1U)
+#define SLVFIFOPARINT_S 19
+#define SLVFIFOPARINT_V(x) ((x) << SLVFIFOPARINT_S)
+#define SLVFIFOPARINT_F SLVFIFOPARINT_V(1U)
-#define S_ETHERTYPE 6
-#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
-#define F_ETHERTYPE V_ETHERTYPE(1U)
+#define ULP_RX_INT_CAUSE_A 0x19158
+#define ULP_RX_ISCSI_TAGMASK_A 0x19164
+#define ULP_RX_ISCSI_PSZ_A 0x19168
-#define S_PROTOCOL 5
-#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
-#define F_PROTOCOL V_PROTOCOL(1U)
+#define HPZ3_S 24
+#define HPZ3_V(x) ((x) << HPZ3_S)
-#define S_TOS 4
-#define V_TOS(x) ((x) << S_TOS)
-#define F_TOS V_TOS(1U)
+#define HPZ2_S 16
+#define HPZ2_V(x) ((x) << HPZ2_S)
-#define S_VLAN 3
-#define V_VLAN(x) ((x) << S_VLAN)
-#define F_VLAN V_VLAN(1U)
+#define HPZ1_S 8
+#define HPZ1_V(x) ((x) << HPZ1_S)
-#define S_VNIC_ID 2
-#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
-#define F_VNIC_ID V_VNIC_ID(1U)
+#define HPZ0_S 0
+#define HPZ0_V(x) ((x) << HPZ0_S)
-#define S_PORT 1
-#define V_PORT(x) ((x) << S_PORT)
-#define F_PORT V_PORT(1U)
+#define ULP_RX_TDDP_PSZ_A 0x19178
-#define S_FCOE 0
-#define V_FCOE(x) ((x) << S_FCOE)
-#define F_FCOE V_FCOE(1U)
+/* registers for module SF */
+#define SF_DATA_A 0x193f8
+#define SF_OP_A 0x193fc
+
+#define SF_BUSY_S 31
+#define SF_BUSY_V(x) ((x) << SF_BUSY_S)
+#define SF_BUSY_F SF_BUSY_V(1U)
+
+#define SF_LOCK_S 4
+#define SF_LOCK_V(x) ((x) << SF_LOCK_S)
+#define SF_LOCK_F SF_LOCK_V(1U)
+
+#define SF_CONT_S 3
+#define SF_CONT_V(x) ((x) << SF_CONT_S)
+#define SF_CONT_F SF_CONT_V(1U)
+
+#define BYTECNT_S 1
+#define BYTECNT_V(x) ((x) << BYTECNT_S)
+
+#define OP_S 0
+#define OP_V(x) ((x) << OP_S)
+#define OP_F OP_V(1U)
+
+#define PL_PF_INT_CAUSE_A 0x3c0
+
+#define PFSW_S 3
+#define PFSW_V(x) ((x) << PFSW_S)
+#define PFSW_F PFSW_V(1U)
+
+#define PFCIM_S 1
+#define PFCIM_V(x) ((x) << PFCIM_S)
+#define PFCIM_F PFCIM_V(1U)
+
+#define PL_PF_INT_ENABLE_A 0x3c4
+#define PL_PF_CTL_A 0x3c8
+
+#define PL_WHOAMI_A 0x19400
+
+#define SOURCEPF_S 8
+#define SOURCEPF_M 0x7U
+#define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
+
+#define PL_INT_CAUSE_A 0x1940c
+
+#define ULP_TX_S 27
+#define ULP_TX_V(x) ((x) << ULP_TX_S)
+#define ULP_TX_F ULP_TX_V(1U)
+
+#define SGE_S 26
+#define SGE_V(x) ((x) << SGE_S)
+#define SGE_F SGE_V(1U)
+
+#define CPL_SWITCH_S 24
+#define CPL_SWITCH_V(x) ((x) << CPL_SWITCH_S)
+#define CPL_SWITCH_F CPL_SWITCH_V(1U)
+
+#define ULP_RX_S 23
+#define ULP_RX_V(x) ((x) << ULP_RX_S)
+#define ULP_RX_F ULP_RX_V(1U)
+
+#define PM_RX_S 22
+#define PM_RX_V(x) ((x) << PM_RX_S)
+#define PM_RX_F PM_RX_V(1U)
+
+#define PM_TX_S 21
+#define PM_TX_V(x) ((x) << PM_TX_S)
+#define PM_TX_F PM_TX_V(1U)
+
+#define MA_S 20
+#define MA_V(x) ((x) << MA_S)
+#define MA_F MA_V(1U)
+
+#define TP_S 19
+#define TP_V(x) ((x) << TP_S)
+#define TP_F TP_V(1U)
+
+#define LE_S 18
+#define LE_V(x) ((x) << LE_S)
+#define LE_F LE_V(1U)
+
+#define EDC1_S 17
+#define EDC1_V(x) ((x) << EDC1_S)
+#define EDC1_F EDC1_V(1U)
+
+#define EDC0_S 16
+#define EDC0_V(x) ((x) << EDC0_S)
+#define EDC0_F EDC0_V(1U)
+
+#define MC_S 15
+#define MC_V(x) ((x) << MC_S)
+#define MC_F MC_V(1U)
+
+#define PCIE_S 14
+#define PCIE_V(x) ((x) << PCIE_S)
+#define PCIE_F PCIE_V(1U)
+
+#define XGMAC_KR1_S 12
+#define XGMAC_KR1_V(x) ((x) << XGMAC_KR1_S)
+#define XGMAC_KR1_F XGMAC_KR1_V(1U)
+
+#define XGMAC_KR0_S 11
+#define XGMAC_KR0_V(x) ((x) << XGMAC_KR0_S)
+#define XGMAC_KR0_F XGMAC_KR0_V(1U)
+
+#define XGMAC1_S 10
+#define XGMAC1_V(x) ((x) << XGMAC1_S)
+#define XGMAC1_F XGMAC1_V(1U)
+
+#define XGMAC0_S 9
+#define XGMAC0_V(x) ((x) << XGMAC0_S)
+#define XGMAC0_F XGMAC0_V(1U)
+
+#define SMB_S 8
+#define SMB_V(x) ((x) << SMB_S)
+#define SMB_F SMB_V(1U)
+
+#define SF_S 7
+#define SF_V(x) ((x) << SF_S)
+#define SF_F SF_V(1U)
+
+#define PL_S 6
+#define PL_V(x) ((x) << PL_S)
+#define PL_F PL_V(1U)
+
+#define NCSI_S 5
+#define NCSI_V(x) ((x) << NCSI_S)
+#define NCSI_F NCSI_V(1U)
+
+#define MPS_S 4
+#define MPS_V(x) ((x) << MPS_S)
+#define MPS_F MPS_V(1U)
+
+#define CIM_S 0
+#define CIM_V(x) ((x) << CIM_S)
+#define CIM_F CIM_V(1U)
+
+#define MC1_S 31
+
+#define PL_INT_ENABLE_A 0x19410
+#define PL_INT_MAP0_A 0x19414
+#define PL_RST_A 0x19428
+
+#define PIORST_S 1
+#define PIORST_V(x) ((x) << PIORST_S)
+#define PIORST_F PIORST_V(1U)
+
+#define PIORSTMODE_S 0
+#define PIORSTMODE_V(x) ((x) << PIORSTMODE_S)
+#define PIORSTMODE_F PIORSTMODE_V(1U)
+
+#define PL_PL_INT_CAUSE_A 0x19430
+
+#define FATALPERR_S 4
+#define FATALPERR_V(x) ((x) << FATALPERR_S)
+#define FATALPERR_F FATALPERR_V(1U)
+
+#define PERRVFID_S 0
+#define PERRVFID_V(x) ((x) << PERRVFID_S)
+#define PERRVFID_F PERRVFID_V(1U)
+
+#define PL_REV_A 0x1943c
+
+#define REV_S 0
+#define REV_M 0xfU
+#define REV_V(x) ((x) << REV_S)
+#define REV_G(x) (((x) >> REV_S) & REV_M)
+
+#define LE_DB_INT_CAUSE_A 0x19c3c
+
+#define REQQPARERR_S 16
+#define REQQPARERR_V(x) ((x) << REQQPARERR_S)
+#define REQQPARERR_F REQQPARERR_V(1U)
+
+#define UNKNOWNCMD_S 15
+#define UNKNOWNCMD_V(x) ((x) << UNKNOWNCMD_S)
+#define UNKNOWNCMD_F UNKNOWNCMD_V(1U)
+
+#define PARITYERR_S 6
+#define PARITYERR_V(x) ((x) << PARITYERR_S)
+#define PARITYERR_F PARITYERR_V(1U)
+
+#define LIPMISS_S 5
+#define LIPMISS_V(x) ((x) << LIPMISS_S)
+#define LIPMISS_F LIPMISS_V(1U)
+
+#define LIP0_S 4
+#define LIP0_V(x) ((x) << LIP0_S)
+#define LIP0_F LIP0_V(1U)
+
+#define NCSI_INT_CAUSE_A 0x1a0d8
+
+#define CIM_DM_PRTY_ERR_S 8
+#define CIM_DM_PRTY_ERR_V(x) ((x) << CIM_DM_PRTY_ERR_S)
+#define CIM_DM_PRTY_ERR_F CIM_DM_PRTY_ERR_V(1U)
+
+#define MPS_DM_PRTY_ERR_S 7
+#define MPS_DM_PRTY_ERR_V(x) ((x) << MPS_DM_PRTY_ERR_S)
+#define MPS_DM_PRTY_ERR_F MPS_DM_PRTY_ERR_V(1U)
+
+#define TXFIFO_PRTY_ERR_S 1
+#define TXFIFO_PRTY_ERR_V(x) ((x) << TXFIFO_PRTY_ERR_S)
+#define TXFIFO_PRTY_ERR_F TXFIFO_PRTY_ERR_V(1U)
+
+#define RXFIFO_PRTY_ERR_S 0
+#define RXFIFO_PRTY_ERR_V(x) ((x) << RXFIFO_PRTY_ERR_S)
+#define RXFIFO_PRTY_ERR_F RXFIFO_PRTY_ERR_V(1U)
+
+#define XGMAC_PORT_CFG2_A 0x1018
+
+#define PATEN_S 18
+#define PATEN_V(x) ((x) << PATEN_S)
+#define PATEN_F PATEN_V(1U)
+
+#define MAGICEN_S 17
+#define MAGICEN_V(x) ((x) << MAGICEN_S)
+#define MAGICEN_F MAGICEN_V(1U)
+
+#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
+#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+
+#define XGMAC_PORT_EPIO_DATA0_A 0x10c0
+#define XGMAC_PORT_EPIO_DATA1_A 0x10c4
+#define XGMAC_PORT_EPIO_DATA2_A 0x10c8
+#define XGMAC_PORT_EPIO_DATA3_A 0x10cc
+#define XGMAC_PORT_EPIO_OP_A 0x10d0
+
+#define EPIOWR_S 8
+#define EPIOWR_V(x) ((x) << EPIOWR_S)
+#define EPIOWR_F EPIOWR_V(1U)
+
+#define ADDRESS_S 0
+#define ADDRESS_V(x) ((x) << ADDRESS_S)
+
+#define MAC_PORT_INT_CAUSE_A 0x8dc
+#define XGMAC_PORT_INT_CAUSE_A 0x10dc
+
+#define TP_TX_MOD_QUEUE_REQ_MAP_A 0x7e28
+
+#define TP_TX_MOD_QUEUE_WEIGHT0_A 0x7e30
+#define TP_TX_MOD_CHANNEL_WEIGHT_A 0x7e34
+
+#define TX_MOD_QUEUE_REQ_MAP_S 0
+#define TX_MOD_QUEUE_REQ_MAP_V(x) ((x) << TX_MOD_QUEUE_REQ_MAP_S)
+
+#define TX_MODQ_WEIGHT3_S 24
+#define TX_MODQ_WEIGHT3_V(x) ((x) << TX_MODQ_WEIGHT3_S)
+
+#define TX_MODQ_WEIGHT2_S 16
+#define TX_MODQ_WEIGHT2_V(x) ((x) << TX_MODQ_WEIGHT2_S)
+
+#define TX_MODQ_WEIGHT1_S 8
+#define TX_MODQ_WEIGHT1_V(x) ((x) << TX_MODQ_WEIGHT1_S)
+
+#define TX_MODQ_WEIGHT0_S 0
+#define TX_MODQ_WEIGHT0_V(x) ((x) << TX_MODQ_WEIGHT0_S)
+
+#define TP_TX_SCHED_HDR_A 0x23
+#define TP_TX_SCHED_FIFO_A 0x24
+#define TP_TX_SCHED_PCMD_A 0x25
#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
@@ -1329,62 +2099,116 @@
#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
-#define MC_P_BIST_CMD 0x41400
-#define MC_P_BIST_CMD_ADDR 0x41404
-#define MC_P_BIST_CMD_LEN 0x41408
-#define MC_P_BIST_DATA_PATTERN 0x4140c
-#define MC_P_BIST_STATUS_RDATA 0x41488
-#define EDC_T50_BASE_ADDR 0x50000
-#define EDC_H_BIST_CMD 0x50004
-#define EDC_H_BIST_CMD_ADDR 0x50008
-#define EDC_H_BIST_CMD_LEN 0x5000c
-#define EDC_H_BIST_DATA_PATTERN 0x50010
-#define EDC_H_BIST_STATUS_RDATA 0x50028
-
-#define EDC_T51_BASE_ADDR 0x50800
+#define MC_P_BIST_CMD_A 0x41400
+#define MC_P_BIST_CMD_ADDR_A 0x41404
+#define MC_P_BIST_CMD_LEN_A 0x41408
+#define MC_P_BIST_DATA_PATTERN_A 0x4140c
+#define MC_P_BIST_STATUS_RDATA_A 0x41488
+
+#define EDC_T50_BASE_ADDR 0x50000
+
+#define EDC_H_BIST_CMD_A 0x50004
+#define EDC_H_BIST_CMD_ADDR_A 0x50008
+#define EDC_H_BIST_CMD_LEN_A 0x5000c
+#define EDC_H_BIST_DATA_PATTERN_A 0x50010
+#define EDC_H_BIST_STATUS_RDATA_A 0x50028
+
+#define EDC_T51_BASE_ADDR 0x50800
+
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
-#define A_PL_VF_REV 0x4
-#define A_PL_VF_WHOAMI 0x0
-#define A_PL_VF_REVISION 0x8
+#define PL_VF_REV_A 0x4
+#define PL_VF_WHOAMI_A 0x0
+#define PL_VF_REVISION_A 0x8
-#define S_CHIPID 4
-#define M_CHIPID 0xfU
-#define V_CHIPID(x) ((x) << S_CHIPID)
-#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
+/* registers for module CIM */
+#define CIM_HOST_ACC_CTRL_A 0x7b50
+#define CIM_HOST_ACC_DATA_A 0x7b54
+#define UP_UP_DBG_LA_CFG_A 0x140
+#define UP_UP_DBG_LA_DATA_A 0x144
-/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
- * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
- * selects for a particular field being present. These fields, when present
- * in the Compressed Filter Tuple, have the following widths in bits.
- */
-#define W_FT_FCOE 1
-#define W_FT_PORT 3
-#define W_FT_VNIC_ID 17
-#define W_FT_VLAN 17
-#define W_FT_TOS 8
-#define W_FT_PROTOCOL 8
-#define W_FT_ETHERTYPE 16
-#define W_FT_MACMATCH 9
-#define W_FT_MPSHITTYPE 3
-#define W_FT_FRAGMENTATION 1
-
-/* Some of the Compressed Filter Tuple fields have internal structure. These
- * bit shifts/masks describe those structures. All shifts are relative to the
- * base position of the fields within the Compressed Filter Tuple
- */
-#define S_FT_VLAN_VLD 16
-#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
-#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
+#define HOSTBUSY_S 17
+#define HOSTBUSY_V(x) ((x) << HOSTBUSY_S)
+#define HOSTBUSY_F HOSTBUSY_V(1U)
+
+#define HOSTWRITE_S 16
+#define HOSTWRITE_V(x) ((x) << HOSTWRITE_S)
+#define HOSTWRITE_F HOSTWRITE_V(1U)
+
+#define UPDBGLARDEN_S 1
+#define UPDBGLARDEN_V(x) ((x) << UPDBGLARDEN_S)
+#define UPDBGLARDEN_F UPDBGLARDEN_V(1U)
+
+#define UPDBGLAEN_S 0
+#define UPDBGLAEN_V(x) ((x) << UPDBGLAEN_S)
+#define UPDBGLAEN_F UPDBGLAEN_V(1U)
+
+#define UPDBGLARDPTR_S 2
+#define UPDBGLARDPTR_M 0xfffU
+#define UPDBGLARDPTR_V(x) ((x) << UPDBGLARDPTR_S)
+
+#define UPDBGLAWRPTR_S 16
+#define UPDBGLAWRPTR_M 0xfffU
+#define UPDBGLAWRPTR_G(x) (((x) >> UPDBGLAWRPTR_S) & UPDBGLAWRPTR_M)
+
+#define UPDBGLACAPTPCONLY_S 30
+#define UPDBGLACAPTPCONLY_V(x) ((x) << UPDBGLACAPTPCONLY_S)
+#define UPDBGLACAPTPCONLY_F UPDBGLACAPTPCONLY_V(1U)
+
+#define CIM_QUEUE_CONFIG_REF_A 0x7b48
+#define CIM_QUEUE_CONFIG_CTRL_A 0x7b4c
+
+#define CIMQSIZE_S 24
+#define CIMQSIZE_M 0x3fU
+#define CIMQSIZE_G(x) (((x) >> CIMQSIZE_S) & CIMQSIZE_M)
+
+#define CIMQBASE_S 16
+#define CIMQBASE_M 0x3fU
+#define CIMQBASE_G(x) (((x) >> CIMQBASE_S) & CIMQBASE_M)
+
+#define QUEFULLTHRSH_S 0
+#define QUEFULLTHRSH_M 0x1ffU
+#define QUEFULLTHRSH_G(x) (((x) >> QUEFULLTHRSH_S) & QUEFULLTHRSH_M)
+
+#define UP_IBQ_0_RDADDR_A 0x10
+#define UP_IBQ_0_SHADOW_RDADDR_A 0x280
+#define UP_OBQ_0_REALADDR_A 0x104
+#define UP_OBQ_0_SHADOW_REALADDR_A 0x394
+
+#define IBQRDADDR_S 0
+#define IBQRDADDR_M 0x1fffU
+#define IBQRDADDR_G(x) (((x) >> IBQRDADDR_S) & IBQRDADDR_M)
+
+#define IBQWRADDR_S 0
+#define IBQWRADDR_M 0x1fffU
+#define IBQWRADDR_G(x) (((x) >> IBQWRADDR_S) & IBQWRADDR_M)
+
+#define QUERDADDR_S 0
+#define QUERDADDR_M 0x7fffU
+#define QUERDADDR_G(x) (((x) >> QUERDADDR_S) & QUERDADDR_M)
+
+#define QUEREMFLITS_S 0
+#define QUEREMFLITS_M 0x7ffU
+#define QUEREMFLITS_G(x) (((x) >> QUEREMFLITS_S) & QUEREMFLITS_M)
+
+#define QUEEOPCNT_S 16
+#define QUEEOPCNT_M 0xfffU
+#define QUEEOPCNT_G(x) (((x) >> QUEEOPCNT_S) & QUEEOPCNT_M)
+
+#define QUESOPCNT_S 0
+#define QUESOPCNT_M 0xfffU
+#define QUESOPCNT_G(x) (((x) >> QUESOPCNT_S) & QUESOPCNT_M)
-#define S_FT_VNID_ID_VF 0
-#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
+#define OBQSELECT_S 4
+#define OBQSELECT_V(x) ((x) << OBQSELECT_S)
+#define OBQSELECT_F OBQSELECT_V(1U)
-#define S_FT_VNID_ID_PF 7
-#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
+#define IBQSELECT_S 3
+#define IBQSELECT_V(x) ((x) << IBQSELECT_S)
+#define IBQSELECT_F IBQSELECT_V(1U)
-#define S_FT_VNID_ID_VLD 16
-#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
+#define QUENUMSELECT_S 0
+#define QUENUMSELECT_V(x) ((x) << QUENUMSELECT_S)
#endif /* __T4_REGS_H */
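The bulk of the t4_regs.h churn above is a mechanical rename to the cxgb4 register-field convention: for a field FOO, FOO_S is the bit shift, FOO_M the unshifted mask, FOO_V(x) places a value into the field, FOO_G(x) extracts it, FOO_F is the single-bit flag form, and register addresses gain an _A suffix. A minimal, self-contained sketch of how such macros are typically used; the macro bodies are copied from the hunks above, while the sample register values and the standalone main() are made up for illustration and are not driver code:

/* Illustrative only: usage of the _S/_M/_V/_G/_F macro convention. */
#include <stdio.h>

#define SOURCEPF_S	8
#define SOURCEPF_M	0x7U
#define SOURCEPF_G(x)	(((x) >> SOURCEPF_S) & SOURCEPF_M)

#define MBMSGVALID_S	3
#define MBMSGVALID_V(x)	((x) << MBMSGVALID_S)
#define MBMSGVALID_F	MBMSGVALID_V(1U)

int main(void)
{
	unsigned int whoami = 0x00000480;	/* pretend PL_WHOAMI_A readback */
	unsigned int mbox_ctl = 0;

	/* _G pulls a multi-bit field back out of a register word. */
	printf("source PF = %u\n", SOURCEPF_G(whoami));

	/* _F sets (or tests) a single-bit field. */
	mbox_ctl |= MBMSGVALID_F;
	printf("mailbox ctrl = %#x\n", mbox_ctl);
	return 0;
}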
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
new file mode 100644
index 000000000000..a40484432ebf
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -0,0 +1,118 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_VALUES_H__
+#define __T4_VALUES_H__
+
+/* This file contains definitions for various T4 register value hardware
+ * constants. The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior. For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/* SGE register field values.
+ */
+
+/* CONTROL1 register */
+#define RXPKTCPLMODE_SPLIT_X 1
+
+#define INGPCIEBOUNDARY_SHIFT_X 5
+#define INGPCIEBOUNDARY_32B_X 0
+
+#define INGPADBOUNDARY_SHIFT_X 5
+
+/* CONTROL2 register */
+#define INGPACKBOUNDARY_SHIFT_X 5
+#define INGPACKBOUNDARY_16B_X 0
+
+/* GTS register */
+#define SGE_TIMERREGS 6
+
+/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (IDXSIZE_UNIT_X) Gather Buffer interface at offset 64. For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE 128
+#define SGE_UDB_KDOORBELL 8
+#define SGE_UDB_GTS 20
+#define SGE_UDB_WCDOORBELL 64
+
+/* PCI-E definitions */
+#define WINDOW_SHIFT_X 10
+#define PCIEOFST_SHIFT_X 10
+
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present. These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define FT_FCOE_W 1
+#define FT_PORT_W 3
+#define FT_VNIC_ID_W 17
+#define FT_VLAN_W 17
+#define FT_TOS_W 8
+#define FT_PROTOCOL_W 8
+#define FT_ETHERTYPE_W 16
+#define FT_MACMATCH_W 9
+#define FT_MPSHITTYPE_W 3
+#define FT_FRAGMENTATION_W 1
+
+/* Some of the Compressed Filter Tuple fields have internal structure. These
+ * bit shifts/masks describe those structures. All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple.
+ */
+#define FT_VLAN_VLD_S 16
+#define FT_VLAN_VLD_V(x) ((x) << FT_VLAN_VLD_S)
+#define FT_VLAN_VLD_F FT_VLAN_VLD_V(1U)
+
+#define FT_VNID_ID_VF_S 0
+#define FT_VNID_ID_VF_V(x) ((x) << FT_VNID_ID_VF_S)
+
+#define FT_VNID_ID_PF_S 7
+#define FT_VNID_ID_PF_V(x) ((x) << FT_VNID_ID_PF_S)
+
+#define FT_VNID_ID_VLD_S 16
+#define FT_VNID_ID_VLD_V(x) ((x) << FT_VNID_ID_VLD_S)
+
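As a quick illustration (not part of the patch), the PF/VF/valid helpers above compose into a VNIC_ID sub-field value as follows; ft_vnic_id is a hypothetical helper name:

static inline u32 ft_vnic_id(unsigned int pf, unsigned int vf)
{
	return FT_VNID_ID_VLD_V(1U) | FT_VNID_ID_PF_V(pf) | FT_VNID_ID_VF_V(vf);
}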
+#endif /* __T4_VALUES_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 7c0aec85137a..de8283324f1f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -673,6 +673,7 @@ enum fw_cmd_opcodes {
FW_RSS_IND_TBL_CMD = 0x20,
FW_RSS_GLB_CONFIG_CMD = 0x22,
FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_DEVLOG_CMD = 0x25,
FW_CLIP_CMD = 0x28,
FW_LASTC2E_CMD = 0x40,
FW_ERROR_CMD = 0x80,
@@ -3038,4 +3039,84 @@ enum fw_hdr_flags {
FW_HDR_FLAGS_RESET_HALT = 0x00000001,
};
+/* length of the formatting string */
+#define FW_DEVLOG_FMT_LEN 192
+
+/* maximum number of the formatting string parameters */
+#define FW_DEVLOG_FMT_PARAMS_NUM 8
+
+/* priority levels */
+enum fw_devlog_level {
+ FW_DEVLOG_LEVEL_EMERG = 0x0,
+ FW_DEVLOG_LEVEL_CRIT = 0x1,
+ FW_DEVLOG_LEVEL_ERR = 0x2,
+ FW_DEVLOG_LEVEL_NOTICE = 0x3,
+ FW_DEVLOG_LEVEL_INFO = 0x4,
+ FW_DEVLOG_LEVEL_DEBUG = 0x5,
+ FW_DEVLOG_LEVEL_MAX = 0x5,
+};
+
+/* facilities that may send a log message */
+enum fw_devlog_facility {
+ FW_DEVLOG_FACILITY_CORE = 0x00,
+ FW_DEVLOG_FACILITY_CF = 0x01,
+ FW_DEVLOG_FACILITY_SCHED = 0x02,
+ FW_DEVLOG_FACILITY_TIMER = 0x04,
+ FW_DEVLOG_FACILITY_RES = 0x06,
+ FW_DEVLOG_FACILITY_HW = 0x08,
+ FW_DEVLOG_FACILITY_FLR = 0x10,
+ FW_DEVLOG_FACILITY_DMAQ = 0x12,
+ FW_DEVLOG_FACILITY_PHY = 0x14,
+ FW_DEVLOG_FACILITY_MAC = 0x16,
+ FW_DEVLOG_FACILITY_PORT = 0x18,
+ FW_DEVLOG_FACILITY_VI = 0x1A,
+ FW_DEVLOG_FACILITY_FILTER = 0x1C,
+ FW_DEVLOG_FACILITY_ACL = 0x1E,
+ FW_DEVLOG_FACILITY_TM = 0x20,
+ FW_DEVLOG_FACILITY_QFC = 0x22,
+ FW_DEVLOG_FACILITY_DCB = 0x24,
+ FW_DEVLOG_FACILITY_ETH = 0x26,
+ FW_DEVLOG_FACILITY_OFLD = 0x28,
+ FW_DEVLOG_FACILITY_RI = 0x2A,
+ FW_DEVLOG_FACILITY_ISCSI = 0x2C,
+ FW_DEVLOG_FACILITY_FCOE = 0x2E,
+ FW_DEVLOG_FACILITY_FOISCSI = 0x30,
+ FW_DEVLOG_FACILITY_FOFCOE = 0x32,
+ FW_DEVLOG_FACILITY_MAX = 0x32,
+};
+
+/* log message format */
+struct fw_devlog_e {
+ __be64 timestamp;
+ __be32 seqno;
+ __be16 reserved1;
+ __u8 level;
+ __u8 facility;
+ __u8 fmt[FW_DEVLOG_FMT_LEN];
+ __be32 params[FW_DEVLOG_FMT_PARAMS_NUM];
+ __be32 reserved3[4];
+};
+
+struct fw_devlog_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __u8 level;
+ __u8 r2[7];
+ __be32 memtype_devlog_memaddr16_devlog;
+ __be32 memsize_devlog;
+ __be32 r3[2];
+};
+
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S 28
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M 0xf
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(x) \
+ (((x) >> FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S) & \
+ FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M)
+
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S 0
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M 0xfffffff
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(x) \
+ (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
+ FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
+
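For reference, a small sketch (not part of the patch) of decoding the device-log location from a firmware-filled fw_devlog_cmd using the accessors above; the << 4 conversion assumes the memaddr16 field is in 16-byte units, as its name suggests:

static void devlog_location(const struct fw_devlog_cmd *cmd,
			    unsigned int *memtype, u32 *addr, u32 *size)
{
	u32 v = be32_to_cpu(cmd->memtype_devlog_memaddr16_devlog);

	*memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(v);
	*addr = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(v) << 4;	/* assumed 16-byte units */
	*size = be32_to_cpu(cmd->memsize_devlog);
}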
#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index a936ee8958c7..122e2964e63b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -380,9 +380,9 @@ static void qenable(struct sge_rspq *rspq)
* enable interrupts.
*/
t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- CIDXINC(0) |
- SEINTARM(rspq->intr_params) |
- INGRESSQID(rspq->cntxt_id));
+ CIDXINC_V(0) |
+ SEINTARM_V(rspq->intr_params) |
+ INGRESSQID_V(rspq->cntxt_id));
}
/*
@@ -403,9 +403,9 @@ static void enable_rx(struct adapter *adapter)
*/
if (adapter->flags & USING_MSI)
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- CIDXINC(0) |
- SEINTARM(s->intrq.intr_params) |
- INGRESSQID(s->intrq.cntxt_id));
+ CIDXINC_V(0) |
+ SEINTARM_V(s->intrq.intr_params) |
+ INGRESSQID_V(s->intrq.cntxt_id));
}
@@ -450,7 +450,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
*/
const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
- opcode = G_CPL_OPCODE(ntohl(p->opcode_qid));
+ opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
if (opcode != CPL_SGE_EGR_UPDATE) {
dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
, opcode);
@@ -471,7 +471,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
* free TX Queue Descriptors ...
*/
const struct cpl_sge_egr_update *p = cpl;
- unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
+ unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
struct sge *s = &adapter->sge;
struct sge_txq *tq;
struct sge_eth_txq *txq;
@@ -1673,7 +1673,7 @@ static void cxgb4vf_get_regs(struct net_device *dev,
reg_block_dump(adapter, regbuf,
T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
- ? A_PL_VF_WHOAMI : A_PL_VF_REVISION));
+ ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
reg_block_dump(adapter, regbuf,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
@@ -2294,26 +2294,22 @@ static int adap_init0(struct adapter *adapter)
* threshold values from the SGE parameters.
*/
s->timer_val[0] = core_ticks_to_us(adapter,
- TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
+ TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
s->timer_val[1] = core_ticks_to_us(adapter,
- TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
+ TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
s->timer_val[2] = core_ticks_to_us(adapter,
- TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
+ TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
s->timer_val[3] = core_ticks_to_us(adapter,
- TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
+ TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
s->timer_val[4] = core_ticks_to_us(adapter,
- TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
+ TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
s->timer_val[5] = core_ticks_to_us(adapter,
- TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
-
- s->counter_val[0] =
- THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
- s->counter_val[1] =
- THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
- s->counter_val[2] =
- THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
- s->counter_val[3] =
- THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
+ TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
+
+ s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
/*
* Grab our Virtual Interface resource allocation, extract the
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f7fd1317d996..0545f0de1c52 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -47,6 +47,7 @@
#include "t4vf_defs.h"
#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"
#include "../cxgb4/t4_msg.h"
@@ -531,11 +532,11 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
if (is_t4(adapter->params.chip))
- val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
+ val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
else
- val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) |
- DBTYPE(1);
- val |= DBPRIO(1);
+ val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
+ DBTYPE_F;
+ val |= DBPRIO_F;
/* Make sure all memory writes to the Free List queue are
* committed before we tell the hardware about them.
@@ -549,9 +550,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
if (unlikely(fl->bar2_addr == NULL)) {
t4_write_reg(adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- QID(fl->cntxt_id) | val);
+ QID_V(fl->cntxt_id) | val);
} else {
- writel(val | QID(fl->bar2_qid),
+ writel(val | QID_V(fl->bar2_qid),
fl->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write memory Barrier will force the write to
@@ -925,7 +926,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
}
sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
- ULPTX_NSGE(nfrags));
+ ULPTX_NSGE_V(nfrags));
if (likely(--nfrags == 0))
return;
/*
@@ -979,12 +980,12 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* doorbell mechanism; otherwise use the new BAR2 mechanism.
*/
if (unlikely(tq->bar2_addr == NULL)) {
- u32 val = PIDX(n);
+ u32 val = PIDX_V(n);
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- QID(tq->cntxt_id) | val);
+ QID_V(tq->cntxt_id) | val);
} else {
- u32 val = PIDX_T5(n);
+ u32 val = PIDX_T5_V(n);
/* T4 and later chips share the same PIDX field offset within
* the doorbell, but T5 and later shrank the field in order to
@@ -992,7 +993,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* large in the first place (14 bits) so we just use the T5
* and later limits and warn if a Queue ID is too large.
*/
- WARN_ON(val & DBPRIO(1));
+ WARN_ON(val & DBPRIO_F);
/* If we're only writing a single Egress Unit and the BAR2
* Queue ID is 0, we can use the Write Combining Doorbell
@@ -1023,7 +1024,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
count--;
}
} else
- writel(val | QID(tq->bar2_qid),
+ writel(val | QID_V(tq->bar2_qid),
tq->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write Memory Barrier will force the write to the User
@@ -1325,9 +1326,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* If there's a VLAN tag present, add that to the list of things to
* do in this Work Request.
*/
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
txq->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
}
/*
@@ -1603,7 +1604,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
* If this is a good TCP packet and we have Generic Receive Offload
* enabled, handle the packet in the GRO path.
*/
- if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
+ if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
(rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
!pkt->ip_frag) {
do_gro(rxq, gl, pkt);
@@ -1625,7 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
rxq->stats.pkts++;
if (csum_ok && !pkt->err_vec &&
- (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
if (!pkt->ip_frag)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else {
@@ -1875,13 +1876,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
if (unlikely(work_done == 0))
rspq->unhandled_irqs++;
- val = CIDXINC(work_done) | SEINTARM(intr_params);
+ val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
if (is_t4(rspq->adapter->params.chip)) {
t4_write_reg(rspq->adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- val | INGRESSQID((u32)rspq->cntxt_id));
+ val | INGRESSQID_V((u32)rspq->cntxt_id));
} else {
- writel(val | INGRESSQID(rspq->bar2_qid),
+ writel(val | INGRESSQID_V(rspq->bar2_qid),
rspq->bar2_addr + SGE_UDB_GTS);
wmb();
}
@@ -1975,12 +1976,12 @@ static unsigned int process_intrq(struct adapter *adapter)
rspq_next(intrq);
}
- val = CIDXINC(work_done) | SEINTARM(intrq->intr_params);
+ val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
if (is_t4(adapter->params.chip))
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- val | INGRESSQID(intrq->cntxt_id));
+ val | INGRESSQID_V(intrq->cntxt_id));
else {
- writel(val | INGRESSQID(intrq->bar2_qid),
+ writel(val | INGRESSQID_V(intrq->bar2_qid),
intrq->bar2_addr + SGE_UDB_GTS);
wmb();
}
@@ -2583,7 +2584,7 @@ int t4vf_sge_init(struct adapter *adapter)
fl0, fl1);
return -EINVAL;
}
- if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
+ if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
return -EINVAL;
}
@@ -2593,9 +2594,9 @@ int t4vf_sge_init(struct adapter *adapter)
*/
if (fl1)
s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
- s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+ s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
- s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+ s->pktshift = PKTSHIFT_G(sge_params->sge_control);
/* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these
@@ -2607,8 +2608,8 @@ int t4vf_sge_init(struct adapter *adapter)
* end doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).)
*/
- ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
- X_INGPADBOUNDARY_SHIFT);
+ ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
+ INGPADBOUNDARY_SHIFT_X);
if (is_t4(adapter->params.chip)) {
s->fl_align = ingpadboundary;
} else {
@@ -2633,7 +2634,7 @@ int t4vf_sge_init(struct adapter *adapter)
* Congestion Threshold is in units of 2 Free List pointers.)
*/
s->fl_starve_thres
- = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
+ = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
/*
* Set up tasklet timers.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
index c7b127d93767..b516b12b1884 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
@@ -64,8 +64,8 @@
* Mailbox Data in the fixed CIM PF map and the programmable VF map must
* match. However, it's a useful convention ...
*/
-#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
-#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
+#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA_A
+#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA_A!
#endif
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 60426cf890a7..1b5506df35b1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -39,6 +39,7 @@
#include "t4vf_defs.h"
#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"
/*
@@ -137,9 +138,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* Loop trying to get ownership of the mailbox. Return an error
* if we can't gain ownership.
*/
- v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+ v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
- v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+ v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
if (v != MBOX_OWNER_DRV)
return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
@@ -161,7 +162,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
t4_read_reg(adapter, mbox_data); /* flush write */
t4_write_reg(adapter, mbox_ctl,
- MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+ MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
t4_read_reg(adapter, mbox_ctl); /* flush write */
/*
@@ -183,14 +184,14 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* If we're the owner, see if this is the reply we wanted.
*/
v = t4_read_reg(adapter, mbox_ctl);
- if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+ if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
/*
* If the Message Valid bit isn't on, revoke ownership
* of the mailbox and continue waiting for our reply.
*/
- if ((v & MBMSGVALID) == 0) {
+ if ((v & MBMSGVALID_F) == 0) {
t4_write_reg(adapter, mbox_ctl,
- MBOWNER(MBOX_OWNER_NONE));
+ MBOWNER_V(MBOX_OWNER_NONE));
continue;
}
@@ -216,7 +217,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
& FW_CMD_REQUEST_F) != 0);
}
t4_write_reg(adapter, mbox_ctl,
- MBOWNER(MBOX_OWNER_NONE));
+ MBOWNER_V(MBOX_OWNER_NONE));
return -FW_CMD_RETVAL_G(v);
}
}
@@ -530,19 +531,19 @@ int t4vf_get_sge_params(struct adapter *adapter)
int v;
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL));
+ FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE));
+ FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0));
+ FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1));
+ FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1));
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3));
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5));
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
v = t4vf_query_params(adapter, 7, params, vals);
if (v)
return v;
@@ -578,9 +579,9 @@ int t4vf_get_sge_params(struct adapter *adapter)
}
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD));
+ FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL));
+ FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
v = t4vf_query_params(adapter, 2, params, vals);
if (v)
return v;
@@ -617,8 +618,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
* the driver can just use it.
*/
whoami = t4_read_reg(adapter,
- T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
- pf = SOURCEPF_GET(whoami);
+ T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
+ pf = SOURCEPF_G(whoami);
s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
@@ -630,10 +631,10 @@ int t4vf_get_sge_params(struct adapter *adapter)
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
sge_params->sge_vf_eq_qpp =
((sge_params->sge_egress_queues_per_page >> s_qpp)
- & QUEUESPERPAGEPF0_MASK);
+ & QUEUESPERPAGEPF0_M);
sge_params->sge_vf_iq_qpp =
((sge_params->sge_ingress_queues_per_page >> s_qpp)
- & QUEUESPERPAGEPF0_MASK);
+ & QUEUESPERPAGEPF0_M);
}
return 0;
@@ -1592,7 +1593,7 @@ int t4vf_prep_adapter(struct adapter *adapter)
break;
case CHELSIO_T5:
- chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+ chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
break;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 25c4d88853d8..84b6a2b46aec 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.67"
+#define DRV_VERSION "2.1.1.83"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -188,6 +188,7 @@ struct enic {
struct enic_rfs_flw_tbl rfs_h;
u32 rx_copybreak;
u8 rss_key[ENIC_RSS_LEN];
+ struct vnic_gen_stats gen_stats;
};
static inline struct device *enic_get_dev(struct enic *enic)
@@ -242,6 +243,19 @@ static inline unsigned int enic_msix_notify_intr(struct enic *enic)
return enic->rq_count + enic->wq_count + 1;
}
+static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
+{
+ if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
+ net_warn_ratelimited("%s: PCI dma mapping failed!\n",
+ enic->netdev->name);
+ enic->gen_stats.dma_map_error++;
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
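A sketch of the intended call pattern (enic_map_skb_head is a hypothetical wrapper, not part of the patch): map, check with the helper above, and bail out before posting a descriptor.

static inline int enic_map_skb_head(struct enic *enic, struct sk_buff *skb,
				    unsigned int len, dma_addr_t *dma_addr)
{
	*dma_addr = pci_map_single(enic->pdev, skb->data, len,
				   PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, *dma_addr)))
		return -ENOMEM;		/* helper already bumped dma_map_error */
	return 0;
}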
void enic_reset_addr_lists(struct enic *enic);
int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index 87ddc44b590e..f8d2a6a34282 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -177,40 +177,6 @@ int enic_dev_intr_coal_timer_info(struct enic *enic)
return err;
}
-int enic_vnic_dev_deinit(struct enic *enic)
-{
- int err;
-
- spin_lock_bh(&enic->devcmd_lock);
- err = vnic_dev_deinit(enic->vdev);
- spin_unlock_bh(&enic->devcmd_lock);
-
- return err;
-}
-
-int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
-{
- int err;
-
- spin_lock_bh(&enic->devcmd_lock);
- err = vnic_dev_init_prov2(enic->vdev,
- (u8 *)vp, vic_provinfo_size(vp));
- spin_unlock_bh(&enic->devcmd_lock);
-
- return err;
-}
-
-int enic_dev_deinit_done(struct enic *enic, int *status)
-{
- int err;
-
- spin_lock_bh(&enic->devcmd_lock);
- err = vnic_dev_deinit_done(enic->vdev, status);
- spin_unlock_bh(&enic->devcmd_lock);
-
- return err;
-}
-
/* rtnl lock is held */
int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
@@ -237,28 +203,6 @@ int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return err;
}
-int enic_dev_enable2(struct enic *enic, int active)
-{
- int err;
-
- spin_lock_bh(&enic->devcmd_lock);
- err = vnic_dev_enable2(enic->vdev, active);
- spin_unlock_bh(&enic->devcmd_lock);
-
- return err;
-}
-
-int enic_dev_enable2_done(struct enic *enic, int *status)
-{
- int err;
-
- spin_lock_bh(&enic->devcmd_lock);
- err = vnic_dev_enable2_done(enic->vdev, status);
- spin_unlock_bh(&enic->devcmd_lock);
-
- return err;
-}
-
int enic_dev_status_to_errno(int devcmd_status)
{
switch (devcmd_status) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 10bb970b2f35..f5bb058b3f96 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -55,11 +55,6 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
int enic_dev_enable(struct enic *enic);
int enic_dev_disable(struct enic *enic);
int enic_dev_intr_coal_timer_info(struct enic *enic);
-int enic_vnic_dev_deinit(struct enic *enic);
-int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp);
-int enic_dev_deinit_done(struct enic *enic, int *status);
-int enic_dev_enable2(struct enic *enic, int arg);
-int enic_dev_enable2_done(struct enic *enic, int *status);
int enic_dev_status_to_errno(int devcmd_status);
#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index eba1eb846d34..0c396c1f55dc 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -24,6 +24,7 @@
#include "enic_dev.h"
#include "enic_clsf.h"
#include "vnic_rss.h"
+#include "vnic_stats.h"
struct enic_stat {
char name[ETH_GSTRING_LEN];
@@ -40,6 +41,11 @@ struct enic_stat {
.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}
+#define ENIC_GEN_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
+}
+
static const struct enic_stat enic_tx_stats[] = {
ENIC_TX_STAT(tx_frames_ok),
ENIC_TX_STAT(tx_unicast_frames_ok),
@@ -78,8 +84,13 @@ static const struct enic_stat enic_rx_stats[] = {
ENIC_RX_STAT(rx_frames_to_max),
};
+static const struct enic_stat enic_gen_stats[] = {
+ ENIC_GEN_STAT(dma_map_error),
+};
+
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
@@ -146,6 +157,10 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ for (i = 0; i < enic_n_gen_stats; i++) {
+ memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
break;
}
}
@@ -154,7 +169,7 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return enic_n_tx_stats + enic_n_rx_stats;
+ return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
default:
return -EOPNOTSUPP;
}
@@ -173,6 +188,8 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
for (i = 0; i < enic_n_rx_stats; i++)
*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
+ for (i = 0; i < enic_n_gen_stats; i++)
+ *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
}
static u32 enic_get_msglevel(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b29e027c476e..0535f6fbdc71 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -45,6 +45,7 @@
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
+#include <linux/crash_dump.h>
#include "cq_enet_desc.h"
#include "vnic_dev.h"
@@ -351,80 +352,94 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
return IRQ_HANDLED;
}
-static inline void enic_queue_wq_skb_cont(struct enic *enic,
- struct vnic_wq *wq, struct sk_buff *skb,
- unsigned int len_left, int loopback)
+static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
+ struct sk_buff *skb, unsigned int len_left,
+ int loopback)
{
const skb_frag_t *frag;
+ dma_addr_t dma_addr;
/* Queue additional data fragments */
for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
len_left -= skb_frag_size(frag);
- enic_queue_wq_desc_cont(wq, skb,
- skb_frag_dma_map(&enic->pdev->dev,
- frag, 0, skb_frag_size(frag),
- DMA_TO_DEVICE),
- skb_frag_size(frag),
- (len_left == 0), /* EOP? */
- loopback);
+ dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(enic_dma_map_check(enic, dma_addr)))
+ return -ENOMEM;
+ enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
+ (len_left == 0), /* EOP? */
+ loopback);
}
+
+ return 0;
}
-static inline void enic_queue_wq_skb_vlan(struct enic *enic,
- struct vnic_wq *wq, struct sk_buff *skb,
- int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
+ struct sk_buff *skb, int vlan_tag_insert,
+ unsigned int vlan_tag, int loopback)
{
unsigned int head_len = skb_headlen(skb);
unsigned int len_left = skb->len - head_len;
int eop = (len_left == 0);
+ dma_addr_t dma_addr;
+ int err = 0;
+
+ dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(enic_dma_map_check(enic, dma_addr)))
+ return -ENOMEM;
/* Queue the main skb fragment. The fragments are no larger
* than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
* than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
* per fragment is queued.
*/
- enic_queue_wq_desc(wq, skb,
- pci_map_single(enic->pdev, skb->data,
- head_len, PCI_DMA_TODEVICE),
- head_len,
- vlan_tag_insert, vlan_tag,
- eop, loopback);
+ enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
+ vlan_tag, eop, loopback);
if (!eop)
- enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+ return err;
}
-static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
- struct vnic_wq *wq, struct sk_buff *skb,
- int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
+ struct sk_buff *skb, int vlan_tag_insert,
+ unsigned int vlan_tag, int loopback)
{
unsigned int head_len = skb_headlen(skb);
unsigned int len_left = skb->len - head_len;
unsigned int hdr_len = skb_checksum_start_offset(skb);
unsigned int csum_offset = hdr_len + skb->csum_offset;
int eop = (len_left == 0);
+ dma_addr_t dma_addr;
+ int err = 0;
+
+ dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(enic_dma_map_check(enic, dma_addr)))
+ return -ENOMEM;
/* Queue the main skb fragment. The fragments are no larger
* than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
* than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
* per fragment is queued.
*/
- enic_queue_wq_desc_csum_l4(wq, skb,
- pci_map_single(enic->pdev, skb->data,
- head_len, PCI_DMA_TODEVICE),
- head_len,
- csum_offset,
- hdr_len,
- vlan_tag_insert, vlan_tag,
- eop, loopback);
+ enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
+ hdr_len, vlan_tag_insert, vlan_tag, eop,
+ loopback);
if (!eop)
- enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+ return err;
}
-static inline void enic_queue_wq_skb_tso(struct enic *enic,
- struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
- int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
+ struct sk_buff *skb, unsigned int mss,
+ int vlan_tag_insert, unsigned int vlan_tag,
+ int loopback)
{
unsigned int frag_len_left = skb_headlen(skb);
unsigned int len_left = skb->len - frag_len_left;
@@ -454,20 +469,19 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
*/
while (frag_len_left) {
len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
- dma_addr = pci_map_single(enic->pdev, skb->data + offset,
- len, PCI_DMA_TODEVICE);
- enic_queue_wq_desc_tso(wq, skb,
- dma_addr,
- len,
- mss, hdr_len,
- vlan_tag_insert, vlan_tag,
- eop && (len == frag_len_left), loopback);
+ dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(enic_dma_map_check(enic, dma_addr)))
+ return -ENOMEM;
+ enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
+ vlan_tag_insert, vlan_tag,
+ eop && (len == frag_len_left), loopback);
frag_len_left -= len;
offset += len;
}
if (eop)
- return;
+ return 0;
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
* for additional data fragments
@@ -483,16 +497,18 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
offset, len,
DMA_TO_DEVICE);
- enic_queue_wq_desc_cont(wq, skb,
- dma_addr,
- len,
- (len_left == 0) &&
- (len == frag_len_left), /* EOP? */
- loopback);
+ if (unlikely(enic_dma_map_check(enic, dma_addr)))
+ return -ENOMEM;
+ enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
+ (len_left == 0) &&
+ (len == frag_len_left),/*EOP*/
+ loopback);
frag_len_left -= len;
offset += len;
}
}
+
+ return 0;
}
static inline void enic_queue_wq_skb(struct enic *enic,
@@ -502,25 +518,42 @@ static inline void enic_queue_wq_skb(struct enic *enic,
unsigned int vlan_tag = 0;
int vlan_tag_insert = 0;
int loopback = 0;
+ int err;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
/* VLAN tag from trunking driver */
vlan_tag_insert = 1;
- vlan_tag = vlan_tx_tag_get(skb);
+ vlan_tag = skb_vlan_tag_get(skb);
} else if (enic->loop_enable) {
vlan_tag = enic->loop_tag;
loopback = 1;
}
if (mss)
- enic_queue_wq_skb_tso(enic, wq, skb, mss,
- vlan_tag_insert, vlan_tag, loopback);
+ err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
+ vlan_tag_insert, vlan_tag,
+ loopback);
else if (skb->ip_summed == CHECKSUM_PARTIAL)
- enic_queue_wq_skb_csum_l4(enic, wq, skb,
- vlan_tag_insert, vlan_tag, loopback);
+ err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
+ vlan_tag, loopback);
else
- enic_queue_wq_skb_vlan(enic, wq, skb,
- vlan_tag_insert, vlan_tag, loopback);
+ err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
+ vlan_tag, loopback);
+ if (unlikely(err)) {
+ struct vnic_wq_buf *buf;
+
+ buf = wq->to_use->prev;
+		/* Walk back while we have not hit the EOP of the previous
+		 * packet and the queue is not empty; for all non-EOP bufs,
+		 * os_buf is NULL.
+		 */
+ while (!buf->os_buf && (buf->next != wq->to_clean)) {
+ enic_free_wq_buf(wq, buf);
+ wq->ring.desc_avail++;
+ buf = buf->prev;
+ }
+ wq->to_use = buf->next;
+ dev_kfree_skb(skb);
+ }
}
/* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -950,8 +983,12 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
if (!skb)
return -ENOMEM;
- dma_addr = pci_map_single(enic->pdev, skb->data,
- len, PCI_DMA_FROMDEVICE);
+ dma_addr = pci_map_single(enic->pdev, skb->data, len,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(enic_dma_map_check(enic, dma_addr))) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
enic_queue_rq_desc(rq, skb, os_buf_index,
dma_addr, len);
@@ -2231,6 +2268,18 @@ static void enic_dev_deinit(struct enic *enic)
enic_clear_intr_mode(enic);
}
+static void enic_kdump_kernel_config(struct enic *enic)
+{
+ if (is_kdump_kernel()) {
+ dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+ enic->rq_count = 1;
+ enic->wq_count = 1;
+ enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+ enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+ enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+ }
+}
+
static int enic_dev_init(struct enic *enic)
{
struct device *dev = enic_get_dev(enic);
@@ -2260,6 +2309,10 @@ static int enic_dev_init(struct enic *enic)
enic_get_res_counts(enic);
+	/* Modify resource counts if we are running in a kdump kernel */
+ enic_kdump_kernel_config(enic);
+
/* Set interrupt mode based on resource counts and system
* capabilities
*/
diff --git a/drivers/net/ethernet/cisco/enic/vnic_stats.h b/drivers/net/ethernet/cisco/enic/vnic_stats.h
index 77750ec93954..74c81ed6fdab 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_stats.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h
@@ -62,6 +62,11 @@ struct vnic_rx_stats {
u64 rsvd[16];
};
+/* Generic statistics */
+struct vnic_gen_stats {
+ u64 dma_map_error;
+};
+
struct vnic_stats {
struct vnic_tx_stats tx;
struct vnic_rx_stats rx;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 3e6b8d54dafc..b5a1c937fad2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -47,11 +47,14 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
wq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = wq->bufs[0];
+ buf->next->prev = buf;
break;
} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
buf->next = wq->bufs[i + 1];
+ buf->next->prev = buf;
} else {
buf->next = buf + 1;
+ buf->next->prev = buf;
buf++;
}
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 816f1ad6072f..296154351823 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -62,6 +62,7 @@ struct vnic_wq_buf {
uint8_t cq_entry; /* Gets completion event from hw */
uint8_t desc_skip_cnt; /* Num descs to occupy */
uint8_t compressed_send; /* Both hdr and payload in one desc */
+ struct vnic_wq_buf *prev;
};
/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 712e7f8e1df7..9fa2569f56cb 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -243,7 +243,6 @@ struct be_tx_stats {
u64 tx_bytes;
u64 tx_pkts;
u64 tx_reqs;
- u64 tx_wrbs;
u64 tx_compl;
ulong tx_jiffies;
u32 tx_stops;
@@ -266,6 +265,9 @@ struct be_tx_obj {
/* Remember the skbs that were transmitted */
struct sk_buff *sent_skb_list[TX_Q_LEN];
struct be_tx_stats stats;
+ u16 pend_wrb_cnt; /* Number of WRBs yet to be given to HW */
+ u16 last_req_wrb_cnt; /* wrb cnt of the last req in the Q */
+ u16 last_req_hdr; /* index of the last req's hdr-wrb */
} ____cacheline_aligned_in_smp;
/* Struct to remember the pages posted for rx frags */
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 73a500ccbf69..32c53bc0e07a 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -193,8 +193,6 @@ static const struct be_ethtool_stat et_tx_stats[] = {
{DRVSTAT_TX_INFO(tx_pkts)},
	/* Number of skbs queued for transmission by the driver */
{DRVSTAT_TX_INFO(tx_reqs)},
- /* Number of TX work request blocks DMAed to HW */
- {DRVSTAT_TX_INFO(tx_wrbs)},
/* Number of times the TX queue was stopped due to lack
* of spaces in the TXQ.
*/
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 295ee0835ba0..6d7b3a4d3cff 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -311,6 +311,11 @@ struct amap_eth_hdr_wrb {
u8 vlan_tag[16];
} __packed;
+#define TX_HDR_WRB_COMPL 1 /* word 2 */
+#define TX_HDR_WRB_EVT (1 << 1) /* word 2 */
+#define TX_HDR_WRB_NUM_SHIFT 13 /* word 2: bits 13:17 */
+#define TX_HDR_WRB_NUM_MASK 0x1F /* word 2: bits 13:17 */
+
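A minimal sketch (not in the patch) of reading the wrb count back out of word 2 with the mask/shift above, assuming the header has already been converted to little-endian with be_dws_cpu_to_le() as the driver does before posting:

static inline u32 tx_hdr_wrb_num(const struct be_eth_hdr_wrb *hdr)
{
	return (le32_to_cpu(hdr->dw[2]) >> TX_HDR_WRB_NUM_SHIFT) &
	       TX_HDR_WRB_NUM_MASK;
}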
struct be_eth_hdr_wrb {
u32 dw[4];
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 41a0a5498da7..ed46610e5453 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -662,41 +662,22 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
netif_carrier_off(netdev);
}
-static void be_tx_stats_update(struct be_tx_obj *txo,
- u32 wrb_cnt, u32 copied, u32 gso_segs,
- bool stopped)
+static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
struct be_tx_stats *stats = tx_stats(txo);
u64_stats_update_begin(&stats->sync);
stats->tx_reqs++;
- stats->tx_wrbs += wrb_cnt;
- stats->tx_bytes += copied;
- stats->tx_pkts += (gso_segs ? gso_segs : 1);
- if (stopped)
- stats->tx_stops++;
+ stats->tx_bytes += skb->len;
+ stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
u64_stats_update_end(&stats->sync);
}
-/* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
- bool *dummy)
+/* Returns number of WRBs needed for the skb */
+static u32 skb_wrb_cnt(struct sk_buff *skb)
{
- int cnt = (skb->len > skb->data_len);
-
- cnt += skb_shinfo(skb)->nr_frags;
-
- /* to account for hdr wrb */
- cnt++;
- if (lancer_chip(adapter) || !(cnt & 1)) {
- *dummy = false;
- } else {
- /* add a dummy to make it an even num */
- cnt++;
- *dummy = true;
- }
- BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
- return cnt;
+ /* +1 for the header wrb */
+ return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
@@ -713,7 +694,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
u8 vlan_prio;
u16 vlan_tag;
- vlan_tag = vlan_tx_tag_get(skb);
+ vlan_tag = skb_vlan_tag_get(skb);
vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
/* If vlan priority provided by OS is NOT in available bmap */
if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
@@ -764,17 +745,20 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
}
- /* To skip HW VLAN tagging: evt = 1, compl = 0 */
- SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
- SET_TX_WRB_HDR_BITS(event, hdr, 1);
SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
SET_TX_WRB_HDR_BITS(len, hdr, len);
+
+	/* The hack to skip HW VLAN tagging needs evt = 1, compl = 0.
+	 * When the hack is not needed, the evt bit is set while ringing the DB.
+	 */
+ if (skip_hw_vlan)
+ SET_TX_WRB_HDR_BITS(event, hdr, 1);
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -794,22 +778,24 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
}
}
-static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
- struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
- bool skip_hw_vlan)
+/* Returns the number of WRBs used up by the skb */
+static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
+ struct sk_buff *skb, bool skip_hw_vlan)
{
- dma_addr_t busaddr;
- int i, copied = 0;
+ u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
struct device *dev = &adapter->pdev->dev;
- struct sk_buff *first_skb = skb;
- struct be_eth_wrb *wrb;
+ struct be_queue_info *txq = &txo->q;
struct be_eth_hdr_wrb *hdr;
bool map_single = false;
- u16 map_head;
+ struct be_eth_wrb *wrb;
+ dma_addr_t busaddr;
+ u16 head = txq->head;
hdr = queue_head_node(txq);
+ wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
+ be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
queue_head_inc(txq);
- map_head = txq->head;
if (skb->len > skb->data_len) {
int len = skb_headlen(skb);
@@ -839,19 +825,23 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
copied += skb_frag_size(frag);
}
- if (dummy_wrb) {
- wrb = queue_head_node(txq);
- wrb_fill(wrb, 0, 0);
- be_dws_cpu_to_le(wrb, sizeof(*wrb));
- queue_head_inc(txq);
- }
+ BUG_ON(txo->sent_skb_list[head]);
+ txo->sent_skb_list[head] = skb;
+ txo->last_req_hdr = head;
+ atomic_add(wrb_cnt, &txq->used);
+ txo->last_req_wrb_cnt = wrb_cnt;
+ txo->pend_wrb_cnt += wrb_cnt;
- wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
- be_dws_cpu_to_le(hdr, sizeof(*hdr));
+ be_tx_stats_update(txo, skb);
+ return wrb_cnt;
- return copied;
dma_err:
- txq->head = map_head;
+ /* Bring the queue back to the state it was in before this
+ * routine was invoked.
+ */
+ txq->head = head;
+ /* skip the first wrb (hdr); it's not mapped */
+ queue_head_inc(txq);
while (copied) {
wrb = queue_head_node(txq);
unmap_tx_frag(dev, wrb, map_single);
@@ -860,6 +850,7 @@ dma_err:
adapter->drv_stats.dma_map_errors++;
queue_head_inc(txq);
}
+ txq->head = head;
return 0;
}
@@ -873,7 +864,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
if (unlikely(!skb))
return skb;
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
@@ -932,7 +923,7 @@ static bool be_ipv6_exthdr_check(struct sk_buff *skb)
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
- return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
+ return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
@@ -955,7 +946,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
VLAN_ETH_HLEN : ETH_HLEN;
if (skb->len <= 60 &&
- (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
+ (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
is_ipv4_pkt(skb)) {
ip = (struct iphdr *)ip_hdr(skb);
pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
@@ -973,7 +964,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
* Manually insert VLAN in pkt.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL &&
- vlan_tx_tag_present(skb)) {
+ skb_vlan_tag_present(skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
if (unlikely(!skb))
goto err;
@@ -1030,52 +1021,64 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
return skb;
}
+static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
+{
+ struct be_queue_info *txq = &txo->q;
+ struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
+
+ /* Mark the last request eventable if it hasn't been marked already */
+ if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
+ hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
+
+	/* compose a dummy wrb if there is an odd number of wrbs to notify */
+ if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
+ wrb_fill(queue_head_node(txq), 0, 0);
+ queue_head_inc(txq);
+ atomic_inc(&txq->used);
+ txo->pend_wrb_cnt++;
+ hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
+ TX_HDR_WRB_NUM_SHIFT);
+ hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
+ TX_HDR_WRB_NUM_SHIFT);
+ }
+ be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
+ txo->pend_wrb_cnt = 0;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
+ bool skip_hw_vlan = false, flush = !skb->xmit_more;
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
+ u16 q_idx = skb_get_queue_mapping(skb);
+ struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
struct be_queue_info *txq = &txo->q;
- bool dummy_wrb, stopped = false;
- u32 wrb_cnt = 0, copied = 0;
- bool skip_hw_vlan = false;
- u32 start = txq->head;
+ u16 wrb_cnt;
skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
- if (!skb) {
- tx_stats(txo)->tx_drv_drops++;
- return NETDEV_TX_OK;
- }
-
- wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
+ if (unlikely(!skb))
+ goto drop;
- copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
- skip_hw_vlan);
- if (copied) {
- int gso_segs = skb_shinfo(skb)->gso_segs;
+ wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
+ if (unlikely(!wrb_cnt)) {
+ dev_kfree_skb_any(skb);
+ goto drop;
+ }
- /* record the sent skb in the sent_skb table */
- BUG_ON(txo->sent_skb_list[start]);
- txo->sent_skb_list[start] = skb;
+ if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
+ netif_stop_subqueue(netdev, q_idx);
+ tx_stats(txo)->tx_stops++;
+ }
- /* Ensure txq has space for the next skb; Else stop the queue
- * *BEFORE* ringing the tx doorbell, so that we serialze the
- * tx compls of the current transmit which'll wake up the queue
- */
- atomic_add(wrb_cnt, &txq->used);
- if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
- txq->len) {
- netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
- stopped = true;
- }
+ if (flush || __netif_subqueue_stopped(netdev, q_idx))
+ be_xmit_flush(adapter, txo);
- be_txq_notify(adapter, txo, wrb_cnt);
+ return NETDEV_TX_OK;
+drop:
+ tx_stats(txo)->tx_drv_drops++;
+ /* Flush the already enqueued tx requests */
+ if (flush && txo->pend_wrb_cnt)
+ be_xmit_flush(adapter, txo);
- be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
- } else {
- txq->head = start;
- tx_stats(txo)->tx_drv_drops++;
- dev_kfree_skb_any(skb);
- }
return NETDEV_TX_OK;
}
@@ -1959,32 +1962,34 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
static u16 be_tx_compl_process(struct be_adapter *adapter,
struct be_tx_obj *txo, u16 last_index)
{
+ struct sk_buff **sent_skbs = txo->sent_skb_list;
struct be_queue_info *txq = &txo->q;
+ u16 frag_index, num_wrbs = 0;
+ struct sk_buff *skb = NULL;
+ bool unmap_skb_hdr = false;
struct be_eth_wrb *wrb;
- struct sk_buff **sent_skbs = txo->sent_skb_list;
- struct sk_buff *sent_skb;
- u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
- bool unmap_skb_hdr = true;
-
- sent_skb = sent_skbs[txq->tail];
- BUG_ON(!sent_skb);
- sent_skbs[txq->tail] = NULL;
-
- /* skip header wrb */
- queue_tail_inc(txq);
do {
- cur_index = txq->tail;
+ if (sent_skbs[txq->tail]) {
+ /* Free skb from prev req */
+ if (skb)
+ dev_consume_skb_any(skb);
+ skb = sent_skbs[txq->tail];
+ sent_skbs[txq->tail] = NULL;
+ queue_tail_inc(txq); /* skip hdr wrb */
+ num_wrbs++;
+ unmap_skb_hdr = true;
+ }
wrb = queue_tail_node(txq);
+ frag_index = txq->tail;
unmap_tx_frag(&adapter->pdev->dev, wrb,
- (unmap_skb_hdr && skb_headlen(sent_skb)));
+ (unmap_skb_hdr && skb_headlen(skb)));
unmap_skb_hdr = false;
-
- num_wrbs++;
queue_tail_inc(txq);
- } while (cur_index != last_index);
+ num_wrbs++;
+ } while (frag_index != last_index);
+ dev_consume_skb_any(skb);
- dev_consume_skb_any(sent_skb);
return num_wrbs;
}
@@ -2068,12 +2073,11 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
static void be_tx_compl_clean(struct be_adapter *adapter)
{
+ u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
+ struct device *dev = &adapter->pdev->dev;
struct be_tx_obj *txo;
struct be_queue_info *txq;
struct be_eth_tx_compl *txcp;
- u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
- struct sk_buff *sent_skb;
- bool dummy_wrb;
int i, pending_txqs;
/* Stop polling for compls when HW has been silent for 10ms */
@@ -2095,7 +2099,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
atomic_sub(num_wrbs, &txq->used);
timeo = 0;
}
- if (atomic_read(&txq->used) == 0)
+ if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
pending_txqs--;
}
@@ -2105,21 +2109,29 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
mdelay(1);
} while (true);
+ /* Free enqueued TX that was never notified to HW */
for_all_tx_queues(adapter, txo, i) {
txq = &txo->q;
- if (atomic_read(&txq->used))
- dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
- atomic_read(&txq->used));
- /* free posted tx for which compls will never arrive */
- while (atomic_read(&txq->used)) {
- sent_skb = txo->sent_skb_list[txq->tail];
+ if (atomic_read(&txq->used)) {
+ dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
+ i, atomic_read(&txq->used));
+ notified_idx = txq->tail;
end_idx = txq->tail;
- num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
- &dummy_wrb);
- index_adv(&end_idx, num_wrbs - 1, txq->len);
+ index_adv(&end_idx, atomic_read(&txq->used) - 1,
+ txq->len);
+ /* Use the tx-compl process logic to handle requests
+ * that were not sent to the HW.
+ */
num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
atomic_sub(num_wrbs, &txq->used);
+ BUG_ON(atomic_read(&txq->used));
+ txo->pend_wrb_cnt = 0;
+ /* Since hw was never notified of these requests,
+ * reset TXQ indices
+ */
+ txq->head = notified_idx;
+ txq->tail = notified_idx;
}
}
}
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 270308315d43..ba84c4a9ce32 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -69,7 +69,8 @@ config FSL_XGMAC_MDIO
select PHYLIB
select OF_MDIO
---help---
- This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+ This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
+	  on the FMan mEMAC (which supports both Clauses 22 and 45).
config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 40132929daf7..a86af8a7485d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -16,6 +16,7 @@
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
@@ -356,6 +357,7 @@ struct bufdesc_ex {
#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
+#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
@@ -513,6 +515,7 @@ struct fec_enet_private {
int irq[FEC_IRQ_NUM];
bool bufdesc_ex;
int pause_flag;
+ int wol_flag;
u32 quirks;
struct napi_struct napi;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bba87775419d..1c7a7e43dd9c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -188,6 +188,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
+/* FEC ECR bits definition */
+#define FEC_ECR_MAGICEN (1 << 2)
+#define FEC_ECR_SLEEP (1 << 3)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -196,6 +199,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_PAUSE_FLAG_AUTONEG 0x1
#define FEC_PAUSE_FLAG_ENABLE 0x2
+#define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0)
+#define FEC_WOL_FLAG_ENABLE (0x1 << 1)
+#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
#define COPYBREAK_DEFAULT 256
@@ -1090,7 +1096,9 @@ static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+ u32 val;
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -1104,17 +1112,28 @@ fec_stop(struct net_device *ndev)
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
*/
- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
- writel(0, fep->hwp + FEC_ECNTRL);
+ if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
} else {
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
+ writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+
+ if (pdata && pdata->sleep_mode_enable)
+ pdata->sleep_mode_enable(true);
}
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
- if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+ if (fep->quirks & FEC_QUIRK_ENET_MAC &&
+ !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
writel(2, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
@@ -2428,6 +2447,44 @@ static int fec_enet_set_tunable(struct net_device *netdev,
return ret;
}
+static void
+fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
+ } else {
+ wol->supported = wol->wolopts = 0;
+ }
+}
+
+static int
+fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
+ return -EINVAL;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
+ if (device_may_wakeup(&ndev->dev)) {
+ fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
+ if (fep->irq[0] > 0)
+ enable_irq_wake(fep->irq[0]);
+ } else {
+ fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
+ if (fep->irq[0] > 0)
+ disable_irq_wake(fep->irq[0]);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
@@ -2446,6 +2503,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_ts_info = fec_enet_get_ts_info,
.get_tunable = fec_enet_get_tunable,
.set_tunable = fec_enet_set_tunable,
+ .get_wol = fec_enet_get_wol,
+ .set_wol = fec_enet_set_wol,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2706,6 +2765,9 @@ fec_enet_open(struct net_device *ndev)
phy_start(fep->phy_dev);
netif_tx_start_all_queues(ndev);
+ device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
+ FEC_WOL_FLAG_ENABLE);
+
return 0;
err_enet_mii_probe:
@@ -3155,6 +3217,9 @@ fec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
+ if (of_get_property(np, "fsl,magic-packet", NULL))
+ fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+
phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(np)) {
ret = of_phy_register_fixed_link(np);
@@ -3249,6 +3314,8 @@ fec_probe(struct platform_device *pdev)
0, pdev->name, ndev);
if (ret)
goto failed_irq;
+
+ fep->irq[i] = irq;
}
init_completion(&fep->mdio_done);
@@ -3265,6 +3332,9 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_register;
+ device_init_wakeup(&ndev->dev, fep->wol_flag &
+ FEC_WOL_HAS_MAGIC_PACKET);
+
if (fep->bufdesc_ex && fep->ptp_clock)
netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
@@ -3318,6 +3388,8 @@ static int __maybe_unused fec_suspend(struct device *dev)
rtnl_lock();
if (netif_running(ndev)) {
+ if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
+ fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
phy_stop(fep->phy_dev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
@@ -3325,11 +3397,12 @@ static int __maybe_unused fec_suspend(struct device *dev)
netif_tx_unlock_bh(ndev);
fec_stop(ndev);
fec_enet_clk_enable(ndev, false);
- pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
}
rtnl_unlock();
- if (fep->reg_phy)
+ if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
regulator_disable(fep->reg_phy);
/* SOC supply clock to phy, when clock is disabled, phy link down
@@ -3345,9 +3418,11 @@ static int __maybe_unused fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
int ret;
+ int val;
- if (fep->reg_phy) {
+ if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
ret = regulator_enable(fep->reg_phy);
if (ret)
return ret;
@@ -3355,12 +3430,21 @@ static int __maybe_unused fec_resume(struct device *dev)
rtnl_lock();
if (netif_running(ndev)) {
- pinctrl_pm_select_default_state(&fep->pdev->dev);
ret = fec_enet_clk_enable(ndev, true);
if (ret) {
rtnl_unlock();
goto failed_clk;
}
+ if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
+ if (pdata && pdata->sleep_mode_enable)
+ pdata->sleep_mode_enable(false);
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
+ } else {
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ }
fec_restart(ndev);
netif_tx_lock_bh(ndev);
netif_device_attach(ndev);
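On suspend with wake-on-LAN armed, the changes above keep the PHY regulator and the default pinctrl state, set the magic-packet and sleep bits in ECNTRL, and call the platform sleep hook; resume clears exactly those bits again. The read-modify-write pattern is sketched below as a standalone host program; ECR_MAGICEN and ECR_SLEEP are illustrative bit positions, not values taken from the FEC reference manual.

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit positions; the real FEC_ECR_MAGICEN/FEC_ECR_SLEEP values
 * live in the driver's fec.h and may differ. */
#define ECR_MAGICEN  (1u << 2)
#define ECR_SLEEP    (1u << 3)

static uint32_t ecntrl = 0x00000002;	/* pretend current register contents */

static uint32_t readl_sim(void)	   { return ecntrl; }
static void writel_sim(uint32_t v) { ecntrl = v; }

int main(void)
{
	uint32_t val;

	/* suspend path: arm magic-packet wake and put the MAC to sleep */
	val = readl_sim();
	val |= ECR_MAGICEN | ECR_SLEEP;
	writel_sim(val);
	printf("suspended: 0x%08x\n", ecntrl);

	/* resume path: clear exactly the bits set above */
	val = readl_sim();
	val &= ~(ECR_MAGICEN | ECR_SLEEP);
	writel_sim(val);
	printf("resumed:   0x%08x\n", ecntrl);
	return 0;
}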
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 992c8c3db553..1f9cf2345266 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -374,23 +374,9 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
unsigned long flags;
- u64 now;
- u32 counter;
spin_lock_irqsave(&fep->tmreg_lock, flags);
-
- now = timecounter_read(&fep->tc);
- now += delta;
-
- /* Get the timer value based on adjusted timestamp.
- * Update the counter with the masked value.
- */
- counter = now & fep->cc.mask;
- writel(counter, fep->hwp + FEC_ATIME);
-
- /* reset the timecounter */
- timecounter_init(&fep->tc, &fep->cc, now);
-
+ timecounter_adjtime(&fep->tc, delta);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return 0;
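The removed body read the timecounter, added the delta, rewrote FEC_ATIME and re-initialized the counter; timecounter_adjtime() collapses that into a single signed offset of the accumulated time. The equivalence can be modeled in user space with a structure that keeps only a nanosecond accumulator, as in the sketch below (tc_model and both helpers are hypothetical simplifications, not the kernel's struct timecounter).

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for struct timecounter: only the field needed here. */
struct tc_model {
	uint64_t nsec;		/* accumulated nanoseconds */
};

/* What the old code effectively did: now = read + delta, re-init at now. */
static void adjtime_open_coded(struct tc_model *tc, int64_t delta)
{
	uint64_t now = tc->nsec;	/* timecounter_read() */
	now += delta;
	tc->nsec = now;			/* timecounter_init() at the new time */
}

/* What timecounter_adjtime() boils down to in this simplified model. */
static void adjtime_helper(struct tc_model *tc, int64_t delta)
{
	tc->nsec += delta;
}

int main(void)
{
	struct tc_model a = { .nsec = 1000000000ULL };
	struct tc_model b = a;

	adjtime_open_coded(&a, -2500);
	adjtime_helper(&b, -2500);
	printf("open-coded: %llu, helper: %llu\n",
	       (unsigned long long)a.nsec, (unsigned long long)b.nsec);
	return 0;
}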
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 5645342f5b28..93ff846e96f1 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -116,7 +116,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
-struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr);
+static struct sk_buff *gfar_new_skb(struct net_device *dev,
+ dma_addr_t *bufaddr);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -176,7 +177,7 @@ static int gfar_init_bds(struct net_device *ndev)
struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *txbdp;
struct rxbd8 *rxbdp;
- u32 *rfbptr;
+ u32 __iomem *rfbptr;
int i, j;
dma_addr_t bufaddr;
@@ -554,7 +555,7 @@ static void gfar_ints_enable(struct gfar_private *priv)
}
}
-void lock_tx_qs(struct gfar_private *priv)
+static void lock_tx_qs(struct gfar_private *priv)
{
int i;
@@ -562,7 +563,7 @@ void lock_tx_qs(struct gfar_private *priv)
spin_lock(&priv->tx_queue[i]->txlock);
}
-void unlock_tx_qs(struct gfar_private *priv)
+static void unlock_tx_qs(struct gfar_private *priv)
{
int i;
@@ -2169,7 +2170,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
fcb->flags |= TXFCB_VLN;
- fcb->vlctl = vlan_tx_tag_get(skb);
+ fcb->vlctl = skb_vlan_tag_get(skb);
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
@@ -2229,7 +2230,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
regs = tx_queue->grp->regs;
do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
- do_vlan = vlan_tx_tag_present(skb);
+ do_vlan = skb_vlan_tag_present(skb);
do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en;
@@ -2671,7 +2672,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
return skb;
}
-struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
+static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
{
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index b581b8823a2a..9e1802400c23 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1039,7 +1039,7 @@ struct gfar_priv_rx_q {
/* RX Coalescing values */
unsigned char rxcoalescing;
unsigned long rxic;
- u32 *rfbptr;
+ u32 __iomem *rfbptr;
};
enum gfar_irqinfo_id {
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 6e7db66069aa..3a76e235fff6 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -32,18 +32,19 @@ struct tgec_mdio_controller {
__be32 mdio_addr; /* MDIO address */
} __packed;
+#define MDIO_STAT_ENC BIT(6)
#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
-#define MDIO_STAT_BSY (1 << 0)
-#define MDIO_STAT_RD_ER (1 << 1)
+#define MDIO_STAT_BSY BIT(0)
+#define MDIO_STAT_RD_ER BIT(1)
#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
-#define MDIO_CTL_PRE_DIS (1 << 10)
-#define MDIO_CTL_SCAN_EN (1 << 11)
-#define MDIO_CTL_POST_INC (1 << 14)
-#define MDIO_CTL_READ (1 << 15)
+#define MDIO_CTL_PRE_DIS BIT(10)
+#define MDIO_CTL_SCAN_EN BIT(11)
+#define MDIO_CTL_POST_INC BIT(14)
+#define MDIO_CTL_READ BIT(15)
#define MDIO_DATA(x) (x & 0xffff)
-#define MDIO_DATA_BSY (1 << 31)
+#define MDIO_DATA_BSY BIT(31)
/*
* Wait until the MDIO bus is free
@@ -91,26 +92,39 @@ static int xgmac_wait_until_done(struct device *dev,
static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
struct tgec_mdio_controller __iomem *regs = bus->priv;
- uint16_t dev_addr = regnum >> 16;
+ uint16_t dev_addr;
+ u32 mdio_ctl, mdio_stat;
int ret;
- /* Setup the MII Mgmt clock speed */
- out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+ mdio_stat = in_be32(&regs->mdio_stat);
+ if (regnum & MII_ADDR_C45) {
+ /* Clause 45 (ie 10G) */
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_stat |= MDIO_STAT_ENC;
+ } else {
+ /* Clause 22 (ie 1G) */
+ dev_addr = regnum & 0x1f;
+ mdio_stat &= ~MDIO_STAT_ENC;
+ }
+
+ out_be32(&regs->mdio_stat, mdio_stat);
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
return ret;
/* Set the port and dev addr */
- out_be32(&regs->mdio_ctl,
- MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ out_be32(&regs->mdio_ctl, mdio_ctl);
/* Set the register address */
- out_be32(&regs->mdio_addr, regnum & 0xffff);
+ if (regnum & MII_ADDR_C45) {
+ out_be32(&regs->mdio_addr, regnum & 0xffff);
- ret = xgmac_wait_until_free(&bus->dev, regs);
- if (ret)
- return ret;
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+ }
/* Write the value to the register */
out_be32(&regs->mdio_data, MDIO_DATA(value));
@@ -130,13 +144,22 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct tgec_mdio_controller __iomem *regs = bus->priv;
- uint16_t dev_addr = regnum >> 16;
+ uint16_t dev_addr;
+ uint32_t mdio_stat;
uint32_t mdio_ctl;
uint16_t value;
int ret;
- /* Setup the MII Mgmt clock speed */
- out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+ mdio_stat = in_be32(&regs->mdio_stat);
+ if (regnum & MII_ADDR_C45) {
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_stat |= MDIO_STAT_ENC;
+ } else {
+ dev_addr = regnum & 0x1f;
+ mdio_stat &= ~MDIO_STAT_ENC;
+ }
+
+ out_be32(&regs->mdio_stat, mdio_stat);
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
@@ -147,11 +170,13 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
out_be32(&regs->mdio_ctl, mdio_ctl);
/* Set the register address */
- out_be32(&regs->mdio_addr, regnum & 0xffff);
+ if (regnum & MII_ADDR_C45) {
+ out_be32(&regs->mdio_addr, regnum & 0xffff);
- ret = xgmac_wait_until_free(&bus->dev, regs);
- if (ret)
- return ret;
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+ }
/* Initiate the read */
out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
@@ -174,24 +199,6 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return value;
}
-/* Reset the MIIM registers, and wait for the bus to free */
-static int xgmac_mdio_reset(struct mii_bus *bus)
-{
- struct tgec_mdio_controller __iomem *regs = bus->priv;
- int ret;
-
- mutex_lock(&bus->mdio_lock);
-
- /* Setup the MII Mgmt clock speed */
- out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
-
- ret = xgmac_wait_until_free(&bus->dev, regs);
-
- mutex_unlock(&bus->mdio_lock);
-
- return ret;
-}
-
static int xgmac_mdio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -205,15 +212,13 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
return ret;
}
- bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
+ bus = mdiobus_alloc();
if (!bus)
return -ENOMEM;
bus->name = "Freescale XGMAC MDIO Bus";
bus->read = xgmac_mdio_read;
bus->write = xgmac_mdio_write;
- bus->reset = xgmac_mdio_reset;
- bus->irq = bus->priv;
bus->parent = &pdev->dev;
snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
@@ -258,6 +263,9 @@ static struct of_device_id xgmac_mdio_match[] = {
{
.compatible = "fsl,fman-xmdio",
},
+ {
+ .compatible = "fsl,fman-memac-mdio",
+ },
{},
};
MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
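The rework above distinguishes clause-45 transactions, where phylib encodes the MMD device number in bits 20:16 of regnum and sets the MII_ADDR_C45 flag, from plain clause-22 accesses, where the low five bits are the register number and no separate address cycle is issued. A self-contained decode of both cases is sketched below; the MII_ADDR_C45 value matches the kernel headers of this era, everything else is illustrative.

#include <stdio.h>
#include <stdint.h>

#define MII_ADDR_C45	(1 << 30)	/* clause-45 flag, as in include/linux/mdio.h */

static void decode_regnum(uint32_t regnum)
{
	uint32_t dev_addr, reg;
	int c45 = !!(regnum & MII_ADDR_C45);

	if (c45) {
		dev_addr = (regnum >> 16) & 0x1f;	/* MMD device number */
		reg = regnum & 0xffff;			/* 16-bit register address */
	} else {
		dev_addr = regnum & 0x1f;	/* clause 22: 5-bit register number */
		reg = 0;			/* no separate address cycle */
	}
	printf("regnum=0x%08x c45=%d dev_addr=%u reg=0x%04x\n",
	       regnum, c45, dev_addr, reg);
}

int main(void)
{
	decode_regnum(0x01);					 /* clause 22, reg 1 */
	decode_regnum(MII_ADDR_C45 | (1 << 16) | 0x8000);	 /* clause 45, MMD 1 */
	return 0;
}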
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index e9421731b05e..a54d89791311 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -24,4 +24,13 @@ config HIX5HD2_GMAC
help
This selects the hix5hd2 mac family network device.
+config HIP04_ETH
+ tristate "HISILICON P04 Ethernet support"
+ select PHYLIB
+ select MARVELL_PHY
+ select MFD_SYSCON
+ help
+ If you wish to compile a kernel for hardware with a HiSilicon P04 SoC and
+ want to use the internal Ethernet, you should answer Y here.
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 9175e84622d4..6c14540a4dc5 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
+obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
new file mode 100644
index 000000000000..525214ef5984
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -0,0 +1,969 @@
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#define PPE_CFG_RX_ADDR 0x100
+#define PPE_CFG_POOL_GRP 0x300
+#define PPE_CFG_RX_BUF_SIZE 0x400
+#define PPE_CFG_RX_FIFO_SIZE 0x500
+#define PPE_CURR_BUF_CNT 0xa200
+
+#define GE_DUPLEX_TYPE 0x08
+#define GE_MAX_FRM_SIZE_REG 0x3c
+#define GE_PORT_MODE 0x40
+#define GE_PORT_EN 0x44
+#define GE_SHORT_RUNTS_THR_REG 0x50
+#define GE_TX_LOCAL_PAGE_REG 0x5c
+#define GE_TRANSMIT_CONTROL_REG 0x60
+#define GE_CF_CRC_STRIP_REG 0x1b0
+#define GE_MODE_CHANGE_REG 0x1b4
+#define GE_RECV_CONTROL_REG 0x1e0
+#define GE_STATION_MAC_ADDRESS 0x210
+#define PPE_CFG_CPU_ADD_ADDR 0x580
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x408
+#define PPE_CFG_BUS_CTRL_REG 0x424
+#define PPE_CFG_RX_CTRL_REG 0x428
+#define PPE_CFG_RX_PKT_MODE_REG 0x438
+#define PPE_CFG_QOS_VMID_GEN 0x500
+#define PPE_CFG_RX_PKT_INT 0x538
+#define PPE_INTEN 0x600
+#define PPE_INTSTS 0x608
+#define PPE_RINT 0x604
+#define PPE_CFG_STS_MODE 0x700
+#define PPE_HIS_RX_PKT_CNT 0x804
+
+/* REG_INTERRUPT */
+#define RCV_INT BIT(10)
+#define RCV_NOBUF BIT(8)
+#define RCV_DROP BIT(7)
+#define TX_DROP BIT(6)
+#define DEF_INT_ERR (RCV_NOBUF | RCV_DROP | TX_DROP)
+#define DEF_INT_MASK (RCV_INT | DEF_INT_ERR)
+
+/* TX descriptor config */
+#define TX_FREE_MEM BIT(0)
+#define TX_READ_ALLOC_L3 BIT(1)
+#define TX_FINISH_CACHE_INV BIT(2)
+#define TX_CLEAR_WB BIT(4)
+#define TX_L3_CHECKSUM BIT(5)
+#define TX_LOOP_BACK BIT(11)
+
+/* RX error */
+#define RX_PKT_DROP BIT(0)
+#define RX_L2_ERR BIT(1)
+#define RX_PKT_ERR (RX_PKT_DROP | RX_L2_ERR)
+
+#define SGMII_SPEED_1000 0x08
+#define SGMII_SPEED_100 0x07
+#define SGMII_SPEED_10 0x06
+#define MII_SPEED_100 0x01
+#define MII_SPEED_10 0x00
+
+#define GE_DUPLEX_FULL BIT(0)
+#define GE_DUPLEX_HALF 0x00
+#define GE_MODE_CHANGE_EN BIT(0)
+
+#define GE_TX_AUTO_NEG BIT(5)
+#define GE_TX_ADD_CRC BIT(6)
+#define GE_TX_SHORT_PAD_THROUGH BIT(7)
+
+#define GE_RX_STRIP_CRC BIT(0)
+#define GE_RX_STRIP_PAD BIT(3)
+#define GE_RX_PAD_EN BIT(4)
+
+#define GE_AUTO_NEG_CTL BIT(0)
+
+#define GE_RX_INT_THRESHOLD BIT(6)
+#define GE_RX_TIMEOUT 0x04
+
+#define GE_RX_PORT_EN BIT(1)
+#define GE_TX_PORT_EN BIT(2)
+
+#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
+
+#define PPE_CFG_RX_PKT_ALIGN BIT(18)
+#define PPE_CFG_QOS_VMID_MODE BIT(14)
+#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
+
+#define PPE_CFG_RX_FIFO_FSFU BIT(11)
+#define PPE_CFG_RX_DEPTH_SHIFT 16
+#define PPE_CFG_RX_START_SHIFT 0
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
+
+#define PPE_CFG_BUS_LOCAL_REL BIT(14)
+#define PPE_CFG_BUS_BIG_ENDIEN BIT(0)
+
+#define RX_DESC_NUM 128
+#define TX_DESC_NUM 256
+#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM-1))
+#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM-1))
+
+#define GMAC_PPE_RX_PKT_MAX_LEN 379
+#define GMAC_MAX_PKT_LEN 1516
+#define GMAC_MIN_PKT_LEN 31
+#define RX_BUF_SIZE 1600
+#define RESET_TIMEOUT 1000
+#define TX_TIMEOUT (6 * HZ)
+
+#define DRV_NAME "hip04-ether"
+#define DRV_VERSION "v1.0"
+
+#define HIP04_MAX_TX_COALESCE_USECS 200
+#define HIP04_MIN_TX_COALESCE_USECS 100
+#define HIP04_MAX_TX_COALESCE_FRAMES 200
+#define HIP04_MIN_TX_COALESCE_FRAMES 100
+
+struct tx_desc {
+ u32 send_addr;
+ u32 send_size;
+ u32 next_addr;
+ u32 cfg;
+ u32 wb_addr;
+} __aligned(64);
+
+struct rx_desc {
+ u16 reserved_16;
+ u16 pkt_len;
+ u32 reserve1[3];
+ u32 pkt_err;
+ u32 reserve2[4];
+};
+
+struct hip04_priv {
+ void __iomem *base;
+ int phy_mode;
+ int chan;
+ unsigned int port;
+ unsigned int speed;
+ unsigned int duplex;
+ unsigned int reg_inten;
+
+ struct napi_struct napi;
+ struct net_device *ndev;
+
+ struct tx_desc *tx_desc;
+ dma_addr_t tx_desc_dma;
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ dma_addr_t tx_phys[TX_DESC_NUM];
+ unsigned int tx_head;
+
+ int tx_coalesce_frames;
+ int tx_coalesce_usecs;
+ struct hrtimer tx_coalesce_timer;
+
+ unsigned char *rx_buf[RX_DESC_NUM];
+ dma_addr_t rx_phys[RX_DESC_NUM];
+ unsigned int rx_head;
+ unsigned int rx_buf_size;
+
+ struct device_node *phy_node;
+ struct phy_device *phy;
+ struct regmap *map;
+ struct work_struct tx_timeout_task;
+
+ /* written only by tx cleanup */
+ unsigned int tx_tail ____cacheline_aligned_in_smp;
+};
+
+static inline unsigned int tx_count(unsigned int head, unsigned int tail)
+{
+ return (head - tail) % (TX_DESC_NUM - 1);
+}
+
+static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ priv->speed = speed;
+ priv->duplex = duplex;
+
+ switch (priv->phy_mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ if (speed == SPEED_1000)
+ val = SGMII_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = SGMII_SPEED_100;
+ else
+ val = SGMII_SPEED_10;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if (speed == SPEED_100)
+ val = MII_SPEED_100;
+ else
+ val = MII_SPEED_10;
+ break;
+ default:
+ netdev_warn(ndev, "unsupported PHY mode\n");
+ val = MII_SPEED_10;
+ break;
+ }
+ writel_relaxed(val, priv->base + GE_PORT_MODE);
+
+ val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
+ writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);
+
+ val = GE_MODE_CHANGE_EN;
+ writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
+}
+
+static void hip04_reset_ppe(struct hip04_priv *priv)
+{
+ u32 val, tmp, timeout = 0;
+
+ do {
+ regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
+ regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
+ if (timeout++ > RESET_TIMEOUT)
+ break;
+ } while (val & 0xfff);
+}
+
+static void hip04_config_fifo(struct hip04_priv *priv)
+{
+ u32 val;
+
+ val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
+ val |= PPE_CFG_STS_RX_PKT_CNT_RC;
+ writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);
+
+ val = BIT(priv->port);
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
+
+ val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
+ val |= PPE_CFG_QOS_VMID_MODE;
+ writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);
+
+ val = RX_BUF_SIZE;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
+
+ val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
+ val |= PPE_CFG_RX_FIFO_FSFU;
+ val |= priv->chan << PPE_CFG_RX_START_SHIFT;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);
+
+ val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);
+
+ val = PPE_CFG_RX_PKT_ALIGN;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);
+
+ val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
+ writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);
+
+ val = GMAC_PPE_RX_PKT_MAX_LEN;
+ writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);
+
+ val = GMAC_MAX_PKT_LEN;
+ writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);
+
+ val = GMAC_MIN_PKT_LEN;
+ writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);
+
+ val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
+ val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
+ writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);
+
+ val = GE_RX_STRIP_CRC;
+ writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);
+
+ val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
+ val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
+ writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);
+
+ val = GE_AUTO_NEG_CTL;
+ writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
+}
+
+static void hip04_mac_enable(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ /* enable tx & rx */
+ val = readl_relaxed(priv->base + GE_PORT_EN);
+ val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
+ writel_relaxed(val, priv->base + GE_PORT_EN);
+
+ /* clear rx int */
+ val = RCV_INT;
+ writel_relaxed(val, priv->base + PPE_RINT);
+
+ /* config recv int */
+ val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);
+
+ /* enable interrupt */
+ priv->reg_inten = DEF_INT_MASK;
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+}
+
+static void hip04_mac_disable(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ /* disable int */
+ priv->reg_inten &= ~(DEF_INT_MASK);
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+
+ /* disable tx & rx */
+ val = readl_relaxed(priv->base + GE_PORT_EN);
+ val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
+ writel_relaxed(val, priv->base + GE_PORT_EN);
+}
+
+static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+ writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
+}
+
+static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
+}
+
+static u32 hip04_recv_cnt(struct hip04_priv *priv)
+{
+ return readl(priv->base + PPE_HIS_RX_PKT_CNT);
+}
+
+static void hip04_update_mac_address(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+
+ writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
+ priv->base + GE_STATION_MAC_ADDRESS);
+ writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
+ priv->base + GE_STATION_MAC_ADDRESS + 4);
+}
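hip04_update_mac_address() above splits the six-byte station address across two 32-bit registers, most significant byte first: the first two bytes land in GE_STATION_MAC_ADDRESS and the remaining four in the register four bytes above it. A quick host-side check of that packing, with an arbitrary example address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0xca, 0xfe, 0xba, 0xbe };
	uint32_t hi, lo;

	hi = (mac[0] << 8) | mac[1];
	lo = ((uint32_t)mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];

	/* GE_STATION_MAC_ADDRESS gets hi, GE_STATION_MAC_ADDRESS + 4 gets lo */
	printf("hi=0x%08x lo=0x%08x\n", hi, lo);	/* 0x00000200 0xcafebabe */
	return 0;
}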
+
+static int hip04_set_mac_address(struct net_device *ndev, void *addr)
+{
+ eth_mac_addr(ndev, addr);
+ hip04_update_mac_address(ndev);
+ return 0;
+}
+
+static int hip04_tx_reclaim(struct net_device *ndev, bool force)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ unsigned tx_tail = priv->tx_tail;
+ struct tx_desc *desc;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ unsigned int count;
+
+ smp_rmb();
+ count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
+ if (count == 0)
+ goto out;
+
+ while (count) {
+ desc = &priv->tx_desc[tx_tail];
+ if (desc->send_addr != 0) {
+ if (force)
+ desc->send_addr = 0;
+ else
+ break;
+ }
+
+ if (priv->tx_phys[tx_tail]) {
+ dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+ priv->tx_skb[tx_tail]->len,
+ DMA_TO_DEVICE);
+ priv->tx_phys[tx_tail] = 0;
+ }
+ pkts_compl++;
+ bytes_compl += priv->tx_skb[tx_tail]->len;
+ dev_kfree_skb(priv->tx_skb[tx_tail]);
+ priv->tx_skb[tx_tail] = NULL;
+ tx_tail = TX_NEXT(tx_tail);
+ count--;
+ }
+
+ priv->tx_tail = tx_tail;
+ smp_wmb(); /* Ensure tx_tail visible to xmit */
+
+out:
+ if (pkts_compl || bytes_compl)
+ netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
+ netif_wake_queue(ndev);
+
+ return count;
+}
+
+static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int tx_head = priv->tx_head, count;
+ struct tx_desc *desc = &priv->tx_desc[tx_head];
+ dma_addr_t phys;
+
+ smp_rmb();
+ count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
+ if (count == (TX_DESC_NUM - 1)) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, phys)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ priv->tx_skb[tx_head] = skb;
+ priv->tx_phys[tx_head] = phys;
+ desc->send_addr = cpu_to_be32(phys);
+ desc->send_size = cpu_to_be32(skb->len);
+ desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
+ phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
+ desc->wb_addr = cpu_to_be32(phys);
+ skb_tx_timestamp(skb);
+
+ hip04_set_xmit_desc(priv, phys);
+ priv->tx_head = TX_NEXT(tx_head);
+ count++;
+ netdev_sent_queue(ndev, skb->len);
+
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+
+ /* Ensure tx_head update visible to tx reclaim */
+ smp_wmb();
+
+ /* queue is getting full, better start cleaning up now */
+ if (count >= priv->tx_coalesce_frames) {
+ if (napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt and timer */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT,
+ priv->base + PPE_INTEN);
+ hrtimer_cancel(&priv->tx_coalesce_timer);
+ __napi_schedule(&priv->napi);
+ }
+ } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
+ /* cleanup not pending yet, start a new timer */
+ hrtimer_start_expires(&priv->tx_coalesce_timer,
+ HRTIMER_MODE_REL);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int hip04_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int cnt = hip04_recv_cnt(priv);
+ struct rx_desc *desc;
+ struct sk_buff *skb;
+ unsigned char *buf;
+ bool last = false;
+ dma_addr_t phys;
+ int rx = 0;
+ int tx_remaining;
+ u16 len;
+ u32 err;
+
+ while (cnt && !last) {
+ buf = priv->rx_buf[priv->rx_head];
+ skb = build_skb(buf, priv->rx_buf_size);
+ if (unlikely(!skb)) {
+ net_dbg_ratelimited("build_skb failed\n");
+ goto done;
+ }
+
+ dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[priv->rx_head] = 0;
+
+ desc = (struct rx_desc *)skb->data;
+ len = be16_to_cpu(desc->pkt_len);
+ err = be32_to_cpu(desc->pkt_err);
+
+ if (0 == len) {
+ dev_kfree_skb_any(skb);
+ last = true;
+ } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
+ dev_kfree_skb_any(skb);
+ stats->rx_dropped++;
+ stats->rx_errors++;
+ } else {
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ rx++;
+ }
+
+ buf = netdev_alloc_frag(priv->rx_buf_size);
+ if (!buf)
+ goto done;
+ phys = dma_map_single(&ndev->dev, buf,
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, phys))
+ goto done;
+ priv->rx_buf[priv->rx_head] = buf;
+ priv->rx_phys[priv->rx_head] = phys;
+ hip04_set_recv_desc(priv, phys);
+
+ priv->rx_head = RX_NEXT(priv->rx_head);
+ if (rx >= budget)
+ goto done;
+
+ if (--cnt == 0)
+ cnt = hip04_recv_cnt(priv);
+ }
+
+ if (!(priv->reg_inten & RCV_INT)) {
+ /* enable rx interrupt */
+ priv->reg_inten |= RCV_INT;
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+ }
+ napi_complete(napi);
+done:
+ /* clean up tx descriptors and start a new timer if necessary */
+ tx_remaining = hip04_tx_reclaim(ndev, false);
+ if (rx < budget && tx_remaining)
+ hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL);
+
+ return rx;
+}
+
+static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ u32 ists = readl_relaxed(priv->base + PPE_INTSTS);
+
+ if (!ists)
+ return IRQ_NONE;
+
+ writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);
+
+ if (unlikely(ists & DEF_INT_ERR)) {
+ if (ists & (RCV_NOBUF | RCV_DROP))
+ stats->rx_errors++;
+ stats->rx_dropped++;
+ netdev_err(ndev, "rx drop\n");
+ if (ists & TX_DROP) {
+ stats->tx_dropped++;
+ netdev_err(ndev, "tx drop\n");
+ }
+ }
+
+ if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+ hrtimer_cancel(&priv->tx_coalesce_timer);
+ __napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
+{
+ struct hip04_priv *priv;
+
+ priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+ __napi_schedule(&priv->napi);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static void hip04_adjust_link(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy = priv->phy;
+
+ if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
+ hip04_config_port(ndev, phy->speed, phy->duplex);
+ phy_print_status(phy);
+ }
+}
+
+static int hip04_mac_open(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ priv->rx_head = 0;
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ hip04_reset_ppe(priv);
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, phys))
+ return -EIO;
+
+ priv->rx_phys[i] = phys;
+ hip04_set_recv_desc(priv, phys);
+ }
+
+ if (priv->phy)
+ phy_start(priv->phy);
+
+ netdev_reset_queue(ndev);
+ netif_start_queue(ndev);
+ hip04_mac_enable(ndev);
+ napi_enable(&priv->napi);
+
+ return 0;
+}
+
+static int hip04_mac_stop(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ napi_disable(&priv->napi);
+ netif_stop_queue(ndev);
+ hip04_mac_disable(ndev);
+ hip04_tx_reclaim(ndev, true);
+ hip04_reset_ppe(priv);
+
+ if (priv->phy)
+ phy_stop(priv->phy);
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ if (priv->rx_phys[i]) {
+ dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[i] = 0;
+ }
+ }
+
+ return 0;
+}
+
+static void hip04_timeout(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static void hip04_tx_timeout_task(struct work_struct *work)
+{
+ struct hip04_priv *priv;
+
+ priv = container_of(work, struct hip04_priv, tx_timeout_task);
+ hip04_mac_stop(priv->ndev);
+ hip04_mac_open(priv->ndev);
+}
+
+static struct net_device_stats *hip04_get_stats(struct net_device *ndev)
+{
+ return &ndev->stats;
+}
+
+static int hip04_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct hip04_priv *priv = netdev_priv(netdev);
+
+ ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
+ ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;
+
+ return 0;
+}
+
+static int hip04_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct hip04_priv *priv = netdev_priv(netdev);
+
+ /* Check not supported parameters */
+ if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
+ (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
+ (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
+ (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
+ (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
+ (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
+ (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
+ (ec->tx_max_coalesced_frames_irq) ||
+ (ec->stats_block_coalesce_usecs) ||
+ (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
+ ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
+ (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
+ ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
+ return -EINVAL;
+
+ priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
+ priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;
+
+ return 0;
+}
+
+static void hip04_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+}
+
+static struct ethtool_ops hip04_ethtool_ops = {
+ .get_coalesce = hip04_get_coalesce,
+ .set_coalesce = hip04_set_coalesce,
+ .get_drvinfo = hip04_get_drvinfo,
+};
+
+static struct net_device_ops hip04_netdev_ops = {
+ .ndo_open = hip04_mac_open,
+ .ndo_stop = hip04_mac_stop,
+ .ndo_get_stats = hip04_get_stats,
+ .ndo_start_xmit = hip04_mac_start_xmit,
+ .ndo_set_mac_address = hip04_set_mac_address,
+ .ndo_tx_timeout = hip04_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ priv->tx_desc = dma_alloc_coherent(d,
+ TX_DESC_NUM * sizeof(struct tx_desc),
+ &priv->tx_desc_dma, GFP_KERNEL);
+ if (!priv->tx_desc)
+ return -ENOMEM;
+
+ priv->rx_buf_size = RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
+ if (!priv->rx_buf[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hip04_free_ring(struct net_device *ndev, struct device *d)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++)
+ if (priv->rx_buf[i])
+ put_page(virt_to_head_page(priv->rx_buf[i]));
+
+ for (i = 0; i < TX_DESC_NUM; i++)
+ if (priv->tx_skb[i])
+ dev_kfree_skb_any(priv->tx_skb[i]);
+
+ dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
+ priv->tx_desc, priv->tx_desc_dma);
+}
+
+static int hip04_mac_probe(struct platform_device *pdev)
+{
+ struct device *d = &pdev->dev;
+ struct device_node *node = d->of_node;
+ struct of_phandle_args arg;
+ struct net_device *ndev;
+ struct hip04_priv *priv;
+ struct resource *res;
+ unsigned int irq;
+ ktime_t txtime;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct hip04_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ platform_set_drvdata(pdev, ndev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(d, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto init_fail;
+ }
+
+ ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
+ if (ret < 0) {
+ dev_warn(d, "no port-handle\n");
+ goto init_fail;
+ }
+
+ priv->port = arg.args[0];
+ priv->chan = arg.args[1] * RX_DESC_NUM;
+
+ hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ /* BQL will try to keep the TX queue as short as possible, but it can't
+ * be faster than tx_coalesce_usecs, so we need a fast timeout here,
+ * but also long enough to gather up enough frames to ensure we don't
+ * get more interrupts than necessary.
+ * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
+ */
+ priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
+ priv->tx_coalesce_usecs = 200;
+ /* allow timer to fire after half the time at the earliest */
+ txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2);
+ hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime);
+ priv->tx_coalesce_timer.function = tx_done;
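The 200 us ceiling chosen above follows from the wire rate: 16 full-size frames of 1500 bytes are 192,000 bits, which take roughly 192 us to serialize at 1 Gb/s, so the timer fires at least once per ~16 frames even if the tx_coalesce_frames threshold is never reached. The arithmetic, as a trivial standalone check:

#include <stdio.h>

int main(void)
{
	const double frames = 16, bytes = 1500, gbps = 1e9;
	double usecs = frames * bytes * 8 / gbps * 1e6;

	/* 16 frames * 1500 B * 8 bit/B = 192,000 bit -> 192 us at 1 Gb/s */
	printf("%.0f us on the wire, timer ceiling 200 us\n", usecs);
	return 0;
}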
+
+ priv->map = syscon_node_to_regmap(arg.np);
+ if (IS_ERR(priv->map)) {
+ dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
+ ret = PTR_ERR(priv->map);
+ goto init_fail;
+ }
+
+ priv->phy_mode = of_get_phy_mode(node);
+ if (priv->phy_mode < 0) {
+ dev_warn(d, "phy-mode property not found\n");
+ ret = -EINVAL;
+ goto init_fail;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ ret = -EINVAL;
+ goto init_fail;
+ }
+
+ ret = devm_request_irq(d, irq, hip04_mac_interrupt,
+ 0, pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto init_fail;
+ }
+
+ priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ if (priv->phy_node) {
+ priv->phy = of_phy_connect(ndev, priv->phy_node,
+ &hip04_adjust_link,
+ 0, priv->phy_mode);
+ if (!priv->phy) {
+ ret = -EPROBE_DEFER;
+ goto init_fail;
+ }
+ }
+
+ INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);
+
+ ether_setup(ndev);
+ ndev->netdev_ops = &hip04_netdev_ops;
+ ndev->ethtool_ops = &hip04_ethtool_ops;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->irq = irq;
+ netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ hip04_reset_ppe(priv);
+ if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
+ hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
+
+ hip04_config_fifo(priv);
+ random_ether_addr(ndev->dev_addr);
+ hip04_update_mac_address(ndev);
+
+ ret = hip04_alloc_ring(ndev, d);
+ if (ret) {
+ netdev_err(ndev, "alloc ring fail\n");
+ goto alloc_fail;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ free_netdev(ndev);
+ goto alloc_fail;
+ }
+
+ return 0;
+
+alloc_fail:
+ hip04_free_ring(ndev, d);
+init_fail:
+ of_node_put(priv->phy_node);
+ free_netdev(ndev);
+ return ret;
+}
+
+static int hip04_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct device *d = &pdev->dev;
+
+ if (priv->phy)
+ phy_disconnect(priv->phy);
+
+ hip04_free_ring(ndev, d);
+ unregister_netdev(ndev);
+ free_irq(ndev->irq, ndev);
+ of_node_put(priv->phy_node);
+ cancel_work_sync(&priv->tx_timeout_task);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id hip04_mac_match[] = {
+ { .compatible = "hisilicon,hip04-mac" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, hip04_mac_match);
+
+static struct platform_driver hip04_mac_driver = {
+ .probe = hip04_mac_probe,
+ .remove = hip04_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = hip04_mac_match,
+ },
+};
+module_platform_driver(hip04_mac_driver);
+
+MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
new file mode 100644
index 000000000000..b3bac25db99c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c
@@ -0,0 +1,186 @@
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+
+#define MDIO_CMD_REG 0x0
+#define MDIO_ADDR_REG 0x4
+#define MDIO_WDATA_REG 0x8
+#define MDIO_RDATA_REG 0xc
+#define MDIO_STA_REG 0x10
+
+#define MDIO_START BIT(14)
+#define MDIO_R_VALID BIT(1)
+#define MDIO_READ (BIT(12) | BIT(11) | MDIO_START)
+#define MDIO_WRITE (BIT(12) | BIT(10) | MDIO_START)
+
+struct hip04_mdio_priv {
+ void __iomem *base;
+};
+
+#define WAIT_TIMEOUT 10
+static int hip04_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct hip04_mdio_priv *priv = bus->priv;
+ int i;
+
+ for (i = 0; readl_relaxed(priv->base + MDIO_CMD_REG) & MDIO_START; i++) {
+ if (i == WAIT_TIMEOUT)
+ return -ETIMEDOUT;
+ msleep(20);
+ }
+
+ return 0;
+}
+
+static int hip04_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct hip04_mdio_priv *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ ret = hip04_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ val = regnum | (mii_id << 5) | MDIO_READ;
+ writel_relaxed(val, priv->base + MDIO_CMD_REG);
+
+ ret = hip04_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ val = readl_relaxed(priv->base + MDIO_STA_REG);
+ if (val & MDIO_R_VALID) {
+ dev_err(bus->parent, "SMI bus read not valid\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ val = readl_relaxed(priv->base + MDIO_RDATA_REG);
+ ret = val & 0xFFFF;
+out:
+ return ret;
+}
+
+static int hip04_mdio_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ struct hip04_mdio_priv *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ ret = hip04_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(value, priv->base + MDIO_WDATA_REG);
+ val = regnum | (mii_id << 5) | MDIO_WRITE;
+ writel_relaxed(val, priv->base + MDIO_CMD_REG);
+out:
+ return ret;
+}
+
+static int hip04_mdio_reset(struct mii_bus *bus)
+{
+ int temp, i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ hip04_mdio_write(bus, i, 22, 0);
+ temp = hip04_mdio_read(bus, i, MII_BMCR);
+ if (temp < 0)
+ continue;
+
+ temp |= BMCR_RESET;
+ if (hip04_mdio_write(bus, i, MII_BMCR, temp) < 0)
+ continue;
+ }
+
+ mdelay(500);
+ return 0;
+}
+
+static int hip04_mdio_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ struct mii_bus *bus;
+ struct hip04_mdio_priv *priv;
+ int ret;
+
+ bus = mdiobus_alloc_size(sizeof(struct hip04_mdio_priv));
+ if (!bus) {
+ dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
+ return -ENOMEM;
+ }
+
+ bus->name = "hip04_mdio_bus";
+ bus->read = hip04_mdio_read;
+ bus->write = hip04_mdio_write;
+ bus->reset = hip04_mdio_reset;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+ bus->parent = &pdev->dev;
+ priv = bus->priv;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_mdio;
+ }
+
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+ goto out_mdio;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+
+out_mdio:
+ mdiobus_free(bus);
+ return ret;
+}
+
+static int hip04_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static const struct of_device_id hip04_mdio_match[] = {
+ { .compatible = "hisilicon,hip04-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hip04_mdio_match);
+
+static struct platform_driver hip04_mdio_driver = {
+ .probe = hip04_mdio_probe,
+ .remove = hip04_mdio_remove,
+ .driver = {
+ .name = "hip04-mdio",
+ .owner = THIS_MODULE,
+ .of_match_table = hip04_mdio_match,
+ },
+};
+
+module_platform_driver(hip04_mdio_driver);
+
+MODULE_DESCRIPTION("HISILICON P04 MDIO interface driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hip04-mdio");
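hip04_mdio_read() and hip04_mdio_write() build a single command word from the defines near the top of this file: register number in bits 4:0, PHY address in bits 9:5, and an opcode that already includes MDIO_START (bit 14). The encoding can be reproduced in isolation as below; mdio_cmd() is a hypothetical helper, the bit values are the ones defined in this file.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define MDIO_START	BIT(14)
#define MDIO_READ	(BIT(12) | BIT(11) | MDIO_START)
#define MDIO_WRITE	(BIT(12) | BIT(10) | MDIO_START)

static uint32_t mdio_cmd(uint32_t op, int phy, int regnum)
{
	/* regnum in bits 4:0, PHY address in bits 9:5, opcode + start above */
	return (uint32_t)regnum | ((uint32_t)phy << 5) | op;
}

int main(void)
{
	printf("read  phy 3 reg 1: 0x%08x\n", mdio_cmd(MDIO_READ, 3, 1));
	printf("write phy 3 reg 0: 0x%08x\n", mdio_cmd(MDIO_WRITE, 3, 0));
	return 0;
}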
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 566b17db135a..e8a1adb7a962 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2064,9 +2064,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(swqe, 0, SWQE_HEADER_SIZE);
atomic_dec(&pr->swqe_avail);
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
- swqe->vlan_tag = vlan_tx_tag_get(skb);
+ swqe->vlan_tag = skb_vlan_tag_get(skb);
}
pr->tx_packets++;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 83140cbb5f01..9242982db3e0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3226,9 +3226,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_flags |= E1000_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= (skb_vlan_tag_get(skb) <<
+ E1000_TX_FLAGS_VLAN_SHIFT);
}
first = tx_ring->next_to_use;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 7785240a0da1..9416e5a7e0c8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -34,7 +34,7 @@
#include <linux/pci-aspm.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e14fd85f64eb..38cb586b1bf4 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4189,7 +4189,7 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
/* Setup hardware time stamping cyclecounter */
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
adapter->cc.read = e1000e_cyclecounter_read;
- adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mask = CYCLECOUNTER_MASK(64);
adapter->cc.mult = 1;
/* cc.shift set in e1000e_get_base_timinca() */
@@ -5463,8 +5463,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
struct e1000_hw *hw = &adapter->hw;
u16 length, offset;
- if (vlan_tx_tag_present(skb) &&
- !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+ if (skb_vlan_tag_present(skb) &&
+ !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
(adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
return 0;
@@ -5603,9 +5603,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (e1000_maybe_stop_tx(tx_ring, count + 2))
return NETDEV_TX_BUSY;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_flags |= E1000_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= (skb_vlan_tag_get(skb) <<
+ E1000_TX_FLAGS_VLAN_SHIFT);
}
first = tx_ring->next_to_use;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index fb1a914a3ad4..978ef9c4a043 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -90,12 +90,9 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
- s64 now;
spin_lock_irqsave(&adapter->systim_lock, flags);
- now = timecounter_read(&adapter->tc);
- now += delta;
- timecounter_init(&adapter->tc, &adapter->cc, now);
+ timecounter_adjtime(&adapter->tc, delta);
spin_unlock_irqrestore(&adapter->systim_lock, flags);
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index eb088b129bc7..caa43f7c2931 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -965,8 +965,8 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
tx_desc = FM10K_TX_DESC(tx_ring, i);
/* add HW VLAN tag */
- if (vlan_tx_tag_present(skb))
- tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+ if (skb_vlan_tag_present(skb))
+ tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
else
tx_desc->vlan = 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 8811364b91cb..945b35d31c71 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -609,7 +609,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
int err;
if ((skb->protocol == htons(ETH_P_8021Q)) &&
- !vlan_tx_tag_present(skb)) {
+ !skb_vlan_tag_present(skb)) {
/* FM10K only supports hardware tagging, any tags in frame
* are considered 2nd level or "outer" tags
*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 3d741ee99a2c..4f4d9d16bddb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -742,6 +742,65 @@ i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
#endif
/**
+ * i40e_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ i40e_status status = 0;
+ u16 pba_word = 0;
+ u16 pba_size = 0;
+ u16 pba_ptr = 0;
+ u16 i = 0;
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+ if (status || (pba_word != 0xFAFA)) {
+ hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
+ if (status) {
+ hw_dbg(hw, "Failed to read PBA Block pointer.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
+ if (status) {
+ hw_dbg(hw, "Failed to read PBA Block size.\n");
+ return status;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ hw_dbg(hw, "Buffer to small for PBA data.\n");
+ return I40E_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+ if (status) {
+ hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
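i40e_read_pba_string() above treats the PBA block as a length word followed by words that each carry two ASCII characters, high byte first, which is why it subtracts one from the block size and expects a caller buffer of at least 2 * pba_size + 1 bytes. The unpacking loop can be checked on the host with a made-up word array standing in for the NVM contents:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical PBA block data: "A1B2C3" packed two characters per word */
	const uint16_t pba_words[] = { 0x4131, 0x4232, 0x4333 };
	const unsigned int pba_size = sizeof(pba_words) / sizeof(pba_words[0]);
	char pba_num[2 * 3 + 1];
	unsigned int i;

	for (i = 0; i < pba_size; i++) {
		pba_num[i * 2]     = (pba_words[i] >> 8) & 0xFF;  /* high byte first */
		pba_num[i * 2 + 1] = pba_words[i] & 0xFF;
	}
	pba_num[pba_size * 2] = '\0';

	printf("PBA: %s\n", pba_num);	/* prints "A1B2C3" */
	return 0;
}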
+
+/**
* i40e_get_media_type - Gets media type
* @hw: pointer to the hardware structure
**/
@@ -2035,6 +2094,43 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
}
/**
+ * i40e_aq_debug_read_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the register using the admin queue commands
+ **/
+i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd_resp =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ i40e_status status;
+
+ if (reg_val == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_debug_read_reg);
+
+ cmd_resp->address = cpu_to_le32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ *reg_val = ((u64)cmd_resp->value_high << 32) |
+ (u64)cmd_resp->value_low;
+ *reg_val = le64_to_cpu(*reg_val);
+ }
+
+ return status;
+}
+
+/**
* i40e_aq_debug_write_register
* @hw: pointer to the hw struct
* @reg_addr: register address
@@ -2292,6 +2388,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
enum i40e_admin_queue_opc list_type_opc)
{
struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
u32 i = 0;
@@ -2427,6 +2524,34 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
if (p->npar_enable || p->mfp_mode_1)
p->fcoe = false;
+ /* count the enabled ports (aka the "not disabled" ports) */
+ hw->num_ports = 0;
+ for (i = 0; i < 4; i++) {
+ u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
+ u64 port_cfg = 0;
+
+ /* use AQ read to get the physical register offset instead
+ * of the port relative offset
+ */
+ i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
+ if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
+ hw->num_ports++;
+ }
+
+ valid_functions = p->valid_functions;
+ num_functions = 0;
+ while (valid_functions) {
+ if (valid_functions & 1)
+ num_functions++;
+ valid_functions >>= 1;
+ }
+
+ /* partition id is 1-based, and functions are evenly spread
+ * across the ports as partitions
+ */
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+
/* additional HW specific goodies that might
* someday be HW version specific
*/
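The capability parsing added above derives num_ports by counting ports whose PRTGEN_CNF disable bit is clear, num_partitions as the popcount of valid_functions divided by the port count, and a 1-based partition_id of pf_id / num_ports + 1. For example, 8 enabled functions on 4 ports give 2 partitions, with PFs 0-3 in partition 1 and PFs 4-7 in partition 2; the sketch below reproduces that arithmetic with those example numbers (the inputs are made up, not read from hardware).

#include <stdio.h>

int main(void)
{
	unsigned int valid_functions = 0xff;	/* example: 8 enabled functions */
	unsigned int num_ports = 4;		/* example: all 4 ports enabled */
	unsigned int num_functions = 0, pf_id;

	while (valid_functions) {		/* same popcount loop as the driver */
		if (valid_functions & 1)
			num_functions++;
		valid_functions >>= 1;
	}

	printf("num_partitions = %u\n", num_functions / num_ports);
	for (pf_id = 0; pf_id < num_functions; pf_id++)
		printf("pf %u -> partition %u\n", pf_id, pf_id / num_ports + 1);
	return 0;
}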
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 951e8767fc50..b8230dc205ec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -219,6 +219,16 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
/**
+ * i40e_partition_setting_complaint - generic complaint for MFP restriction
+ * @pf: the PF struct
+ **/
+static void i40e_partition_setting_complaint(struct i40e_pf *pf)
+{
+ dev_info(&pf->pdev->dev,
+ "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
+}
+
+/**
* i40e_get_settings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
* @ecmd: ethtool command
@@ -485,6 +495,14 @@ static int i40e_set_settings(struct net_device *netdev,
u8 autoneg;
u32 advertise;
+ /* Changing port settings is not supported if this isn't the
+ * port's controlling PF
+ */
+ if (hw->partition_id != 1) {
+ i40e_partition_setting_complaint(pf);
+ return -EOPNOTSUPP;
+ }
+
if (vsi != pf->vsi[pf->lan_vsi])
return -EOPNOTSUPP;
@@ -687,6 +705,14 @@ static int i40e_set_pauseparam(struct net_device *netdev,
u8 aq_failures;
int err = 0;
+ /* Changing the port's flow control is not supported if this isn't the
+ * port's controlling PF
+ */
+ if (hw->partition_id != 1) {
+ i40e_partition_setting_complaint(pf);
+ return -EOPNOTSUPP;
+ }
+
if (vsi != pf->vsi[pf->lan_vsi])
return -EOPNOTSUPP;
@@ -1503,7 +1529,7 @@ static void i40e_get_wol(struct net_device *netdev,
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
- if ((1 << hw->port) & wol_nvm_bits) {
+ if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
wol->supported = 0;
wol->wolopts = 0;
} else {
@@ -1512,13 +1538,28 @@ static void i40e_get_wol(struct net_device *netdev,
}
}
+/**
+ * i40e_set_wol - set the WakeOnLAN configuration
+ * @netdev: the netdev in question
+ * @wol: the ethtool WoL setting data
+ **/
static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
u16 wol_nvm_bits;
+ /* WoL not supported if this isn't the controlling PF on the port */
+ if (hw->partition_id != 1) {
+ i40e_partition_setting_complaint(pf);
+ return -EOPNOTSUPP;
+ }
+
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return -EOPNOTSUPP;
+
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
if (((1 << hw->port) & wol_nvm_bits))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index a8b8bd95108d..2cd841b29059 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1515,8 +1515,6 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
- i40e_add_filter(vsi, FIP_ALL_VN2VN_MACS, 0, false, false);
- i40e_add_filter(vsi, FIP_ALL_P2P_MACS, 0, false, false);
/* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a5f2660d552d..f3b036d19b6b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4557,6 +4557,15 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
return;
}
+ /* Warn user if link speed on NPAR enabled partition is not at
+ * least 10GB
+ */
+ if (vsi->back->hw.func_caps.npar_enable &&
+ (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
+ vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
+ netdev_warn(vsi->netdev,
+ "The partition detected link speed that is less than 10Gbps\n");
+
switch (vsi->back->hw.phy.link_info.link_speed) {
case I40E_LINK_SPEED_40GB:
strlcpy(speed, "40 Gbps", SPEED_SIZE);
@@ -5494,14 +5503,18 @@ static void i40e_link_event(struct i40e_pf *pf)
{
bool new_link, old_link;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ u8 new_link_speed, old_link_speed;
/* set this to force the get_link_status call to refresh state */
pf->hw.phy.get_link_info = true;
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
new_link = i40e_get_link_status(&pf->hw);
+ old_link_speed = pf->hw.phy.link_info_old.link_speed;
+ new_link_speed = pf->hw.phy.link_info.link_speed;
if (new_link == old_link &&
+ new_link_speed == old_link_speed &&
(test_bit(__I40E_DOWN, &vsi->state) ||
new_link == netif_carrier_ok(vsi->netdev)))
return;
@@ -7306,7 +7319,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
- if (pf->hw.func_caps.num_vfs) {
+ if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
pf->flags |= I40E_FLAG_SRIOV_ENABLED;
pf->num_req_vfs = min_t(int,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 2fb4306597e8..68e852a96680 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -71,6 +71,9 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
u32 reg_addr, u64 reg_val,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -245,6 +248,8 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw);
bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
#ifdef I40E_FCOE
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index cecb340898fe..bb86390a0967 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1815,8 +1815,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
u32 tx_flags = 0;
/* if we have a HW VLAN tag being added, default to the HW one */
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
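The conversions above in i40e_tx_prepare_vlan_flags() (and the matching hunks in i40evf, igb, ixgbe and the other drivers below) are a straight rename of the VLAN helpers in <linux/if_vlan.h>: vlan_tx_tag_present()/vlan_tx_tag_get() become skb_vlan_tag_present()/skb_vlan_tag_get() with unchanged semantics. A minimal sketch of the resulting pattern; the function name and shift parameter are illustrative, not taken from any of these drivers:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static u32 example_tx_vlan_flags(struct sk_buff *skb, unsigned int vlan_shift)
{
	u32 tx_flags = 0;

	/* a hardware-offloaded tag lives in skb metadata, not in the frame */
	if (skb_vlan_tag_present(skb))
		tx_flags |= (u32)skb_vlan_tag_get(skb) << vlan_shift;

	return tx_flags;
}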
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index c1f2eb963357..ff121fe98dd4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -431,7 +431,7 @@ struct i40e_hw {
u8 __iomem *hw_addr;
void *back;
- /* function pointer structs */
+ /* subsystem structs */
struct i40e_phy_info phy;
struct i40e_mac_info mac;
struct i40e_bus_info bus;
@@ -458,6 +458,11 @@ struct i40e_hw {
u8 pf_id;
u16 main_vsi_seid;
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
/* Closest numa node to the device */
u16 numa_node;
@@ -1135,6 +1140,8 @@ struct i40e_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_PBA_FLAGS 0x15
+#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_NVM_IMAGE_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 5bae89550657..044019b9d406 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -791,10 +791,18 @@ void i40e_free_vfs(struct i40e_pf *pf)
if (!pf->vf)
return;
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+
+ msleep(20); /* let any messages in transit get finished up */
+
/* Disable interrupt 0 so we don't try to handle the VFLR. */
i40e_irq_dynamic_disable_icr0(pf);
- mdelay(10); /* let any messages in transit get finished up */
/* free up vf resources */
tmp = pf->num_alloc_vfs;
pf->num_alloc_vfs = 0;
@@ -813,7 +821,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
* before this function ever gets called.
*/
if (!pci_vfs_assigned(pf->pdev)) {
- pci_disable_sriov(pf->pdev);
/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
* work correctly when SR-IOV gets re-enabled.
*/
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 04c7c1557a0c..82c3798fdd36 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1122,8 +1122,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
u32 tx_flags = 0;
/* if we have a HW VLAN tag being added, default to the HW one */
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 68aec11f6523..d1c2b5a6e648 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -425,7 +425,7 @@ struct i40e_hw {
u8 __iomem *hw_addr;
void *back;
- /* function pointer structs */
+ /* subsystem structs */
struct i40e_phy_info phy;
struct i40e_mac_info mac;
struct i40e_bus_info bus;
@@ -452,6 +452,11 @@ struct i40e_hw {
u8 pf_id;
u16 main_vsi_seid;
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
/* Closest numa node to the device */
u16 numa_node;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index cabaf599f562..21ccbe8b6d13 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -313,10 +313,6 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, val);
- /* re-enable interrupt causes */
- wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
- wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
-
/* schedule work on the private workqueue */
schedule_work(&adapter->adminq_task);
@@ -947,30 +943,6 @@ static int i40evf_up_complete(struct i40evf_adapter *adapter)
}
/**
- * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
- * @adapter: board private structure
- **/
-static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_active_queues; i++)
- i40evf_clean_rx_ring(adapter->rx_rings[i]);
-}
-
-/**
- * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
- * @adapter: board private structure
- **/
-static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_active_queues; i++)
- i40evf_clean_tx_ring(adapter->tx_rings[i]);
-}
-
-/**
* i40e_down - Shutdown the connection processing
* @adapter: board private structure
**/
@@ -982,6 +954,12 @@ void i40evf_down(struct i40evf_adapter *adapter)
if (adapter->state == __I40EVF_DOWN)
return;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
+ i40evf_irq_disable(adapter);
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
@@ -992,25 +970,27 @@ void i40evf_down(struct i40evf_adapter *adapter)
}
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ /* cancel any current operation */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->aq_pending = 0;
+ /* Schedule operations to close down the HW. Don't wait
+ * here for this to complete. The watchdog is still running
+ * and it will take care of this.
+ */
+ adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
- /* disable receives */
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
- mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- msleep(20);
}
netif_tx_disable(netdev);
netif_tx_stop_all_queues(netdev);
- i40evf_irq_disable(adapter);
-
i40evf_napi_disable_all(adapter);
- netif_carrier_off(netdev);
+ msleep(20);
- i40evf_clean_all_tx_rings(adapter);
- i40evf_clean_all_rx_rings(adapter);
+ netif_carrier_off(netdev);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
/**
@@ -1356,8 +1336,13 @@ static void i40evf_watchdog_task(struct work_struct *work)
/* Process admin queue tasks. After init, everything gets done
* here so we don't race on the admin queue.
*/
- if (adapter->aq_pending)
+ if (adapter->aq_pending) {
+ if (!i40evf_asq_done(hw)) {
+ dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
+ i40evf_send_api_ver(adapter);
+ }
goto watchdog_done;
+ }
if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
i40evf_map_queues(adapter);
@@ -1401,11 +1386,14 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
-
- i40evf_irq_enable(adapter, true);
- i40evf_fire_sw_int(adapter, 0xFF);
-
watchdog_done:
+ if (adapter->state == __I40EVF_RUNNING) {
+ i40evf_irq_enable_queues(adapter, ~0);
+ i40evf_fire_sw_int(adapter, 0xFF);
+ } else {
+ i40evf_fire_sw_int(adapter, 0x1);
+ }
+
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
if (adapter->state == __I40EVF_REMOVE)
@@ -1633,12 +1621,12 @@ static void i40evf_adminq_task(struct work_struct *work)
u16 pending;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
- return;
+ goto out;
event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
- return;
+ goto out;
v_msg = (struct i40e_virtchnl_msg *)&event.desc;
do {
@@ -1688,10 +1676,10 @@ static void i40evf_adminq_task(struct work_struct *work)
if (oldval != val)
wr32(hw, hw->aq.asq.len, val);
+ kfree(event.msg_buf);
+out:
/* re-enable Admin queue interrupt cause */
i40evf_misc_irq_enable(adapter);
-
- kfree(event.msg_buf);
}
/**
@@ -2053,12 +2041,8 @@ static void i40evf_init_task(struct work_struct *work)
/* aq msg sent, awaiting reply */
err = i40evf_verify_api_ver(adapter);
if (err) {
- dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
- err);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
- dev_info(&pdev->dev, "Resending request\n");
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
err = i40evf_send_api_ver(adapter);
- }
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
@@ -2081,7 +2065,6 @@ static void i40evf_init_task(struct work_struct *work)
}
err = i40evf_get_vf_config(adapter);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
- dev_info(&pdev->dev, "Resending VF config request\n");
err = i40evf_send_vf_config_msg(adapter);
goto err;
}
@@ -2440,6 +2423,7 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40evf_mac_filter *f, *ftmp;
struct i40e_hw *hw = &adapter->hw;
+ int count = 50;
cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
@@ -2448,6 +2432,11 @@ static void i40evf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
adapter->netdev_registered = false;
}
+ while (count-- && adapter->aq_required)
+ msleep(50);
+
+ if (count < 0)
+ dev_err(&pdev->dev, "Timed out waiting for PF driver.\n");
adapter->state = __I40EVF_REMOVE;
if (adapter->msix_entries) {
@@ -2477,6 +2466,10 @@ static void i40evf_remove(struct pci_dev *pdev)
list_del(&f->list);
kfree(f);
}
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 5fde5a7f4591..3f0c85ecbca6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -715,14 +715,14 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
return;
}
- if (v_opcode != adapter->current_op)
- dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n",
- adapter->current_op, v_opcode);
if (v_retval) {
dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
__func__, v_retval, v_opcode);
}
switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ /* no action, but also not an error */
+ break;
case I40E_VIRTCHNL_OP_GET_STATS: {
struct i40e_eth_stats *stats =
(struct i40e_eth_stats *)msg;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 82d891e183b1..ee22da391474 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -29,7 +29,7 @@
#include "e1000_mac.h"
#include "e1000_82575.h"
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ff59897a9463..6c25ec314183 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5035,9 +5035,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
}
/* record initial flags and protocol */
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 794c139f0cc0..5e7a4e30a7b6 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -256,14 +256,9 @@ static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
unsigned long flags;
- s64 now;
spin_lock_irqsave(&igb->tmreg_lock, flags);
-
- now = timecounter_read(&igb->tc);
- now += delta;
- timecounter_init(&igb->tc, &igb->cc, now);
-
+ timecounter_adjtime(&igb->tc, delta);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
return 0;
@@ -770,7 +765,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.settime = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82576;
- adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mask = CYCLECOUNTER_MASK(64);
adapter->cc.mult = 1;
adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
/* Dial the nominal frequency. */
@@ -790,7 +785,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.settime = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82580;
- adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
+ adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
adapter->cc.shift = 0;
/* Enable the timer functions by clearing bit 31. */
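The igb adjtime hunk above (and the equivalent ixgbe, mlx4 and cpts hunks further down) replaces the open-coded read/add/re-initialise sequence with timecounter_adjtime() from <linux/timecounter.h>, which simply offsets the timecounter's nanosecond base. A hedged sketch of the converted callback shape, with a generic timecounter and lock standing in for the driver-specific ones:

#include <linux/spinlock.h>
#include <linux/timecounter.h>

static void example_phc_adjtime(struct timecounter *tc, spinlock_t *lock,
				s64 delta)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	timecounter_adjtime(tc, delta);	/* adds delta to tc->nsec */
	spin_unlock_irqrestore(lock, flags);
}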
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 63c807c9b21c..ad2b4897b392 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2234,9 +2234,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_flags |= IGBVF_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= (skb_vlan_tag_get(skb) <<
+ IGBVF_TX_FLAGS_VLAN_SHIFT);
}
if (skb->protocol == htons(ETH_P_IP))
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index aa87605b144a..11a1bdbe3fd9 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1532,9 +1532,9 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
DESC_NEEDED)))
return NETDEV_TX_BUSY;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
tx_flags |= IXGB_TX_FLAGS_VLAN;
- vlan_id = vlan_tx_tag_get(skb);
+ vlan_id = skb_vlan_tag_get(skb);
}
first = adapter->tx_ring.next_to_use;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b6137be43920..38fc64cf5dca 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -38,7 +38,7 @@
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2ed2c7de2304..7bb421bfd84e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7217,8 +7217,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
first->gso_segs = 1;
/* if we have a HW VLAN tag being added default to the HW one */
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 5fd4b5271f9a..79c00f57d3e7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -261,18 +261,9 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
unsigned long flags;
- u64 now;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
-
- now = timecounter_read(&adapter->tc);
- now += delta;
-
- /* reset the timecounter */
- timecounter_init(&adapter->tc,
- &adapter->cc,
- now);
-
+ timecounter_adjtime(&adapter->tc, delta);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
ixgbe_ptp_setup_sdp(adapter);
@@ -802,7 +793,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
memset(&adapter->cc, 0, sizeof(adapter->cc));
adapter->cc.read = ixgbe_ptp_read;
- adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mask = CYCLECOUNTER_MASK(64);
adapter->cc.shift = shift;
adapter->cc.mult = 1;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 62a0d8e0f17d..c9b49bfb51bb 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3452,8 +3452,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
first->bytecount = skb->len;
first->gso_segs = 1;
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= skb_vlan_tag_get(skb);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 44ce7d88f554..6e9a792097d3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2154,9 +2154,9 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
static inline void
jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
{
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
*flags |= TXFLAG_TAGON;
- *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+ *vlan = cpu_to_le16(skb_vlan_tag_get(skb));
}
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 867a6a3ef81f..d9f4498832a1 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1895,14 +1895,14 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
ctrl = 0;
/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
if (!le) {
le = get_tx_le(sky2, &slot);
le->addr = 0;
le->opcode = OP_VLAN|HW_OWNER;
} else
le->opcode |= OP_VLAN;
- le->length = cpu_to_be16(vlan_tx_tag_get(skb));
+ le->length = cpu_to_be16(skb_vlan_tag_get(skb));
ctrl |= INS_VLAN;
}
@@ -2594,7 +2594,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
prefetch(sky2->rx_ring + sky2->rx_next);
- if (vlan_tx_tag_present(re->skb))
+ if (skb_vlan_tag_present(re->skb))
count -= VLAN_HLEN; /* Account for vlan tag */
/* This chip has hardware problems that generates bogus status.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 999014413b1a..90b5309cdb5c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -32,6 +32,7 @@
*/
#include <linux/mlx4/device.h>
+#include <linux/clocksource.h>
#include "mlx4_en.h"
@@ -147,12 +148,9 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
ptp_clock_info);
unsigned long flags;
- s64 now;
write_lock_irqsave(&mdev->clock_lock, flags);
- now = timecounter_read(&mdev->clock);
- now += delta;
- timecounter_init(&mdev->clock, &mdev->cycles, now);
+ timecounter_adjtime(&mdev->clock, delta);
write_unlock_irqrestore(&mdev->clock_lock, flags);
return 0;
@@ -243,7 +241,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
unsigned long flags;
- u64 ns;
+ u64 ns, zero = 0;
rwlock_init(&mdev->clock_lock);
@@ -268,7 +266,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
*/
- ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
+ ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
do_div(ns, NSEC_PER_SEC / 2 / HZ);
mdev->overflow_period = ns;
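cyclecounter_cyc2ns() grew extra mask/fraction arguments as part of the timecounter rework, which is why the mlx4 overflow-period calculation above now passes a zero mask and a dummy remainder pointer. A sketch of the same calculation with a generic cyclecounter (names are illustrative):

#include <linux/timecounter.h>

static u64 example_counter_wrap_ns(const struct cyclecounter *cc)
{
	u64 frac = 0;

	/* longest interval, in ns, before the counter wraps */
	return cyclecounter_cyc2ns(cc, cc->mask, 0, &frac);
}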
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index e3357bf523df..359bb1286eb5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -682,8 +682,8 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
if (dev->num_tc)
return skb_tx_hash(dev, skb);
- if (vlan_tx_tag_present(skb))
- up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
+ if (skb_vlan_tag_present(skb))
+ up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
return fallback(dev, skb) % rings_p_up + up * rings_p_up;
}
@@ -742,8 +742,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
}
- if (vlan_tx_tag_present(skb))
- vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb))
+ vlan_tag = skb_vlan_tag_get(skb);
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -930,7 +930,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
real_size = (real_size / 16) & 0x3f;
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce &&
- !vlan_tx_tag_present(skb) && send_doorbell) {
+ !skb_vlan_tag_present(skb) && send_doorbell) {
tx_desc->ctrl.bf_qpn = ring->doorbell_qpn |
cpu_to_be32(real_size);
@@ -952,7 +952,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- !!vlan_tx_tag_present(skb);
+ !!skb_vlan_tag_present(skb);
tx_desc->ctrl.fence_size = real_size;
/* Ensure new descriptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 10e1f1a18255..4878025e231c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -300,11 +300,11 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
param = qp->pid;
break;
case QP_STATE:
- param = (u64)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
+ param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
*is_str = 1;
break;
case QP_XPORT:
- param = (u64)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
+ param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
*is_str = 1;
break;
case QP_MTU:
@@ -464,7 +464,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
if (is_str)
- ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)field);
+ ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
else
ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 2fa6ae026e4f..10988fbf47eb 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4342,9 +4342,7 @@ static void ksz_init_timer(struct ksz_timer_info *info, int period,
{
info->max = 0;
info->period = period;
- init_timer(&info->timer);
- info->timer.function = function;
- info->timer.data = (unsigned long) data;
+ setup_timer(&info->timer, function, (unsigned long)data);
}
static void ksz_update_timer(struct ksz_timer_info *info)
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 2552e550a78c..eb807b0dc72a 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1122,12 +1122,12 @@ again:
}
#ifdef NS83820_VLAN_ACCEL_SUPPORT
- if(vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
/* fetch the vlan tag info out of the
* ancillary data if the vlan code
* is using hw vlan acceleration
*/
- short tag = vlan_tx_tag_get(skb);
+ short tag = skb_vlan_tag_get(skb);
extsts |= (EXTSTS_VPKT | htons(tag));
}
#endif
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index f5e4b820128b..0529cad75b10 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4045,8 +4045,8 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
}
queue = 0;
- if (vlan_tx_tag_present(skb))
- vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb))
+ vlan_tag = skb_vlan_tag_get(skb);
if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index cc0485e3c621..50d5604833ed 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -890,8 +890,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
dev->name, __func__, __LINE__,
fifo_hw, dtr, dtr_priv);
- if (vlan_tx_tag_present(skb)) {
- u16 vlan_tag = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ u16 vlan_tag = skb_vlan_tag_get(skb);
vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index f39cae620f61..a41bb5e6b954 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2462,9 +2462,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
/* vlan tag */
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
- vlan_tx_tag_get(skb));
+ skb_vlan_tag_get(skb));
else
start_tx->txvlan = 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 613037584d08..a47fe67fdf58 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1893,9 +1893,9 @@ netxen_tso_check(struct net_device *netdev,
protocol = vh->h_vlan_encapsulated_proto;
flags = FLAGS_VLAN_TAGGED;
- } else if (vlan_tx_tag_present(skb)) {
+ } else if (skb_vlan_tag_present(skb)) {
flags = FLAGS_VLAN_OOB;
- vid = vlan_tx_tag_get(skb);
+ vid = skb_vlan_tag_get(skb);
netxen_set_tx_vlan_tci(first_desc, vid);
vlan_oob = 1;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 18e5de72e9b4..4d2496f28b85 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -10,6 +10,7 @@
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
+#include <linux/printk.h>
#include "qlcnic.h"
@@ -320,8 +321,8 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (protocol == ETH_P_8021Q) {
vh = (struct vlan_ethhdr *)skb->data;
vlan_id = ntohs(vh->h_vlan_TCI);
- } else if (vlan_tx_tag_present(skb)) {
- vlan_id = vlan_tx_tag_get(skb);
+ } else if (skb_vlan_tag_present(skb)) {
+ vlan_id = skb_vlan_tag_get(skb);
}
}
@@ -472,9 +473,9 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
flags = QLCNIC_FLAGS_VLAN_TAGGED;
vlan_tci = ntohs(vh->h_vlan_TCI);
protocol = ntohs(vh->h_vlan_encapsulated_proto);
- } else if (vlan_tx_tag_present(skb)) {
+ } else if (skb_vlan_tag_present(skb)) {
flags = QLCNIC_FLAGS_VLAN_OOB;
- vlan_tci = vlan_tx_tag_get(skb);
+ vlan_tci = skb_vlan_tag_get(skb);
}
if (unlikely(adapter->tx_pvid)) {
if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
@@ -1465,14 +1466,14 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
- int i;
- unsigned char *data = skb->data;
-
- pr_info(KERN_INFO "\n");
- for (i = 0; i < skb->len; i++) {
- QLCDB(adapter, DRV, "%02x ", data[i]);
- if ((i & 0x0f) == 8)
- pr_info(KERN_INFO "\n");
+ if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
+ char prefix[30];
+
+ scnprintf(prefix, sizeof(prefix), "%s: %s: ",
+ dev_name(&adapter->pdev->dev), __func__);
+
+ print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
+ skb->data, skb->len, true);
}
}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 6c904a6cad2a..dc0058f90370 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2660,11 +2660,11 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
+ "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
- mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
+ mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
}
tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
if (tso < 0) {
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 9c31e46d1eee..d79e33b3c191 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -708,8 +708,8 @@ static void cp_tx (struct cp_private *cp)
static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
{
- return vlan_tx_tag_present(skb) ?
- TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+ return skb_vlan_tag_present(skb) ?
+ TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
}
static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 14a1c5cec3a5..cd286b0356ab 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2073,8 +2073,8 @@ static int rtl8169_set_features(struct net_device *dev,
static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
{
- return (vlan_tx_tag_present(skb)) ?
- TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+ return (skb_vlan_tag_present(skb)) ?
+ TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
}
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
@@ -7049,6 +7049,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 status, len;
u32 opts[2];
int frags;
+ bool stop_queue;
if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7105,11 +7106,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- RTL_W8(TxPoll, NPQ);
+ stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
- mmiowb();
+ if (!skb->xmit_more || stop_queue ||
+ netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
+ RTL_W8(TxPoll, NPQ);
+
+ mmiowb();
+ }
- if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
+ if (stop_queue) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
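The r8169 hunk above coalesces doorbell writes: when the stack flags the skb with xmit_more and the ring still has room, the TxPoll kick is deferred until a later packet (or a stopping queue) forces it, so a burst of frames costs a single MMIO write. A hedged sketch of that pattern; the ring structure and doorbell register are illustrative, not the r8169 ones:

#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_ring {
	void __iomem *doorbell;	/* illustrative kick register */
};

static void example_tx_kick(struct example_ring *ring, struct sk_buff *skb,
			    struct netdev_queue *txq, bool stop_queue)
{
	/* kick only when nothing more is queued behind this skb, or the
	 * queue is stopping/stopped and cannot deliver a later kick */
	if (!skb->xmit_more || stop_queue || netif_xmit_stopped(txq))
		writel(1, ring->doorbell);
}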
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2f398fa4b9e6..cad8cf962cdf 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -806,13 +806,13 @@ static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
{
- return (void *) desc_info->desc->cookie;
+ return (void *)(uintptr_t)desc_info->desc->cookie;
}
static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
void *ptr)
{
- desc_info->desc->cookie = (long) ptr;
+ desc_info->desc->cookie = (uintptr_t) ptr;
}
static struct rocker_desc_info *
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 698494481d18..23545e1e605a 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -133,9 +133,8 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
return false;
priv->eee_active = 1;
- init_timer(&priv->eee_ctrl_timer);
- priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
- priv->eee_ctrl_timer.data = (unsigned long)priv;
+ setup_timer(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer,
+ (unsigned long)priv);
priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
add_timer(&priv->eee_ctrl_timer);
@@ -1009,10 +1008,9 @@ static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
struct sxgbe_tx_queue *p = priv->txq[queue_num];
p->tx_coal_frames = SXGBE_TX_FRAMES;
p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
- init_timer(&p->txtimer);
+ setup_timer(&p->txtimer, sxgbe_tx_timer,
+ (unsigned long)&priv->txq[queue_num]);
p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
- p->txtimer.data = (unsigned long)&priv->txq[queue_num];
- p->txtimer.function = sxgbe_tx_timer;
add_timer(&p->txtimer);
}
}
@@ -1274,7 +1272,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
ctxt_desc_req = 1;
- if (unlikely(vlan_tx_tag_present(skb) ||
+ if (unlikely(skb_vlan_tag_present(skb) ||
((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
tqueue->hwts_tx_en)))
ctxt_desc_req = 1;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index ac4d5629d905..73c2715a27f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \
- dwmac-sti.o dwmac-socfpga.o
+ dwmac-sti.o dwmac-socfpga.o dwmac-rk.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
new file mode 100644
index 000000000000..35f9b86bc9e5
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -0,0 +1,459 @@
+/**
+ * dwmac-rk.c - Rockchip RK3288 DWMAC specific glue layer
+ *
+ * Copyright (C) 2014 Chen-Zhi (Roger Chen)
+ *
+ * Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/stmmac.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+struct rk_priv_data {
+ struct platform_device *pdev;
+ int phy_iface;
+ char regulator[32];
+
+ bool clk_enabled;
+ bool clock_input;
+
+ struct clk *clk_mac;
+ struct clk *clk_mac_pll;
+ struct clk *gmac_clkin;
+ struct clk *mac_clk_rx;
+ struct clk *mac_clk_tx;
+ struct clk *clk_mac_ref;
+ struct clk *clk_mac_refout;
+ struct clk *aclk_mac;
+ struct clk *pclk_mac;
+
+ int tx_delay;
+ int rx_delay;
+
+ struct regmap *grf;
+};
+
+#define HIWORD_UPDATE(val, mask, shift) \
+ ((val) << (shift) | (mask) << ((shift) + 16))
+
+#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
+#define GRF_CLR_BIT(nr) (BIT(nr+16))
+
+#define RK3288_GRF_SOC_CON1 0x0248
+#define RK3288_GRF_SOC_CON3 0x0250
+#define RK3288_GRF_GPIO3D_E 0x01ec
+#define RK3288_GRF_GPIO4A_E 0x01f0
+#define RK3288_GRF_GPIO4B_E 0x01f4
+
+/* RK3288_GRF_SOC_CON1 */
+#define GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
+#define GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define GMAC_FLOW_CTRL GRF_BIT(9)
+#define GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
+#define GMAC_SPEED_10M GRF_CLR_BIT(10)
+#define GMAC_SPEED_100M GRF_BIT(10)
+#define GMAC_RMII_CLK_25M GRF_BIT(11)
+#define GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
+#define GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
+#define GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
+#define GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define GMAC_RMII_MODE GRF_BIT(14)
+#define GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
+
+/* RK3288_GRF_SOC_CON3 */
+#define GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+#define GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ GMAC_PHY_INTF_SEL_RGMII | GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
+ GMAC_RXCLK_DLY_ENABLE | GMAC_TXCLK_DLY_ENABLE |
+ GMAC_CLK_RX_DL_CFG(rx_delay) |
+ GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ GMAC_PHY_INTF_SEL_RMII | GMAC_RMII_MODE);
+}
+
+static void set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ GMAC_RMII_CLK_2_5M | GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ GMAC_RMII_CLK_25M | GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static int gmac_clk_init(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ bsp_priv->clk_enabled = false;
+
+ bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
+ if (IS_ERR(bsp_priv->mac_clk_rx))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "mac_clk_rx");
+
+ bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
+ if (IS_ERR(bsp_priv->mac_clk_tx))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "mac_clk_tx");
+
+ bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
+ if (IS_ERR(bsp_priv->aclk_mac))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "aclk_mac");
+
+ bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
+ if (IS_ERR(bsp_priv->pclk_mac))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "pclk_mac");
+
+ bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
+ if (IS_ERR(bsp_priv->clk_mac))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "stmmaceth");
+
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
+ bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
+ if (IS_ERR(bsp_priv->clk_mac_ref))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "clk_mac_ref");
+
+ if (!bsp_priv->clock_input) {
+ bsp_priv->clk_mac_refout =
+ devm_clk_get(dev, "clk_mac_refout");
+ if (IS_ERR(bsp_priv->clk_mac_refout))
+ dev_err(dev, "%s: cannot get clock %s\n",
+ __func__, "clk_mac_refout");
+ }
+ }
+
+ if (bsp_priv->clock_input) {
+ dev_info(dev, "%s: clock input from PHY\n", __func__);
+ } else {
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ clk_set_rate(bsp_priv->clk_mac_pll, 50000000);
+ }
+
+ return 0;
+}
+
+static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
+{
+ int phy_iface = bsp_priv->phy_iface;
+
+ if (enable) {
+ if (!bsp_priv->clk_enabled) {
+ if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+ if (!IS_ERR(bsp_priv->mac_clk_rx))
+ clk_prepare_enable(
+ bsp_priv->mac_clk_rx);
+
+ if (!IS_ERR(bsp_priv->clk_mac_ref))
+ clk_prepare_enable(
+ bsp_priv->clk_mac_ref);
+
+ if (!IS_ERR(bsp_priv->clk_mac_refout))
+ clk_prepare_enable(
+ bsp_priv->clk_mac_refout);
+ }
+
+ if (!IS_ERR(bsp_priv->aclk_mac))
+ clk_prepare_enable(bsp_priv->aclk_mac);
+
+ if (!IS_ERR(bsp_priv->pclk_mac))
+ clk_prepare_enable(bsp_priv->pclk_mac);
+
+ if (!IS_ERR(bsp_priv->mac_clk_tx))
+ clk_prepare_enable(bsp_priv->mac_clk_tx);
+
+ /**
+ * if (!IS_ERR(bsp_priv->clk_mac))
+ * clk_prepare_enable(bsp_priv->clk_mac);
+ */
+ mdelay(5);
+ bsp_priv->clk_enabled = true;
+ }
+ } else {
+ if (bsp_priv->clk_enabled) {
+ if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+ if (!IS_ERR(bsp_priv->mac_clk_rx))
+ clk_disable_unprepare(
+ bsp_priv->mac_clk_rx);
+
+ if (!IS_ERR(bsp_priv->clk_mac_ref))
+ clk_disable_unprepare(
+ bsp_priv->clk_mac_ref);
+
+ if (!IS_ERR(bsp_priv->clk_mac_refout))
+ clk_disable_unprepare(
+ bsp_priv->clk_mac_refout);
+ }
+
+ if (!IS_ERR(bsp_priv->aclk_mac))
+ clk_disable_unprepare(bsp_priv->aclk_mac);
+
+ if (!IS_ERR(bsp_priv->pclk_mac))
+ clk_disable_unprepare(bsp_priv->pclk_mac);
+
+ if (!IS_ERR(bsp_priv->mac_clk_tx))
+ clk_disable_unprepare(bsp_priv->mac_clk_tx);
+ /**
+ * if (!IS_ERR(bsp_priv->clk_mac))
+ * clk_disable_unprepare(bsp_priv->clk_mac);
+ */
+ bsp_priv->clk_enabled = false;
+ }
+ }
+
+ return 0;
+}
+
+static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
+{
+ struct regulator *ldo;
+ char *ldostr = bsp_priv->regulator;
+ int ret;
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (!ldostr[0]) {
+ dev_err(dev, "%s: no ldo found\n", __func__);
+ return -1;
+ }
+
+ ldo = regulator_get(NULL, ldostr);
+ if (IS_ERR(ldo)) {
+ dev_err(dev, "\n%s get ldo %s failed\n", __func__, ldostr);
+ } else {
+ if (enable) {
+ if (!regulator_is_enabled(ldo)) {
+ regulator_set_voltage(ldo, 3300000, 3300000);
+ ret = regulator_enable(ldo);
+ if (ret != 0)
+ dev_err(dev, "%s: fail to enable %s\n",
+ __func__, ldostr);
+ else
+ dev_info(dev, "turn on ldo done.\n");
+ } else {
+ dev_warn(dev, "%s is enabled before enable",
+ ldostr);
+ }
+ } else {
+ if (regulator_is_enabled(ldo)) {
+ ret = regulator_disable(ldo);
+ if (ret != 0)
+ dev_err(dev, "%s: fail to disable %s\n",
+ __func__, ldostr);
+ else
+ dev_info(dev, "turn off ldo done.\n");
+ } else {
+ dev_warn(dev, "%s is disabled before disable",
+ ldostr);
+ }
+ }
+ regulator_put(ldo);
+ }
+
+ return 0;
+}
+
+static void *rk_gmac_setup(struct platform_device *pdev)
+{
+ struct rk_priv_data *bsp_priv;
+ struct device *dev = &pdev->dev;
+ int ret;
+ const char *strings = NULL;
+ int value;
+
+ bsp_priv = devm_kzalloc(dev, sizeof(*bsp_priv), GFP_KERNEL);
+ if (!bsp_priv)
+ return ERR_PTR(-ENOMEM);
+
+ bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
+
+ ret = of_property_read_string(dev->of_node, "phy_regulator", &strings);
+ if (ret) {
+ dev_warn(dev, "%s: Can not read property: phy_regulator.\n",
+ __func__);
+ } else {
+ dev_info(dev, "%s: PHY power controlled by regulator(%s).\n",
+ __func__, strings);
+ strcpy(bsp_priv->regulator, strings);
+ }
+
+ ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
+ if (ret) {
+ dev_err(dev, "%s: Can not read property: clock_in_out.\n",
+ __func__);
+ bsp_priv->clock_input = true;
+ } else {
+ dev_info(dev, "%s: clock input or output? (%s).\n",
+ __func__, strings);
+ if (!strcmp(strings, "input"))
+ bsp_priv->clock_input = true;
+ else
+ bsp_priv->clock_input = false;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "tx_delay", &value);
+ if (ret) {
+ bsp_priv->tx_delay = 0x30;
+ dev_err(dev, "%s: Can not read property: tx_delay.", __func__);
+ dev_err(dev, "%s: set tx_delay to 0x%x\n",
+ __func__, bsp_priv->tx_delay);
+ } else {
+ dev_info(dev, "%s: TX delay(0x%x).\n", __func__, value);
+ bsp_priv->tx_delay = value;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "rx_delay", &value);
+ if (ret) {
+ bsp_priv->rx_delay = 0x10;
+ dev_err(dev, "%s: Can not read property: rx_delay.", __func__);
+ dev_err(dev, "%s: set rx_delay to 0x%x\n",
+ __func__, bsp_priv->rx_delay);
+ } else {
+ dev_info(dev, "%s: RX delay(0x%x).\n", __func__, value);
+ bsp_priv->rx_delay = value;
+ }
+
+ bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ bsp_priv->pdev = pdev;
+
+ /*rmii or rgmii*/
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
+ dev_info(dev, "%s: init for RGMII\n", __func__);
+ set_to_rgmii(bsp_priv, bsp_priv->tx_delay, bsp_priv->rx_delay);
+ } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
+ dev_info(dev, "%s: init for RMII\n", __func__);
+ set_to_rmii(bsp_priv);
+ } else {
+ dev_err(dev, "%s: NO interface defined!\n", __func__);
+ }
+
+ gmac_clk_init(bsp_priv);
+
+ return bsp_priv;
+}
+
+static int rk_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+ int ret;
+
+ ret = phy_power_on(bsp_priv, true);
+ if (ret)
+ return ret;
+
+ ret = gmac_clk_enable(bsp_priv, true);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void rk_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *gmac = priv;
+
+ phy_power_on(gmac, false);
+ gmac_clk_enable(gmac, false);
+}
+
+static void rk_fix_speed(void *priv, unsigned int speed)
+{
+ struct rk_priv_data *bsp_priv = priv;
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
+ set_rgmii_speed(bsp_priv, speed);
+ else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ set_rmii_speed(bsp_priv, speed);
+ else
+ dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
+}
+
+const struct stmmac_of_data rk3288_gmac_data = {
+ .has_gmac = 1,
+ .fix_mac_speed = rk_fix_speed,
+ .setup = rk_gmac_setup,
+ .init = rk_gmac_init,
+ .exit = rk_gmac_exit,
+};
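The new dwmac-rk.c glue programs the MAC mode and clock delays through Rockchip's GRF syscon, where the upper 16 bits of each register act as a write-enable mask for the lower 16; that is what HIWORD_UPDATE() and GRF_BIT()/GRF_CLR_BIT() at the top of the file encode. A worked expansion under that assumption, using the RX delay field as the example:

/* GMAC_CLK_RX_DL_CFG(0x10) = HIWORD_UPDATE(0x10, 0x7F, 7)
 *   = (0x10 << 7) | (0x7F << (7 + 16))
 *   = 0x00000800 | 0x3f800000
 * i.e. the delay value lands in bits [13:7] while the matching write-enable
 * bits [29:23] are set, so the regmap_write() touches only that field.
 * Likewise GRF_BIT(n) sets bit n together with its enable bit (n + 16).
 */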
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 056b358b4a72..bb6e2dc61bec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -122,7 +122,7 @@ struct sti_dwmac {
bool ext_phyclk; /* Clock from external PHY */
u32 tx_retime_src; /* TXCLK Retiming*/
struct clk *clk; /* PHY clock */
- int ctrl_reg; /* GMAC glue-logic control register */
+ u32 ctrl_reg; /* GMAC glue-logic control register */
int clk_sel_reg; /* GMAC ext clk selection register */
struct device *dev;
struct regmap *regmap;
@@ -285,11 +285,6 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (!np)
return -EINVAL;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
- if (!res)
- return -ENODATA;
- dwmac->ctrl_reg = res->start;
-
/* clk selection from extra syscfg register */
dwmac->clk_sel_reg = -ENXIO;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
@@ -300,6 +295,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+ err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
+ if (err) {
+ dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
+ return err;
+ }
+
dwmac->dev = dev;
dwmac->interface = of_get_phy_mode(np);
dwmac->regmap = regmap;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3039de2465ba..879e29f48a89 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -33,6 +33,7 @@
static const struct of_device_id stmmac_dt_ids[] = {
/* SoC specific glue layers should come before generic bindings */
+ { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
{ .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
{ .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 25dd1f7ace02..093eb99e5ffd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -24,5 +24,6 @@ extern const struct stmmac_of_data sun7i_gmac_data;
extern const struct stmmac_of_data stih4xx_dwmac_data;
extern const struct stmmac_of_data stid127_dwmac_data;
extern const struct stmmac_of_data socfpga_gmac_data;
+extern const struct stmmac_of_data rk3288_gmac_data;
#endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index d2835bf7b4fb..b5a1d3d7b0bf 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -351,10 +351,15 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
unsigned int len = desc->size;
unsigned int copy_len;
struct sk_buff *skb;
+ int maxlen;
int err;
err = -EMSGSIZE;
- if (unlikely(len < ETH_ZLEN || len > port->rmtu)) {
+ if (port->tso && port->tsolen > port->rmtu)
+ maxlen = port->tsolen;
+ else
+ maxlen = port->rmtu;
+ if (unlikely(len < ETH_ZLEN || len > maxlen)) {
dev->stats.rx_length_errors++;
goto out_dropped;
}
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 6ab36d9ff2ab..a9cac8413e49 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1650,9 +1650,9 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
txd_mss);
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
/*Cut VLAN ID to 12 bits */
- txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12);
+ txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
txd_vtag = 1;
}
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 4a4388b813ac..fbe42cb107ec 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -157,14 +157,11 @@ static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- s64 now;
unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
spin_lock_irqsave(&cpts->lock, flags);
- now = timecounter_read(&cpts->tc);
- now += delta;
- timecounter_init(&cpts->tc, &cpts->cc, now);
+ timecounter_adjtime(&cpts->tc, delta);
spin_unlock_irqrestore(&cpts->lock, flags);
return 0;
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 1a581ef7eee8..69a46b92c7d6 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
+#include <linux/timecounter.h>
struct cpsw_cpts {
u32 idver; /* Identification and version */
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index a191afc23b56..0ac76102b33d 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1781,8 +1781,8 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
rp->tx_ring[entry].desc_length =
cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
- if (unlikely(vlan_tx_tag_present(skb))) {
- u16 vid_pcp = vlan_tx_tag_get(skb);
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ u16 vid_pcp = skb_vlan_tag_get(skb);
/* drop CFI/DEI bit, register needs VID and PCP */
vid_pcp = (vid_pcp & VLAN_VID_MASK) |
@@ -1803,7 +1803,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
/* Non-x86 Todo: explicitly flush cache lines here. */
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 282f83a63b67..c20206f83cc1 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2611,8 +2611,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
- if (vlan_tx_tag_present(skb)) {
- td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+ if (skb_vlan_tag_present(skb)) {
+ td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
td_ptr->tdesc1.TCR |= TCR0_VETAG;
}
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 9edada85ed02..cd78b7cacc75 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -1736,18 +1736,6 @@ char *addr_to_string(struct fddi_addr *addr)
}
#endif
-#ifdef AM29K
-int smt_ifconfig(int argc, char *argv[])
-{
- if (argc >= 2 && !strcmp(argv[0],"opt_bypass") &&
- !strcmp(argv[1],"yes")) {
- smc->mib.fddiSMTBypassPresent = 1 ;
- return 0;
- }
- return amdfddi_config(0, argc, argv);
-}
-#endif
-
/*
* return static mac index
*/
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 1c0135620c62..7b051eacb7f1 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -427,7 +427,7 @@ at86rf230_reg_precious(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config at86rf230_regmap_spi_config = {
+static const struct regmap_config at86rf230_regmap_spi_config = {
.reg_bits = 8,
.val_bits = 8,
.write_flag_mask = CMD_REG | CMD_WRITE,
@@ -450,7 +450,7 @@ at86rf230_async_error_recover(void *context)
ieee802154_wake_queue(lp->hw);
}
-static void
+static inline void
at86rf230_async_error(struct at86rf230_local *lp,
struct at86rf230_state_change *ctx, int rc)
{
@@ -524,7 +524,6 @@ at86rf230_async_state_assert(void *context)
}
}
-
dev_warn(&lp->spi->dev, "unexcept state change from 0x%02x to 0x%02x. Actual state: 0x%02x\n",
ctx->from_state, ctx->to_state, trx_state);
}
@@ -655,7 +654,7 @@ at86rf230_async_state_change_start(void *context)
if (ctx->irq_enable)
enable_irq(lp->spi->irq);
- at86rf230_async_error(lp, &lp->state, rc);
+ at86rf230_async_error(lp, ctx, rc);
}
}
@@ -715,10 +714,7 @@ at86rf230_tx_complete(void *context)
enable_irq(lp->spi->irq);
- if (lp->max_frame_retries <= 0)
- ieee802154_xmit_complete(lp->hw, skb, true);
- else
- ieee802154_xmit_complete(lp->hw, skb, false);
+ ieee802154_xmit_complete(lp->hw, skb, !lp->tx_aret);
}
static void
@@ -753,16 +749,13 @@ at86rf230_tx_trac_check(void *context)
* to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver
* state to TX_ON.
*/
- if (trac) {
+ if (trac)
at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
at86rf230_tx_trac_error, true);
- return;
- }
-
- at86rf230_tx_on(context);
+ else
+ at86rf230_tx_on(context);
}
-
static void
at86rf230_tx_trac_status(void *context)
{
@@ -1082,7 +1075,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
u16 addr = le16_to_cpu(filt->short_addr);
dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for saddr\n");
+ "at86rf230_set_hw_addr_filt called for saddr\n");
__at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
__at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
}
@@ -1091,7 +1084,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
u16 pan = le16_to_cpu(filt->pan_id);
dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for pan id\n");
+ "at86rf230_set_hw_addr_filt called for pan id\n");
__at86rf230_write(lp, RG_PAN_ID_0, pan);
__at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
}
@@ -1101,14 +1094,14 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
memcpy(addr, &filt->ieee_addr, 8);
dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+ "at86rf230_set_hw_addr_filt called for IEEE addr\n");
for (i = 0; i < 8; i++)
__at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
}
if (changed & IEEE802154_AFILT_PANC_CHANGED) {
dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for panc change\n");
+ "at86rf230_set_hw_addr_filt called for panc change\n");
if (filt->pan_coord)
at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
else
@@ -1146,11 +1139,37 @@ at86rf230_set_lbt(struct ieee802154_hw *hw, bool on)
}
static int
-at86rf230_set_cca_mode(struct ieee802154_hw *hw, u8 mode)
+at86rf230_set_cca_mode(struct ieee802154_hw *hw,
+ const struct wpan_phy_cca *cca)
{
struct at86rf230_local *lp = hw->priv;
+ u8 val;
+
+ /* mapping 802.15.4 to driver spec */
+ switch (cca->mode) {
+ case NL802154_CCA_ENERGY:
+ val = 1;
+ break;
+ case NL802154_CCA_CARRIER:
+ val = 2;
+ break;
+ case NL802154_CCA_ENERGY_CARRIER:
+ switch (cca->opt) {
+ case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
+ val = 3;
+ break;
+ case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
- return at86rf230_write_subreg(lp, SR_CCA_MODE, mode);
+ return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
}
static int
@@ -1400,7 +1419,7 @@ at86rf230_detect_device(struct at86rf230_local *lp)
if (rc)
return rc;
- rc = __at86rf230_read(lp, RG_PART_NUM, &version);
+ rc = __at86rf230_read(lp, RG_VERSION_NUM, &version);
if (rc)
return rc;
@@ -1410,11 +1429,12 @@ at86rf230_detect_device(struct at86rf230_local *lp)
return -EINVAL;
}
- lp->hw->extra_tx_headroom = 0;
lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
+ lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
+
switch (part) {
case 2:
chip = "at86rf230";
@@ -1429,16 +1449,12 @@ at86rf230_detect_device(struct at86rf230_local *lp)
break;
case 7:
chip = "at86rf212";
- if (version == 1) {
- lp->data = &at86rf212_data;
- lp->hw->flags |= IEEE802154_HW_LBT;
- lp->hw->phy->channels_supported[0] = 0x00007FF;
- lp->hw->phy->channels_supported[2] = 0x00007FF;
- lp->hw->phy->current_channel = 5;
- lp->hw->phy->symbol_duration = 25;
- } else {
- rc = -ENOTSUPP;
- }
+ lp->data = &at86rf212_data;
+ lp->hw->flags |= IEEE802154_HW_LBT;
+ lp->hw->phy->channels_supported[0] = 0x00007FF;
+ lp->hw->phy->channels_supported[2] = 0x00007FF;
+ lp->hw->phy->current_channel = 5;
+ lp->hw->phy->symbol_duration = 25;
break;
case 11:
chip = "at86rf233";
@@ -1448,7 +1464,7 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->hw->phy->symbol_duration = 16;
break;
default:
- chip = "unkown";
+ chip = "unknown";
rc = -ENOTSUPP;
break;
}
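
The ieee802154 API change hands set_cca_mode() a struct wpan_phy_cca instead of a raw u8, and the driver maps the nl802154 mode/option pair onto the CCA_MODE subregister values 0..3 in the switch above. The standalone sketch below simply inverts that mapping so the encoding is easy to read back; it is illustrative only and builds as plain userspace C.

#include <stdio.h>

static const char *cca_mode_name(unsigned int reg_val)
{
        /* inverse of the mapping in at86rf230_set_cca_mode() above */
        switch (reg_val) {
        case 0: return "energy OR carrier";
        case 1: return "energy";
        case 2: return "carrier";
        case 3: return "energy AND carrier";
        default: return "invalid";
        }
}

int main(void)
{
        unsigned int v;

        for (v = 0; v < 4; v++)
                printf("CCA_MODE=%u -> %s\n", v, cca_mode_name(v));
        return 0;
}
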
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index f9df9fa86d5f..a43c8acb7268 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -19,7 +19,6 @@
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/of_gpio.h>
#include <linux/ieee802154.h>
@@ -513,7 +512,6 @@ err_tx:
return rc;
}
-
static int cc2520_rx(struct cc2520_private *priv)
{
u8 len = 0, lqi = 0, bytes = 1;
@@ -652,6 +650,7 @@ static int cc2520_register(struct cc2520_private *priv)
priv->hw->parent = &priv->spi->dev;
priv->hw->extra_tx_headroom = 0;
priv->hw->vif_data_size = sizeof(*priv);
+ ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr);
/* We do support only 2.4 Ghz */
priv->hw->phy->channels_supported[0] = 0x7FFF800;
@@ -842,24 +841,15 @@ done:
static int cc2520_probe(struct spi_device *spi)
{
struct cc2520_private *priv;
- struct pinctrl *pinctrl;
struct cc2520_platform_data *pdata;
int ret;
- priv = devm_kzalloc(&spi->dev,
- sizeof(struct cc2520_private), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_ret;
- }
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
spi_set_drvdata(spi, priv);
- pinctrl = devm_pinctrl_get_select_default(&spi->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&spi->dev,
- "pinctrl pins are not configured\n");
-
pdata = cc2520_get_platform_data(spi);
if (!pdata) {
dev_err(&spi->dev, "no platform data\n");
@@ -870,10 +860,8 @@ static int cc2520_probe(struct spi_device *spi)
priv->buf = devm_kzalloc(&spi->dev,
SPI_COMMAND_BUFFER, GFP_KERNEL);
- if (!priv->buf) {
- ret = -ENOMEM;
- goto err_ret;
- }
+ if (!priv->buf)
+ return -ENOMEM;
mutex_init(&priv->buffer_mutex);
INIT_WORK(&priv->fifop_irqwork, cc2520_fifop_irqwork);
@@ -947,7 +935,6 @@ static int cc2520_probe(struct spi_device *spi)
if (ret)
goto err_hw_init;
-
gpio_set_value(pdata->vreg, HIGH);
usleep_range(100, 150);
@@ -991,8 +978,6 @@ static int cc2520_probe(struct spi_device *spi)
err_hw_init:
mutex_destroy(&priv->buffer_mutex);
flush_work(&priv->fifop_irqwork);
-
-err_ret:
return ret;
}
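
cc2520_probe() can drop its err_ret label because both buffers are device-managed: devm_kzalloc() memory is released automatically when probe fails or the device is unbound, so an allocation failure may simply return. A minimal sketch of the pattern with invented names (example_priv, example_probe); nothing beyond devm_kzalloc() is assumed.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

struct example_priv {
        void *buf;
};

static int example_probe(struct spi_device *spi)
{
        struct example_priv *priv;

        priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;         /* nothing to unwind, devm owns it */

        priv->buf = devm_kzalloc(&spi->dev, 64, GFP_KERNEL);
        if (!priv->buf)
                return -ENOMEM;         /* priv is freed by the devm core */

        spi_set_drvdata(spi, priv);
        return 0;
}
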
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index a200fa16beae..fba2dfd910f7 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -289,7 +289,7 @@ static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
goto out;
/* Range check the RX FIFO length, accounting for the one-byte
- * length field at the begining. */
+ * length field at the beginning. */
if (rx_len > RX_FIFO_SIZE-1) {
dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n");
rx_len = RX_FIFO_SIZE-1;
@@ -323,7 +323,7 @@ static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
#ifdef DEBUG
print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ",
- DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
+ DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
lqi_rssi[0], lqi_rssi[1]);
#endif
@@ -521,7 +521,7 @@ static int mrf24j40_filter(struct ieee802154_hw *hw,
*/
dev_dbg(printdev(devrec), "Set Pan Coord to %s\n",
- filt->pan_coord ? "on" : "off");
+ filt->pan_coord ? "on" : "off");
}
return 0;
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 58f98f4de773..58ae11a14bb6 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1462,17 +1462,12 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
if (mtt)
{
/* Check how much time we have used already */
- do_gettimeofday(&self->now);
-
- diff = self->now.tv_usec - self->stamp.tv_usec;
+ diff = ktime_us_delta(ktime_get(), self->stamp);
/* self->stamp is set from ali_ircc_dma_receive_complete() */
pr_debug("%s(), ******* diff = %d *******\n",
__func__, diff);
-
- if (diff < 0)
- diff += 1000000;
-
+
/* Check if the mtt is larger than the time we have
* already used by all the protocol processing
*/
@@ -1884,7 +1879,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
* reduce the min turn time a bit since we will know
* how much time we have used for protocol processing
*/
- do_gettimeofday(&self->stamp);
+ self->stamp = ktime_get();
skb = dev_alloc_skb(len+1);
if (skb == NULL)
diff --git a/drivers/net/irda/ali-ircc.h b/drivers/net/irda/ali-ircc.h
index 0c8edb41bd0a..c2d9747a5108 100644
--- a/drivers/net/irda/ali-ircc.h
+++ b/drivers/net/irda/ali-ircc.h
@@ -22,7 +22,7 @@
#ifndef ALI_IRCC_H
#define ALI_IRCC_H
-#include <linux/time.h>
+#include <linux/ktime.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
@@ -209,8 +209,7 @@ struct ali_ircc_cb {
unsigned char rcvFramesOverflow;
- struct timeval stamp;
- struct timeval now;
+ ktime_t stamp;
spinlock_t lock; /* For serializing operations */
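
ali-ircc, and the other IrDA drivers converted below, replace the timeval pair plus manual microsecond wraparound correction with a single monotonic ktime_t stamp: record ktime_get() when RX completes and later ask ktime_us_delta() how long ago that was. A minimal sketch of that pattern, assuming only microsecond resolution is needed.

#include <linux/ktime.h>

static ktime_t rx_stamp;

static void mark_rx_complete(void)
{
        rx_stamp = ktime_get();         /* monotonic, never jumps backwards */
}

static s64 usecs_since_rx(void)
{
        return ktime_us_delta(ktime_get(), rx_stamp);
}
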
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index e151205281e2..44e4f386a5dc 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -24,7 +24,6 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/time.h>
#include <linux/types.h>
#include <linux/ioport.h>
@@ -163,8 +162,6 @@ struct au1k_private {
iobuff_t rx_buff;
struct net_device *netdev;
- struct timeval stamp;
- struct timeval now;
struct qos_info qos;
struct irlap_cb *irlap;
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 48b2f9a321b7..f6c916312577 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -495,18 +495,12 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
mtt = irda_get_mtt(skb);
if (mtt) {
int diff;
- do_gettimeofday(&self->now);
- diff = self->now.tv_usec - self->stamp.tv_usec;
+ diff = ktime_us_delta(ktime_get(), self->stamp);
#ifdef IU_USB_MIN_RTT
/* Factor in USB delays -> Get rid of udelay() that
* would be lost in the noise - Jean II */
diff += IU_USB_MIN_RTT;
#endif /* IU_USB_MIN_RTT */
- /* If the usec counter did wraparound, the diff will
- * go negative (tv_usec is a long), so we need to
- * correct it by one second. Jean II */
- if (diff < 0)
- diff += 1000000;
/* Check if the mtt is larger than the time we have
* already used by all the protocol processing
@@ -869,7 +863,7 @@ static void irda_usb_receive(struct urb *urb)
* reduce the min turn time a bit since we will know
* how much time we have used for protocol processing
*/
- do_gettimeofday(&self->stamp);
+ self->stamp = ktime_get();
/* Check if we need to copy the data to a new skb or not.
* For most frames, we use ZeroCopy and pass the already
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index 58ddb5214916..8ac389fa9348 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -29,7 +29,7 @@
*
*****************************************************************************/
-#include <linux/time.h>
+#include <linux/ktime.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h> /* struct irlap_cb */
@@ -157,8 +157,7 @@ struct irda_usb_cb {
char *speed_buff; /* Buffer for speed changes */
char *tx_buff;
- struct timeval stamp;
- struct timeval now;
+ ktime_t stamp;
spinlock_t lock; /* For serializing Tx operations */
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index e638893e98a9..fb5d162ec7d2 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -114,7 +114,6 @@ struct kingsun_cb {
(usually 8) */
iobuff_t rx_buff; /* receive unwrap state machine */
- struct timeval rx_time;
spinlock_t lock;
int receiving;
@@ -235,7 +234,6 @@ static void kingsun_rcv_irq(struct urb *urb)
&kingsun->netdev->stats,
&kingsun->rx_buff, bytes[i]);
}
- do_gettimeofday(&kingsun->rx_time);
kingsun->receiving =
(kingsun->rx_buff.state != OUTSIDE_FRAME)
? 1 : 0;
@@ -273,7 +271,6 @@ static int kingsun_net_open(struct net_device *netdev)
skb_reserve(kingsun->rx_buff.skb, 1);
kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
- do_gettimeofday(&kingsun->rx_time);
kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kingsun->rx_urb)
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index e6b3804edacd..8e6e0edf2440 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -187,7 +187,6 @@ struct ks959_cb {
__u8 *rx_buf;
__u8 rx_variable_xormask;
iobuff_t rx_unwrap_buff;
- struct timeval rx_time;
struct usb_ctrlrequest *speed_setuprequest;
struct urb *speed_urb;
@@ -476,7 +475,6 @@ static void ks959_rcv_irq(struct urb *urb)
bytes[i]);
}
}
- do_gettimeofday(&kingsun->rx_time);
kingsun->receiving =
(kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
}
@@ -514,7 +512,6 @@ static int ks959_net_open(struct net_device *netdev)
skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;
- do_gettimeofday(&kingsun->rx_time);
kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kingsun->rx_urb)
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index e4d678fbeb2f..bca6a1e72d1d 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -722,7 +722,6 @@ static int mcs_net_open(struct net_device *netdev)
skb_reserve(mcs->rx_buff.skb, 1);
mcs->rx_buff.head = mcs->rx_buff.skb->data;
- do_gettimeofday(&mcs->rx_time);
/*
* Now that everything should be initialized properly,
@@ -799,7 +798,6 @@ static void mcs_receive_irq(struct urb *urb)
mcs_unwrap_fir(mcs, urb->transfer_buffer,
urb->actual_length);
}
- do_gettimeofday(&mcs->rx_time);
}
ret = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/net/irda/mcs7780.h b/drivers/net/irda/mcs7780.h
index b10689b2887c..a6e8f7dbafc9 100644
--- a/drivers/net/irda/mcs7780.h
+++ b/drivers/net/irda/mcs7780.h
@@ -116,7 +116,6 @@ struct mcs_cb {
__u8 *fifo_status;
iobuff_t rx_buff; /* receive unwrap state machine */
- struct timeval rx_time;
spinlock_t lock;
int receiving;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index e7317b104bfb..dc0dbd8dd0b5 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1501,10 +1501,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
mtt = irda_get_mtt(skb);
if (mtt) {
/* Check how much time we have used already */
- do_gettimeofday(&self->now);
- diff = self->now.tv_usec - self->stamp.tv_usec;
- if (diff < 0)
- diff += 1000000;
+ diff = ktime_us_delta(ktime_get(), self->stamp);
/* Check if the mtt is larger than the time we have
* already used by all the protocol processing
@@ -1867,7 +1864,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
* reduce the min turn time a bit since we will know
* how much time we have used for protocol processing
*/
- do_gettimeofday(&self->stamp);
+ self->stamp = ktime_get();
skb = dev_alloc_skb(len+1);
if (skb == NULL) {
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index 32fa58211fad..7be5acb56532 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -28,7 +28,7 @@
#ifndef NSC_IRCC_H
#define NSC_IRCC_H
-#include <linux/time.h>
+#include <linux/ktime.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
@@ -263,8 +263,7 @@ struct nsc_ircc_cb {
__u8 ier; /* Interrupt enable register */
- struct timeval stamp;
- struct timeval now;
+ ktime_t stamp;
spinlock_t lock; /* For serializing operations */
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 7b17fa2114e1..b6e44ff4e373 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -38,7 +38,7 @@
#include <net/irda/irda_device.h>
#include <mach/hardware.h>
-#include <asm/mach/irda.h>
+#include <linux/platform_data/irda-sa11x0.h>
static int power_level = 3;
static int tx_lpm;
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index dd1bd1060ec9..83cc48a01802 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -40,6 +40,7 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/skbuff.h>
@@ -174,7 +175,7 @@ struct stir_cb {
__u8 *fifo_status;
iobuff_t rx_buff; /* receive unwrap state machine */
- struct timeval rx_time;
+ ktime_t rx_time;
int receiving;
struct urb *rx_urb;
};
@@ -650,15 +651,12 @@ static int fifo_txwait(struct stir_cb *stir, int space)
static void turnaround_delay(const struct stir_cb *stir, long us)
{
long ticks;
- struct timeval now;
if (us <= 0)
return;
- do_gettimeofday(&now);
- if (now.tv_sec - stir->rx_time.tv_sec > 0)
- us -= USEC_PER_SEC;
- us -= now.tv_usec - stir->rx_time.tv_usec;
+ us -= ktime_us_delta(ktime_get(), stir->rx_time);
+
if (us < 10)
return;
@@ -823,8 +821,8 @@ static void stir_rcv_irq(struct urb *urb)
pr_debug("receive %d\n", urb->actual_length);
unwrap_chars(stir, urb->transfer_buffer,
urb->actual_length);
-
- do_gettimeofday(&stir->rx_time);
+
+ stir->rx_time = ktime_get();
}
/* kernel thread is stopping receiver don't resubmit */
@@ -876,7 +874,7 @@ static int stir_net_open(struct net_device *netdev)
skb_reserve(stir->rx_buff.skb, 1);
stir->rx_buff.head = stir->rx_buff.skb->data;
- do_gettimeofday(&stir->rx_time);
+ stir->rx_time = ktime_get();
stir->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!stir->rx_urb)
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index 7ce820ecc361..ac1525573398 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -29,7 +29,6 @@ this program; if not, see <http://www.gnu.org/licenses/>.
********************************************************************/
#ifndef via_IRCC_H
#define via_IRCC_H
-#include <linux/time.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/types.h>
@@ -106,9 +105,6 @@ struct via_ircc_cb {
__u8 ier; /* Interrupt enable register */
- struct timeval stamp;
- struct timeval now;
-
spinlock_t lock; /* For serializing operations */
__u32 flags; /* Interface flags */
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index ac39d9f33d5f..a0849f49bbec 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -33,6 +33,7 @@ MODULE_LICENSE("GPL");
/********************************************************/
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -40,9 +41,9 @@ MODULE_LICENSE("GPL");
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
-#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/math64.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -180,8 +181,7 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
vlsi_irda_dev_t *idev = netdev_priv(ndev);
u8 byte;
u16 word;
- unsigned delta1, delta2;
- struct timeval now;
+ s32 sec, usec;
unsigned iobase = ndev->base_addr;
seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
@@ -277,17 +277,9 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
seq_printf(seq, "\nsw-state:\n");
seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
(idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
- do_gettimeofday(&now);
- if (now.tv_usec >= idev->last_rx.tv_usec) {
- delta2 = now.tv_usec - idev->last_rx.tv_usec;
- delta1 = 0;
- }
- else {
- delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec;
- delta1 = 1;
- }
- seq_printf(seq, "last rx: %lu.%06u sec\n",
- now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);
+ sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx),
+ USEC_PER_SEC, &usec);
+ seq_printf(seq, "last rx: %ul.%06u sec\n", sec, usec);
seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
@@ -661,7 +653,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)
}
}
- do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */
+ idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */
vlsi_fill_rx(r);
@@ -858,9 +850,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
unsigned iobase = ndev->base_addr;
u8 status;
u16 config;
- int mtt;
+ int mtt, diff;
int len, speed;
- struct timeval now, ready;
char *msg = NULL;
speed = irda_get_next_speed(skb);
@@ -940,21 +931,10 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&idev->lock, flags);
if ((mtt = irda_get_mtt(skb)) > 0) {
-
- ready.tv_usec = idev->last_rx.tv_usec + mtt;
- ready.tv_sec = idev->last_rx.tv_sec;
- if (ready.tv_usec >= 1000000) {
- ready.tv_usec -= 1000000;
- ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */
- }
- for(;;) {
- do_gettimeofday(&now);
- if (now.tv_sec > ready.tv_sec ||
- (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
- break;
- udelay(100);
+ diff = ktime_us_delta(ktime_get(), idev->last_rx);
+ if (mtt > diff)
+ udelay(mtt - diff);
/* must not sleep here - called under netif_tx_lock! */
- }
}
/* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
@@ -1333,7 +1313,7 @@ static int vlsi_start_hw(vlsi_irda_dev_t *idev)
vlsi_fill_rx(idev->rx_ring);
- do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
+ idev->last_rx = ktime_get(); /* first mtt may start from now on */
outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */
@@ -1520,7 +1500,7 @@ static int vlsi_open(struct net_device *ndev)
if (!idev->irlap)
goto errout_free_ring;
- do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
+ idev->last_rx = ktime_get(); /* first mtt may start from now on */
idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index f9119c6d2a09..f9db2ce4c5c6 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -723,7 +723,7 @@ typedef struct vlsi_irda_dev {
void *virtaddr;
struct vlsi_ring *tx_ring, *rx_ring;
- struct timeval last_rx;
+ ktime_t last_rx;
spinlock_t lock;
struct mutex mtx;
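
vlsi_ir drops its absolute-deadline polling loop: instead of recomputing a ready time in timeval form and spinning on do_gettimeofday(), it measures once how much of the minimum turnaround time (mtt) has already elapsed since the last RX and busy-waits only for the remainder; it must not sleep because this runs under netif_tx_lock. A hedged sketch of that logic:

#include <linux/delay.h>
#include <linux/ktime.h>

static void wait_remaining_mtt(ktime_t last_rx, int mtt_us)
{
        s64 elapsed = ktime_us_delta(ktime_get(), last_rx);

        /* IrLAP 1.1 keeps mtt below one second, so the remainder fits udelay() */
        if (mtt_us > elapsed)
                udelay(mtt_us - elapsed);
}
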
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 7df221788cd4..d0ed5694dd7d 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -645,7 +645,7 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
vnet_hdr->csum_start = cpu_to_macvtap16(q,
skb_checksum_start_offset(skb) + VLAN_HLEN);
else
@@ -821,13 +821,13 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
total = vnet_hdr_len;
total += skb->len;
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
struct {
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
} veth;
veth.h_vlan_proto = skb->vlan_proto;
- veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+ veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
total += VLAN_HLEN;
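
macvtap, like tun further down, keeps the VLAN tag out of band in skb metadata, so when handing a frame to userspace it rebuilds the four-byte 802.1Q header from skb->vlan_proto and skb_vlan_tag_get(), and shifts csum_start by VLAN_HLEN so the exported checksum offsets still line up. A sketch of that header reconstruction, mirroring the on-stack veth struct used above:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

struct qtag {
        __be16 h_vlan_proto;            /* typically htons(ETH_P_8021Q) */
        __be16 h_vlan_TCI;
};

static void build_qtag(const struct sk_buff *skb, struct qtag *tag)
{
        tag->h_vlan_proto = skb->vlan_proto;
        tag->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
}
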
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f7ff493f1e73..4b2bfc52c52f 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -176,7 +176,6 @@ static int __team_option_inst_add(struct team *team, struct team_option *option,
static int __team_option_inst_add_option(struct team *team,
struct team_option *option)
{
- struct team_port *port;
int err;
if (!option->per_port) {
@@ -184,12 +183,6 @@ static int __team_option_inst_add_option(struct team *team,
if (err)
goto inst_del_option;
}
-
- list_for_each_entry(port, &team->port_list, list) {
- err = __team_option_inst_add(team, option, port);
- if (err)
- goto inst_del_option;
- }
return 0;
inst_del_option:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8c8dc16839a7..be196e89ab6c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -124,10 +124,9 @@ struct tap_filter {
unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};
-/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for
- * the netdevice to be fit in one page. So we can make sure the success of
- * memory allocation. TODO: increase the limit. */
-#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
+ * to max number of VCPUs in guest. */
+#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS 4096
#define TUN_FLOW_EXPIRE (3 * HZ)
@@ -1261,7 +1260,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
int vlan_hlen = 0;
int vnet_hdr_sz = 0;
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
vlan_hlen = VLAN_HLEN;
if (tun->flags & IFF_VNET_HDR)
@@ -1338,7 +1337,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
} veth;
veth.h_vlan_proto = skb->vlan_proto;
- veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+ veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
@@ -1380,7 +1379,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
&peeked, &off, &err);
if (!skb)
- return 0;
+ return err;
ret = tun_put_user(tun, tfile, skb, to);
if (unlikely(ret < 0))
@@ -1501,7 +1500,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
}
ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
- if (ret > total_len) {
+ if (ret > (ssize_t)total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 57ec23e8ccfa..2e2244221cbe 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -27,7 +27,7 @@
#include <linux/usb/cdc.h>
/* Version Information */
-#define DRIVER_VERSION "v1.07.0 (2014/10/09)"
+#define DRIVER_VERSION "v1.08.0 (2015/01/13)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@@ -448,6 +448,7 @@ enum rtl_register_content {
#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + VLAN_HLEN)
#define RTL8153_RMS RTL8153_MAX_PACKET
#define RTL8152_TX_TIMEOUT (5 * HZ)
+#define RTL8152_NAPI_WEIGHT 64
/* rtl8152 flags */
enum rtl8152_flags {
@@ -457,7 +458,7 @@ enum rtl8152_flags {
RTL8152_LINK_CHG,
SELECTIVE_SUSPEND,
PHY_RESET,
- SCHEDULE_TASKLET,
+ SCHEDULE_NAPI,
};
/* Define these values to match your device */
@@ -549,14 +550,14 @@ struct tx_agg {
struct r8152 {
unsigned long flags;
struct usb_device *udev;
- struct tasklet_struct tl;
+ struct napi_struct napi;
struct usb_interface *intf;
struct net_device *netdev;
struct urb *intr_urb;
struct tx_agg tx_info[RTL8152_MAX_TX];
struct rx_agg rx_info[RTL8152_MAX_RX];
struct list_head rx_done, tx_free;
- struct sk_buff_head tx_queue;
+ struct sk_buff_head tx_queue, rx_queue;
spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
@@ -1062,7 +1063,7 @@ static void read_bulk_callback(struct urb *urb)
spin_lock(&tp->rx_lock);
list_add_tail(&agg->list, &tp->rx_done);
spin_unlock(&tp->rx_lock);
- tasklet_schedule(&tp->tl);
+ napi_schedule(&tp->napi);
return;
case -ESHUTDOWN:
set_bit(RTL8152_UNPLUG, &tp->flags);
@@ -1126,7 +1127,7 @@ static void write_bulk_callback(struct urb *urb)
return;
if (!skb_queue_empty(&tp->tx_queue))
- tasklet_schedule(&tp->tl);
+ napi_schedule(&tp->napi);
}
static void intr_callback(struct urb *urb)
@@ -1245,6 +1246,7 @@ static int alloc_all_mem(struct r8152 *tp)
spin_lock_init(&tp->tx_lock);
INIT_LIST_HEAD(&tp->tx_free);
skb_queue_head_init(&tp->tx_queue);
+ skb_queue_head_init(&tp->rx_queue);
for (i = 0; i < RTL8152_MAX_RX; i++) {
buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
@@ -1421,10 +1423,10 @@ static int msdn_giant_send_check(struct sk_buff *skb)
static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb)
{
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
u32 opts2;
- opts2 = TX_VLAN_TAG | swab16(vlan_tx_tag_get(skb));
+ opts2 = TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb));
desc->opts2 |= cpu_to_le32(opts2);
}
}
@@ -1649,13 +1651,32 @@ return_result:
return checksum;
}
-static void rx_bottom(struct r8152 *tp)
+static int rx_bottom(struct r8152 *tp, int budget)
{
unsigned long flags;
struct list_head *cursor, *next, rx_queue;
+ int work_done = 0;
+
+ if (!skb_queue_empty(&tp->rx_queue)) {
+ while (work_done < budget) {
+ struct sk_buff *skb = __skb_dequeue(&tp->rx_queue);
+ struct net_device *netdev = tp->netdev;
+ struct net_device_stats *stats = &netdev->stats;
+ unsigned int pkt_len;
+
+ if (!skb)
+ break;
+
+ pkt_len = skb->len;
+ napi_gro_receive(&tp->napi, skb);
+ work_done++;
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ }
+ }
if (list_empty(&tp->rx_done))
- return;
+ goto out1;
INIT_LIST_HEAD(&rx_queue);
spin_lock_irqsave(&tp->rx_lock, flags);
@@ -1708,9 +1729,14 @@ static void rx_bottom(struct r8152 *tp)
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, netdev);
rtl_rx_vlan_tag(rx_desc, skb);
- netif_receive_skb(skb);
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
+ if (work_done < budget) {
+ napi_gro_receive(&tp->napi, skb);
+ work_done++;
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ } else {
+ __skb_queue_tail(&tp->rx_queue, skb);
+ }
find_next_rx:
rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
@@ -1722,6 +1748,9 @@ find_next_rx:
submit:
r8152_submit_rx(tp, agg, GFP_ATOMIC);
}
+
+out1:
+ return work_done;
}
static void tx_bottom(struct r8152 *tp)
@@ -1761,12 +1790,8 @@ static void tx_bottom(struct r8152 *tp)
} while (res == 0);
}
-static void bottom_half(unsigned long data)
+static void bottom_half(struct r8152 *tp)
{
- struct r8152 *tp;
-
- tp = (struct r8152 *)data;
-
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@@ -1778,17 +1803,38 @@ static void bottom_half(unsigned long data)
if (!netif_carrier_ok(tp->netdev))
return;
- clear_bit(SCHEDULE_TASKLET, &tp->flags);
+ clear_bit(SCHEDULE_NAPI, &tp->flags);
- rx_bottom(tp);
tx_bottom(tp);
}
+static int r8152_poll(struct napi_struct *napi, int budget)
+{
+ struct r8152 *tp = container_of(napi, struct r8152, napi);
+ int work_done;
+
+ work_done = rx_bottom(tp, budget);
+ bottom_half(tp);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(napi);
+ }
+
+ return work_done;
+}
+
static
int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
{
int ret;
+ /* The rx would be stopped, so skip submitting */
+ if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
+ !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
+ return 0;
+
usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
agg->head, agg_buf_sz,
(usb_complete_t)read_bulk_callback, agg);
@@ -1805,7 +1851,11 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
spin_lock_irqsave(&tp->rx_lock, flags);
list_add_tail(&agg->list, &tp->rx_done);
spin_unlock_irqrestore(&tp->rx_lock, flags);
- tasklet_schedule(&tp->tl);
+
+ netif_err(tp, rx_err, tp->netdev,
+ "Couldn't submit rx[%p], ret = %d\n", agg, ret);
+
+ napi_schedule(&tp->napi);
}
return ret;
@@ -1924,11 +1974,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
if (!list_empty(&tp->tx_free)) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- set_bit(SCHEDULE_TASKLET, &tp->flags);
+ set_bit(SCHEDULE_NAPI, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
} else {
usb_mark_last_busy(tp->udev);
- tasklet_schedule(&tp->tl);
+ napi_schedule(&tp->napi);
}
} else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) {
netif_stop_queue(netdev);
@@ -2007,6 +2057,7 @@ static int rtl_start_rx(struct r8152 *tp)
{
int i, ret = 0;
+ napi_disable(&tp->napi);
INIT_LIST_HEAD(&tp->rx_done);
for (i = 0; i < RTL8152_MAX_RX; i++) {
INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2014,6 +2065,7 @@ static int rtl_start_rx(struct r8152 *tp)
if (ret)
break;
}
+ napi_enable(&tp->napi);
if (ret && ++i < RTL8152_MAX_RX) {
struct list_head rx_queue;
@@ -2044,6 +2096,9 @@ static int rtl_stop_rx(struct r8152 *tp)
for (i = 0; i < RTL8152_MAX_RX; i++)
usb_kill_urb(tp->rx_info[i].urb);
+ while (!skb_queue_empty(&tp->rx_queue))
+ dev_kfree_skb(__skb_dequeue(&tp->rx_queue));
+
return 0;
}
@@ -2059,7 +2114,7 @@ static int rtl_enable(struct r8152 *tp)
rxdy_gated_en(tp, false);
- return rtl_start_rx(tp);
+ return 0;
}
static int rtl8152_enable(struct r8152 *tp)
@@ -2874,13 +2929,14 @@ static void set_carrier(struct r8152 *tp)
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_carrier_on(netdev);
+ rtl_start_rx(tp);
}
} else {
if (tp->speed & LINK_STATUS) {
netif_carrier_off(netdev);
- tasklet_disable(&tp->tl);
+ napi_disable(&tp->napi);
tp->rtl_ops.disable(tp);
- tasklet_enable(&tp->tl);
+ napi_enable(&tp->napi);
}
}
tp->speed = speed;
@@ -2913,10 +2969,11 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
- if (test_bit(SCHEDULE_TASKLET, &tp->flags) &&
+ /* don't schedule napi before linking */
+ if (test_bit(SCHEDULE_NAPI, &tp->flags) &&
(tp->speed & LINK_STATUS)) {
- clear_bit(SCHEDULE_TASKLET, &tp->flags);
- tasklet_schedule(&tp->tl);
+ clear_bit(SCHEDULE_NAPI, &tp->flags);
+ napi_schedule(&tp->napi);
}
if (test_bit(PHY_RESET, &tp->flags))
@@ -2977,7 +3034,7 @@ static int rtl8152_open(struct net_device *netdev)
res);
free_all_mem(tp);
} else {
- tasklet_enable(&tp->tl);
+ napi_enable(&tp->napi);
}
mutex_unlock(&tp->control);
@@ -2993,7 +3050,7 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
- tasklet_disable(&tp->tl);
+ napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
@@ -3002,6 +3059,7 @@ static int rtl8152_close(struct net_device *netdev)
res = usb_autopm_get_interface(tp->intf);
if (res < 0) {
rtl_drop_queued_tx(tp);
+ rtl_stop_rx(tp);
} else {
mutex_lock(&tp->control);
@@ -3257,7 +3315,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
- tasklet_disable(&tp->tl);
+ napi_disable(&tp->napi);
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_stop_rx(tp);
rtl_runtime_suspend_enable(tp, true);
@@ -3265,7 +3323,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
}
- tasklet_enable(&tp->tl);
+ napi_enable(&tp->napi);
}
out1:
mutex_unlock(&tp->control);
@@ -3849,7 +3907,6 @@ static int rtl8152_probe(struct usb_interface *intf,
if (ret)
goto out;
- tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
mutex_init(&tp->control);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
@@ -3885,6 +3942,7 @@ static int rtl8152_probe(struct usb_interface *intf,
set_ethernet_addr(tp);
usb_set_intfdata(intf, tp);
+ netif_napi_add(netdev, &tp->napi, r8152_poll, RTL8152_NAPI_WEIGHT);
ret = register_netdev(netdev);
if (ret != 0) {
@@ -3898,15 +3956,13 @@ static int rtl8152_probe(struct usb_interface *intf,
else
device_set_wakeup_enable(&udev->dev, false);
- tasklet_disable(&tp->tl);
-
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
out1:
+ netif_napi_del(&tp->napi);
usb_set_intfdata(intf, NULL);
- tasklet_kill(&tp->tl);
out:
free_netdev(netdev);
return ret;
@@ -3923,7 +3979,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
if (udev->state == USB_STATE_NOTATTACHED)
set_bit(RTL8152_UNPLUG, &tp->flags);
- tasklet_kill(&tp->tl);
+ netif_napi_del(&tp->napi);
unregister_netdev(tp->netdev);
tp->rtl_ops.unload(tp);
free_netdev(tp->netdev);
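
The r8152 rework retires the tasklet in favour of NAPI: URB completion handlers call napi_schedule(), r8152_poll() delivers at most budget packets through napi_gro_receive() and parks any surplus on rx_queue, and polling is re-armed after napi_complete() only if more completions arrived in the meantime. Below is a generic sketch of that poll contract; my_dev and the two stubs are placeholders, not r8152 code. In the patch itself the handler is registered with netif_napi_add(netdev, &tp->napi, r8152_poll, RTL8152_NAPI_WEIGHT).

#include <linux/netdevice.h>

struct my_dev {
        struct napi_struct napi;
};

static int my_rx_clean(struct my_dev *dev, int budget)
{
        return 0;                       /* stub: deliver up to budget skbs */
}

static bool my_more_rx_pending(struct my_dev *dev)
{
        return false;                   /* stub: did completions queue up meanwhile? */
}

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_dev *dev = container_of(napi, struct my_dev, napi);
        int work_done = my_rx_clean(dev, budget);

        if (work_done < budget) {
                napi_complete(napi);
                if (my_more_rx_pending(dev))    /* lost a race, poll again */
                        napi_schedule(napi);
        }
        return work_done;
}
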
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5ca97713bfb3..11e2e8131359 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1759,6 +1759,8 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
dev->hw_features |= NETIF_F_TSO_ECN;
+ dev->features |= NETIF_F_GSO_ROBUST;
+
if (gso)
dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
/* (!csum && gso) case will be fixed by register_netdev() */
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 4d84912c99ba..25b6fa4810b5 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -342,6 +342,7 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
#define VMXNET3_RX_RING_MAX_SIZE 4096
+#define VMXNET3_RX_RING2_MAX_SIZE 2048
#define VMXNET3_RC_RING_MAX_SIZE 8192
/* a list of reasons for queue stop */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index afd295348ddb..294214c15292 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1038,9 +1038,9 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
le32_add_cpu(&tq->shared->txNumDeferred, 1);
}
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
gdesc->txd.ti = 1;
- gdesc->txd.tci = vlan_tx_tag_get(skb);
+ gdesc->txd.tci = skb_vlan_tag_get(skb);
}
/* finally flips the GEN bit of the SOP desc. */
@@ -2505,6 +2505,9 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
sz * sz);
ring1_size = adapter->rx_queue[0].rx_ring[1].size;
+ ring1_size = (ring1_size + sz - 1) / sz * sz;
+ ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
+ sz * sz);
comp_size = ring0_size + ring1_size;
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -2585,7 +2588,7 @@ vmxnet3_open(struct net_device *netdev)
err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
adapter->rx_ring_size,
- VMXNET3_DEF_RX_RING_SIZE);
+ adapter->rx_ring2_size);
if (err)
goto queue_err;
@@ -2964,6 +2967,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+ adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
spin_lock_init(&adapter->cmd_lock);
adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
@@ -3286,27 +3290,15 @@ skip_arp:
static int
vmxnet3_resume(struct device *device)
{
- int err, i = 0;
+ int err;
unsigned long flags;
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- struct Vmxnet3_PMConf *pmConf;
if (!netif_running(netdev))
return 0;
- /* Destroy wake-up filters. */
- pmConf = adapter->pm_conf;
- memset(pmConf, 0, sizeof(*pmConf));
-
- adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
- adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
- *pmConf));
- adapter->shared->devRead.pmConfDesc.confPA =
- cpu_to_le64(adapter->pm_conf_pa);
-
- netif_device_attach(netdev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device_mem(pdev);
@@ -3315,15 +3307,31 @@ vmxnet3_resume(struct device *device)
pci_enable_wake(pdev, PCI_D0, 0);
+ vmxnet3_alloc_intr_resources(adapter);
+
+ /* During hibernate and suspend, device has to be reinitialized as the
+ * device state need not be preserved.
+ */
+
+ /* Need not check adapter state as other reset tasks cannot run during
+ * device resume.
+ */
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_PMCFG);
+ VMXNET3_CMD_QUIESCE_DEV);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- vmxnet3_alloc_intr_resources(adapter);
- vmxnet3_request_irqs(adapter);
- for (i = 0; i < adapter->num_rx_queues; i++)
- napi_enable(&adapter->rx_queue[i].napi);
- vmxnet3_enable_all_intrs(adapter);
+ vmxnet3_tq_cleanup_all(adapter);
+ vmxnet3_rq_cleanup_all(adapter);
+
+ vmxnet3_reset_dev(adapter);
+ err = vmxnet3_activate_dev(adapter);
+ if (err != 0) {
+ netdev_err(netdev,
+ "failed to re-activate on resume, error: %d", err);
+ vmxnet3_force_close(adapter);
+ return err;
+ }
+ netif_device_attach(netdev);
return 0;
}
@@ -3331,6 +3339,8 @@ vmxnet3_resume(struct device *device)
static const struct dev_pm_ops vmxnet3_pm_ops = {
.suspend = vmxnet3_suspend,
.resume = vmxnet3_resume,
+ .freeze = vmxnet3_suspend,
+ .restore = vmxnet3_resume,
};
#endif
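
The added .freeze/.restore hooks make hibernation share the same handlers as suspend/resume, and the reworked resume path quiesces, cleans up and fully re-activates the device instead of only rewriting the wake-up configuration. A sketch of the dev_pm_ops wiring with placeholder handlers:

#include <linux/device.h>
#include <linux/pm.h>

static int my_suspend(struct device *dev) { return 0; }        /* stub */
static int my_resume(struct device *dev)  { return 0; }        /* stub */

static const struct dev_pm_ops my_pm_ops = {
        .suspend = my_suspend,
        .resume  = my_resume,
        .freeze  = my_suspend,          /* hibernation image creation */
        .restore = my_resume,           /* resume from hibernation image */
};
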
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b7b53329d575..8a5a90eeb4f9 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -447,12 +447,12 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
param->rx_mini_max_pending = 0;
- param->rx_jumbo_max_pending = 0;
+ param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
param->rx_pending = adapter->rx_ring_size;
param->tx_pending = adapter->tx_ring_size;
param->rx_mini_pending = 0;
- param->rx_jumbo_pending = 0;
+ param->rx_jumbo_pending = adapter->rx_ring2_size;
}
@@ -461,7 +461,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *param)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- u32 new_tx_ring_size, new_rx_ring_size;
+ u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
u32 sz;
int err = 0;
@@ -473,6 +473,10 @@ vmxnet3_set_ringparam(struct net_device *netdev,
VMXNET3_RX_RING_MAX_SIZE)
return -EINVAL;
+ if (param->rx_jumbo_pending == 0 ||
+ param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
+ return -EINVAL;
+
/* if adapter not yet initialized, do nothing */
if (adapter->rx_buf_per_pkt == 0) {
netdev_err(netdev, "adapter not completely initialized, "
@@ -500,8 +504,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
sz) != 0)
return -EINVAL;
- if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
- new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
+ /* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
+ new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
+ ~VMXNET3_RING_SIZE_MASK;
+ new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
+ VMXNET3_RX_RING2_MAX_SIZE);
+
+ if (new_tx_ring_size == adapter->tx_ring_size &&
+ new_rx_ring_size == adapter->rx_ring_size &&
+ new_rx_ring2_size == adapter->rx_ring2_size) {
return 0;
}
@@ -522,7 +533,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
vmxnet3_rq_destroy_all(adapter);
err = vmxnet3_create_queues(adapter, new_tx_ring_size,
- new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
+ new_rx_ring_size, new_rx_ring2_size);
if (err) {
/* failed, most likely because of OOM, try default
@@ -530,11 +541,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
netdev_err(netdev, "failed to apply new sizes, "
"try the default ones\n");
new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+ new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
err = vmxnet3_create_queues(adapter,
new_tx_ring_size,
new_rx_ring_size,
- VMXNET3_DEF_RX_RING_SIZE);
+ new_rx_ring2_size);
if (err) {
netdev_err(netdev, "failed to create queues "
"with default sizes. Closing it\n");
@@ -549,6 +561,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
}
adapter->tx_ring_size = new_tx_ring_size;
adapter->rx_ring_size = new_rx_ring_size;
+ adapter->rx_ring2_size = new_rx_ring2_size;
out:
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
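
The second RX ring now gets the same treatment as ring 0: the requested rx_jumbo_pending value is rounded up to the ring alignment and clamped to the new VMXNET3_RX_RING2_MAX_SIZE. A standalone sketch of that arithmetic follows; the alignment of 32 is an assumption for illustration, only the 2048 limit comes from this patch.

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE_ALIGN   32u           /* assumed VMXNET3_RING_SIZE_ALIGN */
#define RING_SIZE_MASK    (RING_SIZE_ALIGN - 1)
#define RX_RING2_MAX_SIZE 2048u         /* VMXNET3_RX_RING2_MAX_SIZE */

static uint32_t ring2_size(uint32_t requested)
{
        uint32_t sz = (requested + RING_SIZE_MASK) & ~RING_SIZE_MASK;

        return sz < RX_RING2_MAX_SIZE ? sz : RX_RING2_MAX_SIZE;
}

int main(void)
{
        printf("%u -> %u\n", 100u, ring2_size(100));    /* rounds up to 128 */
        printf("%u -> %u\n", 4096u, ring2_size(4096));  /* clamped to 2048 */
        return 0;
}
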
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 5f0199f6c31e..6297d9fb0ae5 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.2.1.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.3.2.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01020100
+#define VMXNET3_DRIVER_VERSION_NUM 0x01030200
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -352,6 +352,7 @@ struct vmxnet3_adapter {
/* Ring sizes */
u32 tx_ring_size;
u32 rx_ring_size;
+ u32 rx_ring2_size;
struct work_struct work;
@@ -384,6 +385,7 @@ struct vmxnet3_adapter {
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
#define VMXNET3_DEF_RX_RING_SIZE 256
+#define VMXNET3_DEF_RX_RING2_SIZE 128
#define VMXNET3_MAX_ETH_HDR_SIZE 22
#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7fbd89fbe107..99df0d76157c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -61,12 +61,6 @@
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
-#define VXLAN_N_VID (1u << 24)
-#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
-
-#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
-
/* UDP port for VXLAN traffic.
* The IANA assigned port is 4789, but the Linux default is 8472
* for compatibility with early adopters.
@@ -545,15 +539,57 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
return 1;
}
-static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
+ unsigned int off,
+ struct vxlanhdr *vh, size_t hdrlen,
+ u32 data)
+{
+ size_t start, offset, plen;
+ __wsum delta;
+
+ if (skb->remcsum_offload)
+ return vh;
+
+ if (!NAPI_GRO_CB(skb)->csum_valid)
+ return NULL;
+
+ start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
+ offset = start + ((data & VXLAN_RCO_UDP) ?
+ offsetof(struct udphdr, check) :
+ offsetof(struct tcphdr, check));
+
+ plen = hdrlen + offset + sizeof(u16);
+
+ /* Pull checksum that will be written */
+ if (skb_gro_header_hard(skb, off + plen)) {
+ vh = skb_gro_header_slow(skb, off + plen, off);
+ if (!vh)
+ return NULL;
+ }
+
+ delta = remcsum_adjust((void *)vh + hdrlen,
+ NAPI_GRO_CB(skb)->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ skb->csum = csum_add(skb->csum, delta);
+ NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+ skb->remcsum_offload = 1;
+
+ return vh;
+}
+
+static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb,
+ struct udp_offload *uoff)
{
struct sk_buff *p, **pp = NULL;
struct vxlanhdr *vh, *vh2;
- struct ethhdr *eh, *eh2;
- unsigned int hlen, off_vx, off_eth;
- const struct packet_offload *ptype;
- __be16 type;
+ unsigned int hlen, off_vx;
int flush = 1;
+ struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
+ udp_offloads);
+ u32 flags;
off_vx = skb_gro_offset(skb);
hlen = off_vx + sizeof(*vh);
@@ -563,15 +599,17 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
if (unlikely(!vh))
goto out;
}
+
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
- off_eth = skb_gro_offset(skb);
- hlen = off_eth + sizeof(*eh);
- eh = skb_gro_header_fast(skb, off_eth);
- if (skb_gro_header_hard(skb, hlen)) {
- eh = skb_gro_header_slow(skb, hlen, off_eth);
- if (unlikely(!eh))
+ flags = ntohl(vh->vx_flags);
+
+ if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
+ vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
+ ntohl(vh->vx_vni));
+
+ if (!vh)
goto out;
}
@@ -582,54 +620,26 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
continue;
vh2 = (struct vxlanhdr *)(p->data + off_vx);
- eh2 = (struct ethhdr *)(p->data + off_eth);
- if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
+ if (vh->vx_vni != vh2->vx_vni) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
}
- type = eh->h_proto;
-
- rcu_read_lock();
- ptype = gro_find_receive_by_type(type);
- if (ptype == NULL) {
- flush = 1;
- goto out_unlock;
- }
-
- skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
- skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = eth_gro_receive(head, skb);
-out_unlock:
- rcu_read_unlock();
out:
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
}
-static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
+static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
+ struct udp_offload *uoff)
{
- struct ethhdr *eh;
- struct packet_offload *ptype;
- __be16 type;
- int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
- int err = -ENOSYS;
-
udp_tunnel_gro_complete(skb, nhoff);
- eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
- type = eh->h_proto;
-
- rcu_read_lock();
- ptype = gro_find_complete_by_type(type);
- if (ptype != NULL)
- err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);
-
- rcu_read_unlock();
- return err;
+ return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
/* Notify netdevs that UDP port started listening */
@@ -1131,32 +1141,94 @@ static void vxlan_igmp_leave(struct work_struct *work)
dev_put(vxlan->dev);
}
+static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
+ size_t hdrlen, u32 data)
+{
+ size_t start, offset, plen;
+ __wsum delta;
+
+ if (skb->remcsum_offload) {
+ /* Already processed in GRO path */
+ skb->remcsum_offload = 0;
+ return vh;
+ }
+
+ start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
+ offset = start + ((data & VXLAN_RCO_UDP) ?
+ offsetof(struct udphdr, check) :
+ offsetof(struct tcphdr, check));
+
+ plen = hdrlen + offset + sizeof(u16);
+
+ if (!pskb_may_pull(skb, plen))
+ return NULL;
+
+ vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
+
+ if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
+ __skb_checksum_complete(skb);
+
+ delta = remcsum_adjust((void *)vh + hdrlen,
+ skb->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ skb->csum = csum_add(skb->csum, delta);
+
+ return vh;
+}
+
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
+ u32 flags, vni;
/* Need Vxlan and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
goto error;
- /* Return packets with reserved bits set */
vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
- if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
- (vxh->vx_vni & htonl(0xff))) {
- netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
- ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
- goto error;
+ flags = ntohl(vxh->vx_flags);
+ vni = ntohl(vxh->vx_vni);
+
+ if (flags & VXLAN_HF_VNI) {
+ flags &= ~VXLAN_HF_VNI;
+ } else {
+ /* VNI flag always required to be set */
+ goto bad_flags;
}
if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
goto drop;
+ vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
+ if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
+ vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni);
+ if (!vxh)
+ goto drop;
+
+ flags &= ~VXLAN_HF_RCO;
+ vni &= VXLAN_VID_MASK;
+ }
+
+ if (flags || (vni & ~VXLAN_VID_MASK)) {
+ /* If there are any unprocessed flags remaining, treat
+ * this as a malformed packet. This behavior diverges from
+ * the VXLAN RFC (RFC 7348), which stipulates that bits set in
+ * reserved fields are to be ignored. The approach here
+ * maintains compatibility with previous stack code, and also
+ * is more robust and provides a little more security in
+ * adding extensions to VXLAN.
+ */
+
+ goto bad_flags;
+ }
+
vs->rcv(vs, skb, vxh->vx_vni);
return 0;
@@ -1165,6 +1237,10 @@ drop:
kfree_skb(skb);
return 0;
+bad_flags:
+ netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+ ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
+
error:
/* Return non vxlan pkt */
return 1;
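
vxlan_udp_encap_recv() now treats the flag word strictly: the VNI flag must be set, bits the receiver actually consumed (VNI, plus RCO when remote checksum offload is enabled on the socket) are cleared, and any leftover flag or reserved VNI bit makes the packet malformed, deliberately stricter than RFC 7348's ignore-reserved-bits rule, as the comment above explains. A standalone sketch of that discipline; the flag values below are invented for illustration, the real VXLAN_HF_* constants live in the vxlan header.

#include <stdbool.h>
#include <stdint.h>

#define FLAG_VNI (1u << 27)             /* stand-in for VXLAN_HF_VNI */
#define FLAG_RCO (1u << 21)             /* stand-in for VXLAN_HF_RCO */

static bool header_flags_ok(uint32_t flags, bool rco_enabled)
{
        if (!(flags & FLAG_VNI))        /* VNI flag is always required */
                return false;
        flags &= ~FLAG_VNI;

        if (rco_enabled)                /* RCO only accepted when configured */
                flags &= ~FLAG_RCO;

        return flags == 0;              /* anything left over is malformed */
}
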
@@ -1577,8 +1653,23 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
int min_headroom;
int err;
bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
+ int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ u16 hdrlen = sizeof(struct vxlanhdr);
+
+ if ((vs->flags & VXLAN_F_REMCSUM_TX) &&
+ skb->ip_summed == CHECKSUM_PARTIAL) {
+ int csum_start = skb_checksum_start_offset(skb);
+
+ if (csum_start <= VXLAN_MAX_REMCSUM_START &&
+ !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
+ (skb->csum_offset == offsetof(struct udphdr, check) ||
+ skb->csum_offset == offsetof(struct tcphdr, check))) {
+ udp_sum = false;
+ type |= SKB_GSO_TUNNEL_REMCSUM;
+ }
+ }
- skb = udp_tunnel_handle_offloads(skb, udp_sum);
+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
if (IS_ERR(skb)) {
err = -EINVAL;
goto err;
@@ -1588,7 +1679,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ VXLAN_HLEN + sizeof(struct ipv6hdr)
- + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
@@ -1604,9 +1695,25 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
}
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_FLAGS);
+ vxh->vx_flags = htonl(VXLAN_HF_VNI);
vxh->vx_vni = vni;
+ if (type & SKB_GSO_TUNNEL_REMCSUM) {
+ u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
+ VXLAN_RCO_SHIFT;
+
+ if (skb->csum_offset == offsetof(struct udphdr, check))
+ data |= VXLAN_RCO_UDP;
+
+ vxh->vx_vni |= htonl(data);
+ vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+
+ if (!skb_is_gso(skb)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->encapsulation = 0;
+ }
+ }
+
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
@@ -1627,14 +1734,29 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
int min_headroom;
int err;
bool udp_sum = !vs->sock->sk->sk_no_check_tx;
+ int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ u16 hdrlen = sizeof(struct vxlanhdr);
+
+ if ((vs->flags & VXLAN_F_REMCSUM_TX) &&
+ skb->ip_summed == CHECKSUM_PARTIAL) {
+ int csum_start = skb_checksum_start_offset(skb);
+
+ if (csum_start <= VXLAN_MAX_REMCSUM_START &&
+ !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
+ (skb->csum_offset == offsetof(struct udphdr, check) ||
+ skb->csum_offset == offsetof(struct tcphdr, check))) {
+ udp_sum = false;
+ type |= SKB_GSO_TUNNEL_REMCSUM;
+ }
+ }
- skb = udp_tunnel_handle_offloads(skb, udp_sum);
+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
if (IS_ERR(skb))
return PTR_ERR(skb);
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ VXLAN_HLEN + sizeof(struct iphdr)
- + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
@@ -1648,9 +1770,25 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
return -ENOMEM;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_FLAGS);
+ vxh->vx_flags = htonl(VXLAN_HF_VNI);
vxh->vx_vni = vni;
+ if (type & SKB_GSO_TUNNEL_REMCSUM) {
+ u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
+ VXLAN_RCO_SHIFT;
+
+ if (skb->csum_offset == offsetof(struct udphdr, check))
+ data |= VXLAN_RCO_UDP;
+
+ vxh->vx_vni |= htonl(data);
+ vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+
+ if (!skb_is_gso(skb)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->encapsulation = 0;
+ }
+ }
+
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
return udp_tunnel_xmit_skb(vs->sock, rt, skb, src, dst, tos,
@@ -2242,6 +2380,8 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
+ [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
+ [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -2363,6 +2503,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
atomic_set(&vs->refcnt, 1);
vs->rcv = rcv;
vs->data = data;
+ vs->flags = flags;
/* Initialize the vxlan udp offloads structure */
vs->udp_offloads.port = port;
@@ -2557,6 +2698,14 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+ if (data[IFLA_VXLAN_REMCSUM_TX] &&
+ nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
+ vxlan->flags |= VXLAN_F_REMCSUM_TX;
+
+ if (data[IFLA_VXLAN_REMCSUM_RX] &&
+ nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
+ vxlan->flags |= VXLAN_F_REMCSUM_RX;
+
if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
vxlan->dst_port)) {
pr_info("duplicate VNI %u\n", vni);
@@ -2625,6 +2774,8 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
0;
}
@@ -2690,7 +2841,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
!!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
- !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)))
+ !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
+ nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
+ !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
+ nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
+ !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
goto nla_put_failure;
if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
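
The remaining vxlan hunks are netlink plumbing: each new boolean attribute is touched in four places that must stay in sync (the policy table, vxlan_newlink(), vxlan_get_size() and vxlan_fill_info()). Below is a hedged standalone sketch of one way to keep the attribute-to-flag mapping in a single table; every name and numeric value here is a hypothetical stand-in, not the kernel's.

#include <stdint.h>
#include <stdio.h>

enum { ATTR_REMCSUM_TX_EX = 27, ATTR_REMCSUM_RX_EX = 28, ATTR_MAX_EX = 31 };

#define F_REMCSUM_TX_EX (1u << 12)
#define F_REMCSUM_RX_EX (1u << 13)

static const struct { int attr; uint32_t flag; } flag_attrs_ex[] = {
	{ ATTR_REMCSUM_TX_EX, F_REMCSUM_TX_EX },
	{ ATTR_REMCSUM_RX_EX, F_REMCSUM_RX_EX },
};

/* attr_val[] stands in for the parsed u8 attributes (0 = absent or false). */
static uint32_t parse_flag_attrs_example(const uint8_t *attr_val)
{
	uint32_t flags = 0;
	size_t i;

	for (i = 0; i < sizeof(flag_attrs_ex) / sizeof(flag_attrs_ex[0]); i++)
		if (attr_val[flag_attrs_ex[i].attr])
			flags |= flag_attrs_ex[i].flag;
	return flags;
}

int main(void)
{
	uint8_t attrs[ATTR_MAX_EX + 1] = { 0 };

	attrs[ATTR_REMCSUM_TX_EX] = 1;
	printf("flags = 0x%x\n", (unsigned int)parse_flag_attrs_example(attrs));
	return 0;
}
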
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 871e969409bf..c43e2ad36587 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1115,6 +1115,75 @@ static const struct file_operations fops_ackto = {
};
#endif
+static ssize_t read_file_tpc(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned int len = 0, size = 32;
+ ssize_t retval;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "%s\n",
+ ah->tpc_enabled ? "ENABLED" : "DISABLED");
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t write_file_tpc(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+ bool tpc_enabled;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ /* ar9002 does not support TPC for the moment */
+ return -EOPNOTSUPP;
+ }
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 1)
+ return -EINVAL;
+
+ tpc_enabled = !!val;
+
+ if (tpc_enabled != ah->tpc_enabled) {
+ ah->tpc_enabled = tpc_enabled;
+ ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_tpc = {
+ .read = read_file_tpc,
+ .write = write_file_tpc,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
/* Ethtool support for get-stats */
#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
@@ -1324,6 +1393,8 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("ack_to", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
sc, &fops_ackto);
#endif
+ debugfs_create_file("tpc", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_tpc);
return 0;
}
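
The new debugfs entry is a plain on/off knob. A hedged userspace sketch of toggling it follows; the phy name and the exact debugfs mount point are assumptions (ath9k registers its directory under the wiphy's debugfs directory, typically /sys/kernel/debug/ieee80211/phy<N>/ath9k).

#include <fcntl.h>
#include <unistd.h>

/* Toggle TX power control via the debugfs file added above. Returns 0 on
 * success, -1 on error. The path is assumed for illustration only. */
static int set_ath9k_tpc_example(int enable)
{
	const char *path = "/sys/kernel/debug/ieee80211/phy0/ath9k/tpc";
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, enable ? "1" : "0", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}

write_file_tpc() parses the value with kstrtoul(), so "0" and "1" (with an optional trailing newline) are accepted, and the power limit is only reprogrammed when the value actually changes.
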
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 6d4b273469b1..258c4d236cbe 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -422,6 +422,9 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->power_mode = ATH9K_PM_UNDEFINED;
ah->htc_reset_init = true;
+ /* ar9002 does not support TPC for the moment */
+ ah->tpc_enabled = !!AR_SREV_9300_20_OR_LATER(ah);
+
ah->ani_function = ATH9K_ANI_ALL;
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index e9bd02c2e844..52d63de4a93d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1106,7 +1106,7 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
return MAX_RATE_POWER;
if (!AR_SREV_9300_20_OR_LATER(ah)) {
- /* ar9002 is not sipported for the moment */
+ /* ar9002 does not support TPC for the moment */
return MAX_RATE_POWER;
}
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index cfd0554cf140..3d57f8772389 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -86,7 +86,7 @@ static const struct radar_detector_specs fcc_radar_ref_types[] = {
FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
- FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 20),
+ FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
};
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 4834a9abc171..b6cc9ff47fc2 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,7 +172,6 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
(struct rsi_91x_sdiodev *)adapter->rsi_dev;
u32 len;
u32 num_blocks;
- const u8 *fw;
const struct firmware *fw_entry = NULL;
u32 block_size = dev->tx_blk_size;
int status = 0;
@@ -201,7 +200,6 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
return status;
}
- fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
len = fw_entry->size;
if (len % 4)
@@ -212,7 +210,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
- status = rsi_copy_to_card(common, fw, len, num_blocks);
+ status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
release_firmware(fw_entry);
return status;
}
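
The rsi change drops a kmemdup() whose buffer was never freed (and whose result was not checked). The firmware core already guarantees that fw_entry->data stays valid until release_firmware(), so copying it first bought nothing. A minimal kernel-style sketch of that lifecycle, with copy_to_card standing in for the driver's transfer routine:

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/types.h>

static int load_fw_example(struct device *dev, const char *name,
			   int (*copy_to_card)(const u8 *data, size_t len))
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret)
		return ret;

	/* fw->data is owned by the firmware core; it stays valid until
	 * release_firmware(), so it can be handed to the copy routine
	 * directly, exactly as the patched code now does. */
	ret = copy_to_card(fw->data, fw->size);

	release_firmware(fw);
	return ret;
}
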
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 40b6d1d006d7..1d4677460711 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -867,63 +867,135 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
*
* B/G rate:
* (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92_RATE1M-->DESC92_RATE54M ==> idx is 0-->11,
+ * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
*
* N rate:
* (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92_RATEMCS0-->DESC92_RATEMCS15 ==> idx is 0-->15
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
*
* 5G band:rx_status->band == IEEE80211_BAND_5GHZ
* A rate:
* (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92_RATE6M-->DESC92_RATE54M ==> idx is 0-->7,
+ * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
*
* N rate:
* (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92_RATEMCS0-->DESC92_RATEMCS15 ==> idx is 0-->15
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
+ *
+ * VHT rates:
+ * DESC_RATEVHT1SS_MCS0-->DESC_RATEVHT1SS_MCS9 ==> idx is 0-->9
+ * DESC_RATEVHT2SS_MCS0-->DESC_RATEVHT2SS_MCS9 ==> idx is 0-->9
*/
-int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate, bool first_ampdu)
+int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, bool isvht,
+ u8 desc_rate)
{
int rate_idx;
+ if (isvht) {
+ switch (desc_rate) {
+ case DESC_RATEVHT1SS_MCS0:
+ rate_idx = 0;
+ break;
+ case DESC_RATEVHT1SS_MCS1:
+ rate_idx = 1;
+ break;
+ case DESC_RATEVHT1SS_MCS2:
+ rate_idx = 2;
+ break;
+ case DESC_RATEVHT1SS_MCS3:
+ rate_idx = 3;
+ break;
+ case DESC_RATEVHT1SS_MCS4:
+ rate_idx = 4;
+ break;
+ case DESC_RATEVHT1SS_MCS5:
+ rate_idx = 5;
+ break;
+ case DESC_RATEVHT1SS_MCS6:
+ rate_idx = 6;
+ break;
+ case DESC_RATEVHT1SS_MCS7:
+ rate_idx = 7;
+ break;
+ case DESC_RATEVHT1SS_MCS8:
+ rate_idx = 8;
+ break;
+ case DESC_RATEVHT1SS_MCS9:
+ rate_idx = 9;
+ break;
+ case DESC_RATEVHT2SS_MCS0:
+ rate_idx = 0;
+ break;
+ case DESC_RATEVHT2SS_MCS1:
+ rate_idx = 1;
+ break;
+ case DESC_RATEVHT2SS_MCS2:
+ rate_idx = 2;
+ break;
+ case DESC_RATEVHT2SS_MCS3:
+ rate_idx = 3;
+ break;
+ case DESC_RATEVHT2SS_MCS4:
+ rate_idx = 4;
+ break;
+ case DESC_RATEVHT2SS_MCS5:
+ rate_idx = 5;
+ break;
+ case DESC_RATEVHT2SS_MCS6:
+ rate_idx = 6;
+ break;
+ case DESC_RATEVHT2SS_MCS7:
+ rate_idx = 7;
+ break;
+ case DESC_RATEVHT2SS_MCS8:
+ rate_idx = 8;
+ break;
+ case DESC_RATEVHT2SS_MCS9:
+ rate_idx = 9;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ return rate_idx;
+ }
if (false == isht) {
if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
switch (desc_rate) {
- case DESC92_RATE1M:
+ case DESC_RATE1M:
rate_idx = 0;
break;
- case DESC92_RATE2M:
+ case DESC_RATE2M:
rate_idx = 1;
break;
- case DESC92_RATE5_5M:
+ case DESC_RATE5_5M:
rate_idx = 2;
break;
- case DESC92_RATE11M:
+ case DESC_RATE11M:
rate_idx = 3;
break;
- case DESC92_RATE6M:
+ case DESC_RATE6M:
rate_idx = 4;
break;
- case DESC92_RATE9M:
+ case DESC_RATE9M:
rate_idx = 5;
break;
- case DESC92_RATE12M:
+ case DESC_RATE12M:
rate_idx = 6;
break;
- case DESC92_RATE18M:
+ case DESC_RATE18M:
rate_idx = 7;
break;
- case DESC92_RATE24M:
+ case DESC_RATE24M:
rate_idx = 8;
break;
- case DESC92_RATE36M:
+ case DESC_RATE36M:
rate_idx = 9;
break;
- case DESC92_RATE48M:
+ case DESC_RATE48M:
rate_idx = 10;
break;
- case DESC92_RATE54M:
+ case DESC_RATE54M:
rate_idx = 11;
break;
default:
@@ -932,28 +1004,28 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
}
} else {
switch (desc_rate) {
- case DESC92_RATE6M:
+ case DESC_RATE6M:
rate_idx = 0;
break;
- case DESC92_RATE9M:
+ case DESC_RATE9M:
rate_idx = 1;
break;
- case DESC92_RATE12M:
+ case DESC_RATE12M:
rate_idx = 2;
break;
- case DESC92_RATE18M:
+ case DESC_RATE18M:
rate_idx = 3;
break;
- case DESC92_RATE24M:
+ case DESC_RATE24M:
rate_idx = 4;
break;
- case DESC92_RATE36M:
+ case DESC_RATE36M:
rate_idx = 5;
break;
- case DESC92_RATE48M:
+ case DESC_RATE48M:
rate_idx = 6;
break;
- case DESC92_RATE54M:
+ case DESC_RATE54M:
rate_idx = 7;
break;
default:
@@ -963,52 +1035,52 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
}
} else {
switch (desc_rate) {
- case DESC92_RATEMCS0:
+ case DESC_RATEMCS0:
rate_idx = 0;
break;
- case DESC92_RATEMCS1:
+ case DESC_RATEMCS1:
rate_idx = 1;
break;
- case DESC92_RATEMCS2:
+ case DESC_RATEMCS2:
rate_idx = 2;
break;
- case DESC92_RATEMCS3:
+ case DESC_RATEMCS3:
rate_idx = 3;
break;
- case DESC92_RATEMCS4:
+ case DESC_RATEMCS4:
rate_idx = 4;
break;
- case DESC92_RATEMCS5:
+ case DESC_RATEMCS5:
rate_idx = 5;
break;
- case DESC92_RATEMCS6:
+ case DESC_RATEMCS6:
rate_idx = 6;
break;
- case DESC92_RATEMCS7:
+ case DESC_RATEMCS7:
rate_idx = 7;
break;
- case DESC92_RATEMCS8:
+ case DESC_RATEMCS8:
rate_idx = 8;
break;
- case DESC92_RATEMCS9:
+ case DESC_RATEMCS9:
rate_idx = 9;
break;
- case DESC92_RATEMCS10:
+ case DESC_RATEMCS10:
rate_idx = 10;
break;
- case DESC92_RATEMCS11:
+ case DESC_RATEMCS11:
rate_idx = 11;
break;
- case DESC92_RATEMCS12:
+ case DESC_RATEMCS12:
rate_idx = 12;
break;
- case DESC92_RATEMCS13:
+ case DESC_RATEMCS13:
rate_idx = 13;
break;
- case DESC92_RATEMCS14:
+ case DESC_RATEMCS14:
rate_idx = 14;
break;
- case DESC92_RATEMCS15:
+ case DESC_RATEMCS15:
rate_idx = 15;
break;
default:
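
The base.c change folds the per-chip _rtlXX_rate_mapping() copies (removed further down for rtl8188ee, rtl8192ee, rtl8723ae and rtl8723be) into this one rtlwifi_rate_mapping(), and adds an isvht path where both 1SS and 2SS MCS values map onto indices 0-9, since mac80211 carries the stream count separately. If the DESC_RATEVHT*SS_MCS* enumerators are consecutive (an assumption, not something this diff shows), the VHT switch could even collapse to arithmetic, as in this standalone sketch with stand-in values:

#include <stdio.h>

enum {
	DESC_RATEVHT1SS_MCS0_EX = 0x2c,	/* stand-in values only */
	DESC_RATEVHT2SS_MCS0_EX = 0x36,
};

static int vht_rate_idx_example(unsigned int desc_rate)
{
	unsigned int base = desc_rate >= DESC_RATEVHT2SS_MCS0_EX ?
			    DESC_RATEVHT2SS_MCS0_EX : DESC_RATEVHT1SS_MCS0_EX;
	unsigned int idx = desc_rate - base;

	/* Out-of-range values (including anything below 1SS MCS0, which
	 * wraps to a large unsigned number) fall back to 0, matching the
	 * default: label in the switch above. */
	return idx <= 9 ? (int)idx : 0;
}

int main(void)
{
	printf("%d\n", vht_rate_idx_example(DESC_RATEVHT2SS_MCS0_EX + 7)); /* prints 7 */
	return 0;
}
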
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 982f2450feea..c6cb49c3ee32 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -123,8 +123,8 @@ void rtl_watch_dog_timer_callback(unsigned long data);
void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
-int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate, bool first_ampdu);
+int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
+ bool isvht, u8 desc_rate);
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 5fc6f52641bd..deab85236bfd 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -95,7 +95,8 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
}
EXPORT_SYMBOL(rtl_bb_delay);
-void rtl_fw_cb(const struct firmware *firmware, void *context)
+static void rtl_fw_do_work(const struct firmware *firmware, void *context,
+ bool is_wow)
{
struct ieee80211_hw *hw = context;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -125,12 +126,31 @@ found_alt:
release_firmware(firmware);
return;
}
- memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
+ if (!is_wow) {
+ memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
+ firmware->size);
+ rtlpriv->rtlhal.fwsize = firmware->size;
+ } else {
+ memcpy(rtlpriv->rtlhal.wowlan_firmware, firmware->data,
+ firmware->size);
+ rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
+ }
rtlpriv->rtlhal.fwsize = firmware->size;
release_firmware(firmware);
}
+
+void rtl_fw_cb(const struct firmware *firmware, void *context)
+{
+ rtl_fw_do_work(firmware, context, false);
+}
EXPORT_SYMBOL(rtl_fw_cb);
+void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context)
+{
+ rtl_fw_do_work(firmware, context, true);
+}
+EXPORT_SYMBOL(rtl_wowlan_fw_cb);
+
/*mutex for start & stop is must here. */
static int rtl_op_start(struct ieee80211_hw *hw)
{
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 624e1dc16d31..8c87eb54be66 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -37,6 +37,7 @@
extern const struct ieee80211_ops rtl_ops;
void rtl_fw_cb(const struct firmware *firmware, void *context);
+void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context);
void rtl_addr_delay(u32 addr);
void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
u32 mask, u32 data);
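
rtl_wowlan_fw_cb() is the existing firmware-completion path with is_wow set, storing the image in rtlhal.wowlan_firmware instead of rtlhal.pfirmware. A hedged sketch of how a chip driver would typically hand it to the firmware core; the firmware file name and device pointer are placeholders, and request_firmware_nowait() is the standard asynchronous loader.

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/device.h>
#include <net/mac80211.h>

/* Prototype from rtlwifi's core.h as added in this patch. */
void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context);

static int load_wowlan_fw_example(struct ieee80211_hw *hw, struct device *dev)
{
	/* The completion callback copies the image into
	 * rtlpriv->rtlhal.wowlan_firmware, mirroring rtl_fw_cb(). */
	return request_firmware_nowait(THIS_MODULE, true,
				       "rtlwifi/example_wowlan.bin", dev,
				       GFP_KERNEL, hw, rtl_wowlan_fw_cb);
}
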
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index df549c96adef..791efbe6b18c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -47,164 +47,6 @@ static u8 _rtl88ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl88ee_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate)
-{
- int rate_idx;
-
- if (!isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATE6M:
- rate_idx = 0;
- break;
- case DESC92C_RATE9M:
- rate_idx = 1;
- break;
- case DESC92C_RATE12M:
- rate_idx = 2;
- break;
- case DESC92C_RATE18M:
- rate_idx = 3;
- break;
- case DESC92C_RATE24M:
- rate_idx = 4;
- break;
- case DESC92C_RATE36M:
- rate_idx = 5;
- break;
- case DESC92C_RATE48M:
- rate_idx = 6;
- break;
- case DESC92C_RATE54M:
- rate_idx = 7;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATEMCS0:
- rate_idx = 0;
- break;
- case DESC92C_RATEMCS1:
- rate_idx = 1;
- break;
- case DESC92C_RATEMCS2:
- rate_idx = 2;
- break;
- case DESC92C_RATEMCS3:
- rate_idx = 3;
- break;
- case DESC92C_RATEMCS4:
- rate_idx = 4;
- break;
- case DESC92C_RATEMCS5:
- rate_idx = 5;
- break;
- case DESC92C_RATEMCS6:
- rate_idx = 6;
- break;
- case DESC92C_RATEMCS7:
- rate_idx = 7;
- break;
- case DESC92C_RATEMCS8:
- rate_idx = 8;
- break;
- case DESC92C_RATEMCS9:
- rate_idx = 9;
- break;
- case DESC92C_RATEMCS10:
- rate_idx = 10;
- break;
- case DESC92C_RATEMCS11:
- rate_idx = 11;
- break;
- case DESC92C_RATEMCS12:
- rate_idx = 12;
- break;
- case DESC92C_RATEMCS13:
- rate_idx = 13;
- break;
- case DESC92C_RATEMCS14:
- rate_idx = 14;
- break;
- case DESC92C_RATEMCS15:
- rate_idx = 15;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- return rate_idx;
-}
-
static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstatus, u8 *pdesc,
struct rx_fwinfo_88e *p_drvinfo,
@@ -630,8 +472,8 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
* are use (RX_FLAG_HT)
* Notice: this is diff with windows define
*/
- rx_status->rate_idx = _rtl88ee_rate_mapping(hw,
- status->is_ht, status->rate);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+ false, status->rate);
rx_status->mactime = status->timestamp_low;
if (phystatus == true) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index b64ae45dc674..e9f4281f5067 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -37,6 +37,7 @@
#define FW_8192C_POLLING_DELAY 5
#define FW_8192C_POLLING_TIMEOUT_COUNT 100
#define NORMAL_CHIP BIT(4)
+#define H2C_92C_KEEP_ALIVE_CTRL 48
#define IS_FW_HEADER_EXIST(_pfwhdr) \
((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 5c646d5f7bb8..303b299376c9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -544,8 +544,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(u8 *)(&fw_current_inps));
}
break; }
- case HW_VAR_KEEP_ALIVE:
- break;
+ case HW_VAR_KEEP_ALIVE: {
+ u8 array[2];
+
+ array[0] = 0xff;
+ array[1] = *((u8 *)val);
+ rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2, array);
+ break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case %d not processed\n", variable);
@@ -1156,47 +1161,35 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
- bt_msr &= 0xfc;
+ u8 mode = MSR_NOLINK;
- if (type == NL80211_IFTYPE_UNSPECIFIED ||
- type == NL80211_IFTYPE_STATION) {
- _rtl92ce_stop_tx_beacon(hw);
- _rtl92ce_enable_bcn_sub_func(hw);
- } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP ||
- type == NL80211_IFTYPE_MESH_POINT) {
- _rtl92ce_resume_tx_beacon(hw);
- _rtl92ce_disable_bcn_sub_func(hw);
- } else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
- type);
- }
+ bt_msr &= 0xfc;
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
- bt_msr |= MSR_NOLINK;
- ledaction = LED_CTL_LINK;
+ mode = MSR_NOLINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
- bt_msr |= MSR_ADHOC;
+ mode = MSR_ADHOC;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
- bt_msr |= MSR_INFRA;
+ mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
- bt_msr |= MSR_AP;
+ mode = MSR_AP;
+ ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to AP!\n");
break;
case NL80211_IFTYPE_MESH_POINT:
- bt_msr |= MSR_ADHOC;
+ mode = MSR_ADHOC;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to Mesh Point!\n");
break;
@@ -1207,9 +1200,32 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
}
- rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ /* MSR_INFRA == Link in infrastructure network;
+ * MSR_ADHOC == Link in ad hoc network;
+ * Therefore, check link state is necessary.
+ *
+ * MSR_AP == AP mode; link state does not matter here.
+ */
+ if (mode != MSR_AP &&
+ rtlpriv->mac80211.link_state < MAC80211_LINKED) {
+ mode = MSR_NOLINK;
+ ledaction = LED_CTL_NO_LINK;
+ }
+ if (mode == MSR_NOLINK || mode == MSR_INFRA) {
+ _rtl92ce_stop_tx_beacon(hw);
+ _rtl92ce_enable_bcn_sub_func(hw);
+ } else if (mode == MSR_ADHOC || mode == MSR_AP) {
+ _rtl92ce_resume_tx_beacon(hw);
+ _rtl92ce_disable_bcn_sub_func(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
+ }
+ rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
+
rtlpriv->cfg->ops->led_control(hw, ledaction);
- if ((bt_msr & MSR_MASK) == MSR_AP)
+ if (mode == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
else
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
@@ -1833,7 +1849,6 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
u32 ratr_value;
u8 ratr_index = 0;
u8 nmode = mac->ht_enable;
- u8 mimo_ps = IEEE80211_SMPS_OFF;
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
@@ -1842,6 +1857,7 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
+ u32 ratr_mask;
if (rtlhal->current_bandtype == BAND_ON_5G)
ratr_value = sta->supp_rates[1] << 4;
@@ -1865,19 +1881,13 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
case WIRELESS_MODE_N_24G:
case WIRELESS_MODE_N_5G:
nmode = 1;
- if (mimo_ps == IEEE80211_SMPS_STATIC) {
- ratr_value &= 0x0007F005;
- } else {
- u32 ratr_mask;
-
- if (get_rf_type(rtlphy) == RF_1T2R ||
- get_rf_type(rtlphy) == RF_1T1R)
- ratr_mask = 0x000ff005;
- else
- ratr_mask = 0x0f0ff005;
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R)
+ ratr_mask = 0x000ff005;
+ else
+ ratr_mask = 0x0f0ff005;
- ratr_value &= ratr_mask;
- }
+ ratr_value &= ratr_mask;
break;
default:
if (rtlphy->rf_type == RF_1T2R)
@@ -1930,17 +1940,16 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
- u8 curshortgi_40mhz = curtxbw_40mhz &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
- 1 : 0;
+ u8 curtxbw_40mhz = (sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
+ u8 curshortgi_40mhz = (sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
u8 rate_mask[5];
u8 macid = 0;
- u8 mimo_ps = IEEE80211_SMPS_OFF;
sta_entry = (struct rtl_sta_info *) sta->drv_priv;
wirelessmode = sta_entry->wireless_mode;
@@ -1985,47 +1994,38 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
case WIRELESS_MODE_N_5G:
ratr_index = RATR_INX_WIRELESS_NGB;
- if (mimo_ps == IEEE80211_SMPS_STATIC) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x00070000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0007f000;
- else
- ratr_bitmap &= 0x0007f005;
+ if (rtlphy->rf_type == RF_1T2R ||
+ rtlphy->rf_type == RF_1T1R) {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
} else {
- if (rtlphy->rf_type == RF_1T2R ||
- rtlphy->rf_type == RF_1T1R) {
- if (curtxbw_40mhz) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x000f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x000ff000;
- else
- ratr_bitmap &= 0x000ff015;
- } else {
- if (rssi_level == 1)
- ratr_bitmap &= 0x000f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x000ff000;
- else
- ratr_bitmap &= 0x000ff005;
- }
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff015;
} else {
- if (curtxbw_40mhz) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x0f0f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0f0ff000;
- else
- ratr_bitmap &= 0x0f0ff015;
- } else {
- if (rssi_level == 1)
- ratr_bitmap &= 0x0f0f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0f0ff000;
- else
- ratr_bitmap &= 0x0f0ff005;
- }
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff005;
}
}
@@ -2058,9 +2058,6 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
"Rate_index:%x, ratr_val:%x, %5phC\n",
ratr_index, ratr_bitmap, rate_mask);
rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
-
- if (macid != 0)
- sta_entry->ratr_index = ratr_index;
}
void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index bc5ca989b915..1ee5a6ae9960 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -518,11 +518,12 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:{
if (ppsc->rfpwr_state == ERFOFF)
- return false;
+ break;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
- if (skb_queue_len(&ring->queue) == 0) {
+ if (queue_id == BEACON_QUEUE ||
+ skb_queue_len(&ring->queue) == 0) {
queue_id++;
continue;
} else {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index dd5aa089126a..de6cb6c3a48c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -334,21 +334,21 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
-
- .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
};
static const struct pci_device_id rtl92ce_pci_ids[] = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index e88dcd0e0af1..84ddd4d07a1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -257,8 +257,8 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
pstats->recvsignalpower = rx_pwr_all;
/* (3)EVM of HT rate */
- if (pstats->is_ht && pstats->rate >= DESC92_RATEMCS8 &&
- pstats->rate <= DESC92_RATEMCS15)
+ if (pstats->is_ht && pstats->rate >= DESC_RATEMCS8 &&
+ pstats->rate <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -400,9 +400,8 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
* are use (RX_FLAG_HT)
* Notice: this is diff with windows define
*/
- rx_status->rate_idx = rtlwifi_rate_mapping(hw,
- stats->is_ht, stats->rate,
- stats->isfirst_ampdu);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+ false, stats->rate);
rx_status->mactime = stats->timestamp_low;
if (phystatus) {
@@ -501,7 +500,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_BW(pdesc, 0);
SET_TX_DESC_RTS_SC(pdesc, tcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc,
- ((tcb_desc->rts_rate <= DESC92_RATE54M) ?
+ ((tcb_desc->rts_rate <= DESC_RATE54M) ?
(tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0)));
@@ -624,7 +623,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
if (firstseg)
SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
- SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
SET_TX_DESC_SEQ(pdesc, 0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index c2d8ec6afcda..133e395b7401 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -880,8 +880,8 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
if (GET_RX_DESC_RX_MCS(pdesc) &&
- GET_RX_DESC_RX_MCS(pdesc) >= DESC92_RATEMCS8 &&
- GET_RX_DESC_RX_MCS(pdesc) <= DESC92_RATEMCS15)
+ GET_RX_DESC_RX_MCS(pdesc) >= DESC_RATEMCS8 &&
+ GET_RX_DESC_RX_MCS(pdesc) <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index e06bafee37f9..90a714c189a8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -257,20 +257,20 @@ static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
- .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
};
#define USB_VENDER_ID_REALTEK 0x0bda
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index f383d5f1fed5..cbead007171f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -325,6 +325,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
&& (GET_RX_DESC_FAGGR(pdesc) == 1));
stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ stats->is_ht = (bool)GET_RX_DESC_RX_HT(pdesc);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
if (GET_RX_DESC_CRC32(pdesc))
@@ -338,10 +339,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
rx_status->flag |= RX_FLAG_MACTIME_START;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
- rx_status->rate_idx = rtlwifi_rate_mapping(hw,
- (bool)GET_RX_DESC_RX_HT(pdesc),
- (u8)GET_RX_DESC_RX_MCS(pdesc),
- (bool)GET_RX_DESC_PAGGR(pdesc));
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+ false, stats->rate);
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
@@ -393,6 +392,7 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
&& (GET_RX_DESC_FAGGR(rxdesc) == 1));
stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
+ stats.is_ht = (bool)GET_RX_DESC_RX_HT(rxdesc);
/* TODO: is center_freq changed when doing scan? */
/* TODO: Shall we add protection or just skip those two step? */
rx_status->freq = hw->conf.chandef.chan->center_freq;
@@ -406,10 +406,8 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
if (GET_RX_DESC_RX_HT(rxdesc))
rx_status->flag |= RX_FLAG_HT;
/* Data rate */
- rx_status->rate_idx = rtlwifi_rate_mapping(hw,
- (bool)GET_RX_DESC_RX_HT(rxdesc),
- (u8)GET_RX_DESC_RX_MCS(rxdesc),
- (bool)GET_RX_DESC_PAGGR(rxdesc));
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats.is_ht,
+ false, stats.rate);
/* There is a phy status after this rx descriptor. */
if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
@@ -545,7 +543,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_BW(txdesc, 0);
SET_TX_DESC_RTS_SC(txdesc, tcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(txdesc,
- ((tcb_desc->rts_rate <= DESC92_RATE54M) ?
+ ((tcb_desc->rts_rate <= DESC_RATE54M) ?
(tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0)));
if (mac->bw_40) {
@@ -644,7 +642,7 @@ void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
}
SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
SET_TX_DESC_OWN(pDesc, 1);
- SET_TX_DESC_TX_RATE(pDesc, DESC92_RATE1M);
+ SET_TX_DESC_TX_RATE(pDesc, DESC_RATE1M);
_rtl_tx_desc_checksum(pDesc);
}
@@ -660,7 +658,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
if (firstseg)
SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
- SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
SET_TX_DESC_SEQ(pdesc, 0);
SET_TX_DESC_LINIP(pdesc, 0);
SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index 23177076b97f..62ef8209718f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -540,23 +540,6 @@ void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
return;
}
-void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 u1_h2c_set_pwrmode[3] = { 0 };
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
- SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
- SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
- SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
- ppsc->reg_max_lps_awakeintvl);
- RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "rtl92d_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode",
- u1_h2c_set_pwrmode, 3);
- rtl92d_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
-}
-
static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
index a55a803a0b4d..1646e7c3d0f8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
@@ -136,7 +136,6 @@ int rtl92d_download_fw(struct ieee80211_hw *hw);
void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
void rtl92d_firmware_selfreset(struct ieee80211_hw *hw);
-void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index a0aba088259a..b19d0398215f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -337,21 +337,21 @@ static struct rtl_hal_cfg rtl92de_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
-
- .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
};
static struct pci_device_id rtl92de_pci_ids[] = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 8efbcc7af250..1feaa629dd4f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -235,8 +235,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
- if (pdesc->rxht && pdesc->rxmcs >= DESC92_RATEMCS8 &&
- pdesc->rxmcs <= DESC92_RATEMCS15)
+ if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 &&
+ pdesc->rxmcs <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -499,6 +499,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
&& (GET_RX_DESC_FAGGR(pdesc) == 1));
stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
if (GET_RX_DESC_CRC32(pdesc))
@@ -512,10 +513,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
rx_status->flag |= RX_FLAG_MACTIME_START;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
- rx_status->rate_idx = rtlwifi_rate_mapping(hw,
- (bool)GET_RX_DESC_RXHT(pdesc),
- (u8)GET_RX_DESC_RXMCS(pdesc),
- (bool)GET_RX_DESC_PAGGR(pdesc));
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+ false, stats->rate);
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
@@ -612,14 +611,14 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
}
/* 5G have no CCK rate */
if (rtlhal->current_bandtype == BAND_ON_5G)
- if (ptcb_desc->hw_rate < DESC92_RATE6M)
- ptcb_desc->hw_rate = DESC92_RATE6M;
+ if (ptcb_desc->hw_rate < DESC_RATE6M)
+ ptcb_desc->hw_rate = DESC_RATE6M;
SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
if (rtlhal->macphymode == DUALMAC_DUALPHY &&
- ptcb_desc->hw_rate == DESC92_RATEMCS7)
+ ptcb_desc->hw_rate == DESC_RATEMCS7)
SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
@@ -635,13 +634,13 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
/* 5G have no CCK rate */
if (rtlhal->current_bandtype == BAND_ON_5G)
- if (ptcb_desc->rts_rate < DESC92_RATE6M)
- ptcb_desc->rts_rate = DESC92_RATE6M;
+ if (ptcb_desc->rts_rate < DESC_RATE6M)
+ ptcb_desc->rts_rate = DESC_RATE6M;
SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
SET_TX_DESC_RTS_BW(pdesc, 0);
SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
- DESC92_RATE54M) ?
+ DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
(ptcb_desc->rts_use_shortgi ? 1 : 0)));
if (bw_40) {
@@ -756,9 +755,9 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
* The braces are needed no matter what checkpatch says
*/
if (rtlhal->current_bandtype == BAND_ON_5G) {
- SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE6M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE6M);
} else {
- SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
}
SET_TX_DESC_SEQ(pdesc, 0);
SET_TX_DESC_LINIP(pdesc, 0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
index 2fcbef1d029f..55d1da5e162b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
@@ -47,164 +47,6 @@ static u8 _rtl92ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl92ee_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate)
-{
- int rate_idx;
-
- if (!isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATE6M:
- rate_idx = 0;
- break;
- case DESC92C_RATE9M:
- rate_idx = 1;
- break;
- case DESC92C_RATE12M:
- rate_idx = 2;
- break;
- case DESC92C_RATE18M:
- rate_idx = 3;
- break;
- case DESC92C_RATE24M:
- rate_idx = 4;
- break;
- case DESC92C_RATE36M:
- rate_idx = 5;
- break;
- case DESC92C_RATE48M:
- rate_idx = 6;
- break;
- case DESC92C_RATE54M:
- rate_idx = 7;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATEMCS0:
- rate_idx = 0;
- break;
- case DESC92C_RATEMCS1:
- rate_idx = 1;
- break;
- case DESC92C_RATEMCS2:
- rate_idx = 2;
- break;
- case DESC92C_RATEMCS3:
- rate_idx = 3;
- break;
- case DESC92C_RATEMCS4:
- rate_idx = 4;
- break;
- case DESC92C_RATEMCS5:
- rate_idx = 5;
- break;
- case DESC92C_RATEMCS6:
- rate_idx = 6;
- break;
- case DESC92C_RATEMCS7:
- rate_idx = 7;
- break;
- case DESC92C_RATEMCS8:
- rate_idx = 8;
- break;
- case DESC92C_RATEMCS9:
- rate_idx = 9;
- break;
- case DESC92C_RATEMCS10:
- rate_idx = 10;
- break;
- case DESC92C_RATEMCS11:
- rate_idx = 11;
- break;
- case DESC92C_RATEMCS12:
- rate_idx = 12;
- break;
- case DESC92C_RATEMCS13:
- rate_idx = 13;
- break;
- case DESC92C_RATEMCS14:
- rate_idx = 14;
- break;
- case DESC92C_RATEMCS15:
- rate_idx = 15;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- return rate_idx;
-}
-
static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstatus, u8 *pdesc,
struct rx_fwinfo *p_drvinfo,
@@ -345,8 +187,8 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
pstatus->recvsignalpower = rx_pwr_all;
/* (3)EVM of HT rate */
- if (pstatus->rate >= DESC92C_RATEMCS8 &&
- pstatus->rate <= DESC92C_RATEMCS15)
+ if (pstatus->rate >= DESC_RATEMCS8 &&
+ pstatus->rate <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -576,9 +418,8 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
* are use (RX_FLAG_HT)
* Notice: this is diff with windows define
*/
- rx_status->rate_idx = _rtl92ee_rate_mapping(hw,
- status->is_ht,
- status->rate);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+ false, status->rate);
rx_status->mactime = status->timestamp_low;
if (phystatus) {
@@ -710,27 +551,6 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
return desc_address;
}
-void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
-{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u16 point_diff = 0;
- u16 current_tx_read_point = 0, current_tx_write_point = 0;
- u32 tmp_4byte;
-
- tmp_4byte = rtl_read_dword(rtlpriv,
- get_desc_addr_fr_q_idx(q_idx));
- current_tx_read_point = (u16)((tmp_4byte >> 16) & 0x0fff);
- current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
-
- point_diff = ((current_tx_read_point > current_tx_write_point) ?
- (current_tx_read_point - current_tx_write_point) :
- (TX_DESC_NUM_92E - current_tx_write_point +
- current_tx_read_point));
-
- rtlpci->tx_ring[q_idx].avl_desc = point_diff;
-}
-
void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
u8 *tx_bd_desc, u8 *desc, u8 queue_index,
struct sk_buff *skb, dma_addr_t addr)
@@ -901,13 +721,13 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
} else {
if (rtlpriv->ra.is_special_data) {
ptcb_desc->use_driver_rate = true;
- SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE11M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE11M);
} else {
ptcb_desc->use_driver_rate = false;
}
}
- if (ptcb_desc->hw_rate > DESC92C_RATEMCS0)
+ if (ptcb_desc->hw_rate > DESC_RATEMCS0)
short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
else
short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
@@ -927,7 +747,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc,
- ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ?
+ ((ptcb_desc->rts_rate <= DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
(ptcb_desc->rts_use_shortgi ? 1 : 0)));
@@ -1038,7 +858,7 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
if (firstseg)
SET_TX_DESC_OFFSET(pdesc, txdesc_len);
- SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
SET_TX_DESC_SEQ(pdesc, 0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
index 6f9be1c7515c..48504c25fffb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
@@ -591,10 +591,10 @@ do { \
} while (0)
#define RTL92EE_RX_HAL_IS_CCK_RATE(rxmcs)\
- (rxmcs == DESC92C_RATE1M ||\
- rxmcs == DESC92C_RATE2M ||\
- rxmcs == DESC92C_RATE5_5M ||\
- rxmcs == DESC92C_RATE11M)
+ (rxmcs == DESC_RATE1M ||\
+ rxmcs == DESC_RATE2M ||\
+ rxmcs == DESC_RATE5_5M ||\
+ rxmcs == DESC_RATE11M)
#define IS_LITTLE_ENDIAN 1
@@ -829,7 +829,6 @@ void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc,
u8 queue_index);
u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw,
u8 queue_index);
-void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
u8 *tx_bd_desc, u8 *desc, u8 queue_index,
struct sk_buff *skb, dma_addr_t addr);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index 6e7a70b43949..ef87c09b77d0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -450,10 +450,10 @@
SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\
- (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE1M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE2M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE5_5M ||\
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE11M)
+ (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE1M || \
+ GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE2M || \
+ GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE5_5M ||\
+ GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE11M)
enum rf_optype {
RF_OP_BY_SW_3WIRE = 0,
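
The DESC92_* to DESC_* rename repeated across these files (and the near-identical per-chip CCK-rate macros such as RTL92EE_RX_HAL_IS_CCK_RATE and SE_RX_HAL_IS_CCK_RATE above) suggests an obvious follow-up that is not part of this patch: a single shared helper. Sketch only, assuming the unified DESC_RATE* names from the common rtlwifi headers.

#include <linux/types.h>

/* Hypothetical shared helper, not in the patch; DESC_RATE* come from the
 * common rtlwifi definitions this rename converges on. */
static inline bool rtl_is_cck_rate_example(u8 rate)
{
	return rate == DESC_RATE1M || rate == DESC_RATE2M ||
	       rate == DESC_RATE5_5M || rate == DESC_RATE11M;
}
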
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index fb003868bdef..e1fd27c888bf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -383,21 +383,21 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = {
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
- .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
- .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
- .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
- .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
- .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
- .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
- .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
- .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
- .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
- .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
- .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
- .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
-
- .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
- .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
};
static struct pci_device_id rtl92se_pci_ids[] = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 672fd3b02835..125b29bd2f93 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -191,8 +191,8 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
- if (pstats->is_ht && pstats->rate >= DESC92_RATEMCS8 &&
- pstats->rate <= DESC92_RATEMCS15)
+ if (pstats->is_ht && pstats->rate >= DESC_RATEMCS8 &&
+ pstats->rate <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -264,7 +264,6 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
struct rx_fwinfo *p_drvinfo;
u32 phystatus = (u32)GET_RX_STATUS_DESC_PHY_STATUS(pdesc);
struct ieee80211_hdr *hdr;
- bool first_ampdu = false;
stats->length = (u16)GET_RX_STATUS_DESC_PKT_LEN(pdesc);
stats->rx_drvinfo_size = (u8)GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) * 8;
@@ -319,8 +318,8 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
rx_status->flag |= RX_FLAG_DECRYPTED;
}
- rx_status->rate_idx = rtlwifi_rate_mapping(hw,
- stats->is_ht, stats->rate, first_ampdu);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+ false, stats->rate);
rx_status->mactime = stats->timestamp_low;
if (phystatus) {
@@ -394,14 +393,14 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RSVD_MACID(pdesc, reserved_macid);
SET_TX_DESC_TXHT(pdesc, ((ptcb_desc->hw_rate >=
- DESC92_RATEMCS0) ? 1 : 0));
+ DESC_RATEMCS0) ? 1 : 0));
if (rtlhal->version == VERSION_8192S_ACUT) {
- if (ptcb_desc->hw_rate == DESC92_RATE1M ||
- ptcb_desc->hw_rate == DESC92_RATE2M ||
- ptcb_desc->hw_rate == DESC92_RATE5_5M ||
- ptcb_desc->hw_rate == DESC92_RATE11M) {
- ptcb_desc->hw_rate = DESC92_RATE12M;
+ if (ptcb_desc->hw_rate == DESC_RATE1M ||
+ ptcb_desc->hw_rate == DESC_RATE2M ||
+ ptcb_desc->hw_rate == DESC_RATE5_5M ||
+ ptcb_desc->hw_rate == DESC_RATE11M) {
+ ptcb_desc->hw_rate = DESC_RATE12M;
}
}
@@ -430,7 +429,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_RTS_BANDWIDTH(pdesc, 0);
SET_TX_DESC_RTS_SUB_CARRIER(pdesc, ptcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
- DESC92_RATE54M) ?
+ DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0)
: (ptcb_desc->rts_use_shortgi ? 1 : 0)));
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index d372ccaf3465..2f7c144d7980 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -45,164 +45,6 @@ static u8 _rtl8723e_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl8723e_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate)
-{
- int rate_idx;
-
- if (!isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATE6M:
- rate_idx = 0;
- break;
- case DESC92C_RATE9M:
- rate_idx = 1;
- break;
- case DESC92C_RATE12M:
- rate_idx = 2;
- break;
- case DESC92C_RATE18M:
- rate_idx = 3;
- break;
- case DESC92C_RATE24M:
- rate_idx = 4;
- break;
- case DESC92C_RATE36M:
- rate_idx = 5;
- break;
- case DESC92C_RATE48M:
- rate_idx = 6;
- break;
- case DESC92C_RATE54M:
- rate_idx = 7;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATEMCS0:
- rate_idx = 0;
- break;
- case DESC92C_RATEMCS1:
- rate_idx = 1;
- break;
- case DESC92C_RATEMCS2:
- rate_idx = 2;
- break;
- case DESC92C_RATEMCS3:
- rate_idx = 3;
- break;
- case DESC92C_RATEMCS4:
- rate_idx = 4;
- break;
- case DESC92C_RATEMCS5:
- rate_idx = 5;
- break;
- case DESC92C_RATEMCS6:
- rate_idx = 6;
- break;
- case DESC92C_RATEMCS7:
- rate_idx = 7;
- break;
- case DESC92C_RATEMCS8:
- rate_idx = 8;
- break;
- case DESC92C_RATEMCS9:
- rate_idx = 9;
- break;
- case DESC92C_RATEMCS10:
- rate_idx = 10;
- break;
- case DESC92C_RATEMCS11:
- rate_idx = 11;
- break;
- case DESC92C_RATEMCS12:
- rate_idx = 12;
- break;
- case DESC92C_RATEMCS13:
- rate_idx = 13;
- break;
- case DESC92C_RATEMCS14:
- rate_idx = 14;
- break;
- case DESC92C_RATEMCS15:
- rate_idx = 15;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- return rate_idx;
-}
-
static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstatus, u8 *pdesc,
struct rx_fwinfo_8723e *p_drvinfo,
@@ -503,8 +345,8 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw,
* are use (RX_FLAG_HT)
* Notice: this is diff with windows define
*/
- rx_status->rate_idx = _rtl8723e_rate_mapping(hw,
- status->is_ht, status->rate);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+ false, status->rate);
rx_status->mactime = status->timestamp_low;
if (phystatus == true) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
index 20dcc25c506c..b7b73cbe346d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
@@ -874,31 +874,6 @@ void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
-void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 txpwr_level;
- long txpwr_dbm;
-
- txpwr_level = rtlphy->cur_cck_txpwridx;
- txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B,
- txpwr_level);
- txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
- if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) >
- txpwr_dbm)
- txpwr_dbm =
- rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
- txpwr_level);
- txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
- if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
- txpwr_level) > txpwr_dbm)
- txpwr_dbm =
- rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
- txpwr_level);
- *powerlevel = txpwr_dbm;
-}
-
static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
u8 rate)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
index 6339738a0e33..9021d4745ab7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
@@ -114,8 +114,6 @@ bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw);
bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw);
bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw);
void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw,
- long *powerlevel);
void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw,
u8 channel);
void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
index d6a1c70cb657..338ec9a9d09b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
@@ -47,164 +47,6 @@ static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl8723be_rate_mapping(struct ieee80211_hw *hw,
- bool isht, u8 desc_rate)
-{
- int rate_idx;
-
- if (!isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
- switch (desc_rate) {
- case DESC92C_RATE1M:
- rate_idx = 0;
- break;
- case DESC92C_RATE2M:
- rate_idx = 1;
- break;
- case DESC92C_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC92C_RATE11M:
- rate_idx = 3;
- break;
- case DESC92C_RATE6M:
- rate_idx = 4;
- break;
- case DESC92C_RATE9M:
- rate_idx = 5;
- break;
- case DESC92C_RATE12M:
- rate_idx = 6;
- break;
- case DESC92C_RATE18M:
- rate_idx = 7;
- break;
- case DESC92C_RATE24M:
- rate_idx = 8;
- break;
- case DESC92C_RATE36M:
- rate_idx = 9;
- break;
- case DESC92C_RATE48M:
- rate_idx = 10;
- break;
- case DESC92C_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATE6M:
- rate_idx = 0;
- break;
- case DESC92C_RATE9M:
- rate_idx = 1;
- break;
- case DESC92C_RATE12M:
- rate_idx = 2;
- break;
- case DESC92C_RATE18M:
- rate_idx = 3;
- break;
- case DESC92C_RATE24M:
- rate_idx = 4;
- break;
- case DESC92C_RATE36M:
- rate_idx = 5;
- break;
- case DESC92C_RATE48M:
- rate_idx = 6;
- break;
- case DESC92C_RATE54M:
- rate_idx = 7;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- } else {
- switch (desc_rate) {
- case DESC92C_RATEMCS0:
- rate_idx = 0;
- break;
- case DESC92C_RATEMCS1:
- rate_idx = 1;
- break;
- case DESC92C_RATEMCS2:
- rate_idx = 2;
- break;
- case DESC92C_RATEMCS3:
- rate_idx = 3;
- break;
- case DESC92C_RATEMCS4:
- rate_idx = 4;
- break;
- case DESC92C_RATEMCS5:
- rate_idx = 5;
- break;
- case DESC92C_RATEMCS6:
- rate_idx = 6;
- break;
- case DESC92C_RATEMCS7:
- rate_idx = 7;
- break;
- case DESC92C_RATEMCS8:
- rate_idx = 8;
- break;
- case DESC92C_RATEMCS9:
- rate_idx = 9;
- break;
- case DESC92C_RATEMCS10:
- rate_idx = 10;
- break;
- case DESC92C_RATEMCS11:
- rate_idx = 11;
- break;
- case DESC92C_RATEMCS12:
- rate_idx = 12;
- break;
- case DESC92C_RATEMCS13:
- rate_idx = 13;
- break;
- case DESC92C_RATEMCS14:
- rate_idx = 14;
- break;
- case DESC92C_RATEMCS15:
- rate_idx = 15;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- return rate_idx;
-}
-
static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstatus, u8 *pdesc,
struct rx_fwinfo_8723be *p_drvinfo,
@@ -558,8 +400,8 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
* supported rates or MCS index if HT rates
* are use (RX_FLAG_HT)
*/
- rx_status->rate_idx = _rtl8723be_rate_mapping(hw, status->is_ht,
- status->rate);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+ false, status->rate);
rx_status->mactime = status->timestamp_low;
if (phystatus) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
index a730985ae81d..ee7c208bd070 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
@@ -373,60 +373,6 @@ enum rtl_desc_qsel {
QSLT_CMD = 0x13,
};
-enum rtl_desc8821ae_rate {
- DESC_RATE1M = 0x00,
- DESC_RATE2M = 0x01,
- DESC_RATE5_5M = 0x02,
- DESC_RATE11M = 0x03,
-
- DESC_RATE6M = 0x04,
- DESC_RATE9M = 0x05,
- DESC_RATE12M = 0x06,
- DESC_RATE18M = 0x07,
- DESC_RATE24M = 0x08,
- DESC_RATE36M = 0x09,
- DESC_RATE48M = 0x0a,
- DESC_RATE54M = 0x0b,
-
- DESC_RATEMCS0 = 0x0c,
- DESC_RATEMCS1 = 0x0d,
- DESC_RATEMCS2 = 0x0e,
- DESC_RATEMCS3 = 0x0f,
- DESC_RATEMCS4 = 0x10,
- DESC_RATEMCS5 = 0x11,
- DESC_RATEMCS6 = 0x12,
- DESC_RATEMCS7 = 0x13,
- DESC_RATEMCS8 = 0x14,
- DESC_RATEMCS9 = 0x15,
- DESC_RATEMCS10 = 0x16,
- DESC_RATEMCS11 = 0x17,
- DESC_RATEMCS12 = 0x18,
- DESC_RATEMCS13 = 0x19,
- DESC_RATEMCS14 = 0x1a,
- DESC_RATEMCS15 = 0x1b,
-
- DESC_RATEVHT1SS_MCS0 = 0x2c,
- DESC_RATEVHT1SS_MCS1 = 0x2d,
- DESC_RATEVHT1SS_MCS2 = 0x2e,
- DESC_RATEVHT1SS_MCS3 = 0x2f,
- DESC_RATEVHT1SS_MCS4 = 0x30,
- DESC_RATEVHT1SS_MCS5 = 0x31,
- DESC_RATEVHT1SS_MCS6 = 0x32,
- DESC_RATEVHT1SS_MCS7 = 0x33,
- DESC_RATEVHT1SS_MCS8 = 0x34,
- DESC_RATEVHT1SS_MCS9 = 0x35,
- DESC_RATEVHT2SS_MCS0 = 0x36,
- DESC_RATEVHT2SS_MCS1 = 0x37,
- DESC_RATEVHT2SS_MCS2 = 0x38,
- DESC_RATEVHT2SS_MCS3 = 0x39,
- DESC_RATEVHT2SS_MCS4 = 0x3a,
- DESC_RATEVHT2SS_MCS5 = 0x3b,
- DESC_RATEVHT2SS_MCS6 = 0x3c,
- DESC_RATEVHT2SS_MCS7 = 0x3d,
- DESC_RATEVHT2SS_MCS8 = 0x3e,
- DESC_RATEVHT2SS_MCS9 = 0x3f,
-};
-
enum rx_packet_type {
NORMAL_RX,
TX_REPORT1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
index bf0b0ce9519c..36b3e91d996e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
@@ -93,9 +93,9 @@
#define RTL8812_TRANS_CARDEMU_TO_SUS \
{0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xcc}, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xc0}, \
{0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xEC}, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xE0}, \
{0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\
PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x07 \
/* gpio11 input mode, gpio10~8 output mode */}, \
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
index fc92dd6a0d07..a4988121e1ab 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
@@ -85,52 +85,6 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = 1;
}
-static void load_wowlan_fw(struct rtl_priv *rtlpriv)
-{
- /* callback routine to load wowlan firmware after main fw has
- * been loaded
- */
- const struct firmware *wowlan_firmware;
- char *fw_name = NULL;
- int err;
-
- /* for wowlan firmware buf */
- rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
- if (!rtlpriv->rtlhal.wowlan_firmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't alloc buffer for wowlan fw.\n");
- return;
- }
-
- if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8821AE)
- fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
- else
- fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
- err = request_firmware(&wowlan_firmware, fw_name, rtlpriv->io.dev);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request wowlan firmware!\n");
- goto error;
- }
-
- if (wowlan_firmware->size > 0x8000) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Wowlan Firmware is too big!\n");
- goto error;
- }
-
- memcpy(rtlpriv->rtlhal.wowlan_firmware, wowlan_firmware->data,
- wowlan_firmware->size);
- rtlpriv->rtlhal.wowlan_fwsize = wowlan_firmware->size;
- release_firmware(wowlan_firmware);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "WOWLAN FirmwareDownload OK\n");
- return;
-error:
- release_firmware(wowlan_firmware);
- vfree(rtlpriv->rtlhal.wowlan_firmware);
-}
-
/*InitializeVariables8812E*/
int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
{
@@ -231,7 +185,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
else if (rtlpriv->psc.reg_fwctrl_lps == 3)
rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
- rtlpriv->rtl_fw_second_cb = load_wowlan_fw;
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
if (!rtlpriv->rtlhal.pfirmware) {
@@ -239,20 +192,41 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
"Can't alloc buffer for fw.\n");
return 1;
}
+ rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
+ if (!rtlpriv->rtlhal.wowlan_firmware) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Can't alloc buffer for wowlan fw.\n");
+ return 1;
+ }
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin";
- else
+ rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
+ } else {
rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin";
+ rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
+ }
rtlpriv->max_fw_size = 0x8000;
+ /*load normal firmware*/
pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request firmware!\n");
+ "Failed to request normal firmware!\n");
+ return 1;
+ }
+ /*load wowlan firmware*/
+ pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1,
+ rtlpriv->cfg->wowlan_fw_name,
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_wowlan_fw_cb);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Failed to request wowlan firmware!\n");
return 1;
}
return 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
index 383b86b05cba..72af4b9ee32b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
@@ -48,232 +48,6 @@ static u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-/* mac80211's rate_idx is like this:
- *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
- *
- * B/G rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
- *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
- * A rate:
- * (rx_status->flag & RX_FLAG_HT) = 0,
- * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
- *
- * N rate:
- * (rx_status->flag & RX_FLAG_HT) = 1,
- * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
- */
-static int _rtl8821ae_rate_mapping(struct ieee80211_hw *hw,
- bool isht, bool isvht, u8 desc_rate)
-{
- int rate_idx;
-
- if (!isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
- switch (desc_rate) {
- case DESC_RATE1M:
- rate_idx = 0;
- break;
- case DESC_RATE2M:
- rate_idx = 1;
- break;
- case DESC_RATE5_5M:
- rate_idx = 2;
- break;
- case DESC_RATE11M:
- rate_idx = 3;
- break;
- case DESC_RATE6M:
- rate_idx = 4;
- break;
- case DESC_RATE9M:
- rate_idx = 5;
- break;
- case DESC_RATE12M:
- rate_idx = 6;
- break;
- case DESC_RATE18M:
- rate_idx = 7;
- break;
- case DESC_RATE24M:
- rate_idx = 8;
- break;
- case DESC_RATE36M:
- rate_idx = 9;
- break;
- case DESC_RATE48M:
- rate_idx = 10;
- break;
- case DESC_RATE54M:
- rate_idx = 11;
- break;
- default:
- rate_idx = 0;
- break;
- }
- } else {
- switch (desc_rate) {
- case DESC_RATE6M:
- rate_idx = 0;
- break;
- case DESC_RATE9M:
- rate_idx = 1;
- break;
- case DESC_RATE12M:
- rate_idx = 2;
- break;
- case DESC_RATE18M:
- rate_idx = 3;
- break;
- case DESC_RATE24M:
- rate_idx = 4;
- break;
- case DESC_RATE36M:
- rate_idx = 5;
- break;
- case DESC_RATE48M:
- rate_idx = 6;
- break;
- case DESC_RATE54M:
- rate_idx = 7;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- } else {
- switch (desc_rate) {
- case DESC_RATEMCS0:
- rate_idx = 0;
- break;
- case DESC_RATEMCS1:
- rate_idx = 1;
- break;
- case DESC_RATEMCS2:
- rate_idx = 2;
- break;
- case DESC_RATEMCS3:
- rate_idx = 3;
- break;
- case DESC_RATEMCS4:
- rate_idx = 4;
- break;
- case DESC_RATEMCS5:
- rate_idx = 5;
- break;
- case DESC_RATEMCS6:
- rate_idx = 6;
- break;
- case DESC_RATEMCS7:
- rate_idx = 7;
- break;
- case DESC_RATEMCS8:
- rate_idx = 8;
- break;
- case DESC_RATEMCS9:
- rate_idx = 9;
- break;
- case DESC_RATEMCS10:
- rate_idx = 10;
- break;
- case DESC_RATEMCS11:
- rate_idx = 11;
- break;
- case DESC_RATEMCS12:
- rate_idx = 12;
- break;
- case DESC_RATEMCS13:
- rate_idx = 13;
- break;
- case DESC_RATEMCS14:
- rate_idx = 14;
- break;
- case DESC_RATEMCS15:
- rate_idx = 15;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
-
- if (isvht) {
- switch (desc_rate) {
- case DESC_RATEVHT1SS_MCS0:
- rate_idx = 0;
- break;
- case DESC_RATEVHT1SS_MCS1:
- rate_idx = 1;
- break;
- case DESC_RATEVHT1SS_MCS2:
- rate_idx = 2;
- break;
- case DESC_RATEVHT1SS_MCS3:
- rate_idx = 3;
- break;
- case DESC_RATEVHT1SS_MCS4:
- rate_idx = 4;
- break;
- case DESC_RATEVHT1SS_MCS5:
- rate_idx = 5;
- break;
- case DESC_RATEVHT1SS_MCS6:
- rate_idx = 6;
- break;
- case DESC_RATEVHT1SS_MCS7:
- rate_idx = 7;
- break;
- case DESC_RATEVHT1SS_MCS8:
- rate_idx = 8;
- break;
- case DESC_RATEVHT1SS_MCS9:
- rate_idx = 9;
- break;
- case DESC_RATEVHT2SS_MCS0:
- rate_idx = 0;
- break;
- case DESC_RATEVHT2SS_MCS1:
- rate_idx = 1;
- break;
- case DESC_RATEVHT2SS_MCS2:
- rate_idx = 2;
- break;
- case DESC_RATEVHT2SS_MCS3:
- rate_idx = 3;
- break;
- case DESC_RATEVHT2SS_MCS4:
- rate_idx = 4;
- break;
- case DESC_RATEVHT2SS_MCS5:
- rate_idx = 5;
- break;
- case DESC_RATEVHT2SS_MCS6:
- rate_idx = 6;
- break;
- case DESC_RATEVHT2SS_MCS7:
- rate_idx = 7;
- break;
- case DESC_RATEVHT2SS_MCS8:
- rate_idx = 8;
- break;
- case DESC_RATEVHT2SS_MCS9:
- rate_idx = 9;
- break;
- default:
- rate_idx = 0;
- break;
- }
- }
- return rate_idx;
-}
-
static u16 odm_cfo(char value)
{
int ret_val;
@@ -766,9 +540,9 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
* supported rates or MCS index if HT rates
* are use (RX_FLAG_HT)
*/
- rx_status->rate_idx =
- _rtl8821ae_rate_mapping(hw, status->is_ht,
- status->is_vht, status->rate);
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
+ status->is_vht,
+ status->rate);
rx_status->mactime = status->timestamp_low;
if (phystatus) {
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 6866dcf24340..7a718fdb82cd 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -331,10 +331,10 @@ enum hardware_type {
(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
#define RX_HAL_IS_CCK_RATE(rxmcs) \
- ((rxmcs) == DESC92_RATE1M || \
- (rxmcs) == DESC92_RATE2M || \
- (rxmcs) == DESC92_RATE5_5M || \
- (rxmcs) == DESC92_RATE11M)
+ ((rxmcs) == DESC_RATE1M || \
+ (rxmcs) == DESC_RATE2M || \
+ (rxmcs) == DESC_RATE5_5M || \
+ (rxmcs) == DESC_RATE11M)
enum scan_operation_backup_opt {
SCAN_OPT_BACKUP = 0,
@@ -579,38 +579,59 @@ enum rtl_hal_state {
};
enum rtl_desc92_rate {
- DESC92_RATE1M = 0x00,
- DESC92_RATE2M = 0x01,
- DESC92_RATE5_5M = 0x02,
- DESC92_RATE11M = 0x03,
-
- DESC92_RATE6M = 0x04,
- DESC92_RATE9M = 0x05,
- DESC92_RATE12M = 0x06,
- DESC92_RATE18M = 0x07,
- DESC92_RATE24M = 0x08,
- DESC92_RATE36M = 0x09,
- DESC92_RATE48M = 0x0a,
- DESC92_RATE54M = 0x0b,
-
- DESC92_RATEMCS0 = 0x0c,
- DESC92_RATEMCS1 = 0x0d,
- DESC92_RATEMCS2 = 0x0e,
- DESC92_RATEMCS3 = 0x0f,
- DESC92_RATEMCS4 = 0x10,
- DESC92_RATEMCS5 = 0x11,
- DESC92_RATEMCS6 = 0x12,
- DESC92_RATEMCS7 = 0x13,
- DESC92_RATEMCS8 = 0x14,
- DESC92_RATEMCS9 = 0x15,
- DESC92_RATEMCS10 = 0x16,
- DESC92_RATEMCS11 = 0x17,
- DESC92_RATEMCS12 = 0x18,
- DESC92_RATEMCS13 = 0x19,
- DESC92_RATEMCS14 = 0x1a,
- DESC92_RATEMCS15 = 0x1b,
- DESC92_RATEMCS15_SG = 0x1c,
- DESC92_RATEMCS32 = 0x20,
+ DESC_RATE1M = 0x00,
+ DESC_RATE2M = 0x01,
+ DESC_RATE5_5M = 0x02,
+ DESC_RATE11M = 0x03,
+
+ DESC_RATE6M = 0x04,
+ DESC_RATE9M = 0x05,
+ DESC_RATE12M = 0x06,
+ DESC_RATE18M = 0x07,
+ DESC_RATE24M = 0x08,
+ DESC_RATE36M = 0x09,
+ DESC_RATE48M = 0x0a,
+ DESC_RATE54M = 0x0b,
+
+ DESC_RATEMCS0 = 0x0c,
+ DESC_RATEMCS1 = 0x0d,
+ DESC_RATEMCS2 = 0x0e,
+ DESC_RATEMCS3 = 0x0f,
+ DESC_RATEMCS4 = 0x10,
+ DESC_RATEMCS5 = 0x11,
+ DESC_RATEMCS6 = 0x12,
+ DESC_RATEMCS7 = 0x13,
+ DESC_RATEMCS8 = 0x14,
+ DESC_RATEMCS9 = 0x15,
+ DESC_RATEMCS10 = 0x16,
+ DESC_RATEMCS11 = 0x17,
+ DESC_RATEMCS12 = 0x18,
+ DESC_RATEMCS13 = 0x19,
+ DESC_RATEMCS14 = 0x1a,
+ DESC_RATEMCS15 = 0x1b,
+ DESC_RATEMCS15_SG = 0x1c,
+ DESC_RATEMCS32 = 0x20,
+
+ DESC_RATEVHT1SS_MCS0 = 0x2c,
+ DESC_RATEVHT1SS_MCS1 = 0x2d,
+ DESC_RATEVHT1SS_MCS2 = 0x2e,
+ DESC_RATEVHT1SS_MCS3 = 0x2f,
+ DESC_RATEVHT1SS_MCS4 = 0x30,
+ DESC_RATEVHT1SS_MCS5 = 0x31,
+ DESC_RATEVHT1SS_MCS6 = 0x32,
+ DESC_RATEVHT1SS_MCS7 = 0x33,
+ DESC_RATEVHT1SS_MCS8 = 0x34,
+ DESC_RATEVHT1SS_MCS9 = 0x35,
+ DESC_RATEVHT2SS_MCS0 = 0x36,
+ DESC_RATEVHT2SS_MCS1 = 0x37,
+ DESC_RATEVHT2SS_MCS2 = 0x38,
+ DESC_RATEVHT2SS_MCS3 = 0x39,
+ DESC_RATEVHT2SS_MCS4 = 0x3a,
+ DESC_RATEVHT2SS_MCS5 = 0x3b,
+ DESC_RATEVHT2SS_MCS6 = 0x3c,
+ DESC_RATEVHT2SS_MCS7 = 0x3d,
+ DESC_RATEVHT2SS_MCS8 = 0x3e,
+ DESC_RATEVHT2SS_MCS9 = 0x3f,
};
enum rtl_var_map {
@@ -2242,6 +2263,7 @@ struct rtl_hal_cfg {
char *name;
char *fw_name;
char *alt_fw_name;
+ char *wowlan_fw_name;
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
@@ -2518,8 +2540,6 @@ struct proxim {
struct rtl_priv {
struct ieee80211_hw *hw;
- /* Used to load a second firmware */
- void (*rtl_fw_second_cb)(struct rtl_priv *rtlpriv);
struct completion firmware_loading_complete;
struct list_head list;
struct rtl_priv *buddy_priv;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d8c10764f130..7cfa6c027c0c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -142,10 +142,6 @@ struct netfront_queue {
struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
grant_ref_t gref_rx_head;
grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
-
- unsigned long rx_pfn_array[NET_RX_RING_SIZE];
- struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
- struct mmu_update rx_mmu[NET_RX_RING_SIZE];
};
struct netfront_info {
@@ -424,109 +420,68 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
xennet_maybe_wake_tx(queue);
}
-static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
- struct xen_netif_tx_request *tx)
+static struct xen_netif_tx_request *xennet_make_one_txreq(
+ struct netfront_queue *queue, struct sk_buff *skb,
+ struct page *page, unsigned int offset, unsigned int len)
{
- char *data = skb->data;
- unsigned long mfn;
- RING_IDX prod = queue->tx.req_prod_pvt;
- int frags = skb_shinfo(skb)->nr_frags;
- unsigned int offset = offset_in_page(data);
- unsigned int len = skb_headlen(skb);
unsigned int id;
+ struct xen_netif_tx_request *tx;
grant_ref_t ref;
- int i;
- /* While the header overlaps a page boundary (including being
- larger than a page), split it it into page-sized chunks. */
- while (len > PAGE_SIZE - offset) {
- tx->size = PAGE_SIZE - offset;
- tx->flags |= XEN_NETTXF_more_data;
- len -= tx->size;
- data += tx->size;
- offset = 0;
+ len = min_t(unsigned int, PAGE_SIZE - offset, len);
- id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
- queue->tx_skbs[id].skb = skb_get(skb);
- tx = RING_GET_REQUEST(&queue->tx, prod++);
- tx->id = id;
- ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
- BUG_ON((signed short)ref < 0);
+ id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+ tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
+ ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
- mfn = virt_to_mfn(data);
- gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
- mfn, GNTMAP_readonly);
+ gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
+ page_to_mfn(page), GNTMAP_readonly);
- queue->grant_tx_page[id] = virt_to_page(data);
- tx->gref = queue->grant_tx_ref[id] = ref;
- tx->offset = offset;
- tx->size = len;
- tx->flags = 0;
- }
+ queue->tx_skbs[id].skb = skb;
+ queue->grant_tx_page[id] = page;
+ queue->grant_tx_ref[id] = ref;
- /* Grant backend access to each skb fragment page. */
- for (i = 0; i < frags; i++) {
- skb_frag_t *frag = skb_shinfo(skb)->frags + i;
- struct page *page = skb_frag_page(frag);
+ tx->id = id;
+ tx->gref = ref;
+ tx->offset = offset;
+ tx->size = len;
+ tx->flags = 0;
- len = skb_frag_size(frag);
- offset = frag->page_offset;
+ return tx;
+}
- /* Skip unused frames from start of page */
- page += offset >> PAGE_SHIFT;
- offset &= ~PAGE_MASK;
+static struct xen_netif_tx_request *xennet_make_txreqs(
+ struct netfront_queue *queue, struct xen_netif_tx_request *tx,
+ struct sk_buff *skb, struct page *page,
+ unsigned int offset, unsigned int len)
+{
+ /* Skip unused frames from start of page */
+ page += offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
- while (len > 0) {
- unsigned long bytes;
-
- bytes = PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
-
- tx->flags |= XEN_NETTXF_more_data;
-
- id = get_id_from_freelist(&queue->tx_skb_freelist,
- queue->tx_skbs);
- queue->tx_skbs[id].skb = skb_get(skb);
- tx = RING_GET_REQUEST(&queue->tx, prod++);
- tx->id = id;
- ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
- BUG_ON((signed short)ref < 0);
-
- mfn = pfn_to_mfn(page_to_pfn(page));
- gnttab_grant_foreign_access_ref(ref,
- queue->info->xbdev->otherend_id,
- mfn, GNTMAP_readonly);
-
- queue->grant_tx_page[id] = page;
- tx->gref = queue->grant_tx_ref[id] = ref;
- tx->offset = offset;
- tx->size = bytes;
- tx->flags = 0;
-
- offset += bytes;
- len -= bytes;
-
- /* Next frame */
- if (offset == PAGE_SIZE && len) {
- BUG_ON(!PageCompound(page));
- page++;
- offset = 0;
- }
- }
+ while (len) {
+ tx->flags |= XEN_NETTXF_more_data;
+ tx = xennet_make_one_txreq(queue, skb_get(skb),
+ page, offset, len);
+ page++;
+ offset = 0;
+ len -= tx->size;
}
- queue->tx.req_prod_pvt = prod;
+ return tx;
}
/*
- * Count how many ring slots are required to send the frags of this
- * skb. Each frag might be a compound page.
+ * Count how many ring slots are required to send this skb. Each frag
+ * might be a compound page.
*/
-static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+static int xennet_count_skb_slots(struct sk_buff *skb)
{
int i, frags = skb_shinfo(skb)->nr_frags;
- int pages = 0;
+ int pages;
+
+ pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb));
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
@@ -562,18 +517,15 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned short id;
struct netfront_info *np = netdev_priv(dev);
struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
- struct xen_netif_tx_request *tx;
- char *data = skb->data;
- RING_IDX i;
- grant_ref_t ref;
- unsigned long mfn;
+ struct xen_netif_tx_request *tx, *first_tx;
+ unsigned int i;
int notify;
int slots;
- unsigned int offset = offset_in_page(data);
- unsigned int len = skb_headlen(skb);
+ struct page *page;
+ unsigned int offset;
+ unsigned int len;
unsigned long flags;
struct netfront_queue *queue = NULL;
unsigned int num_queues = dev->real_num_tx_queues;
@@ -596,18 +548,18 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
- slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
- xennet_count_skb_frag_slots(skb);
+ slots = xennet_count_skb_slots(skb);
if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
slots, skb->len);
if (skb_linearize(skb))
goto drop;
- data = skb->data;
- offset = offset_in_page(data);
- len = skb_headlen(skb);
}
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ len = skb_headlen(skb);
+
spin_lock_irqsave(&queue->tx_lock, flags);
if (unlikely(!netif_carrier_ok(dev) ||
@@ -617,25 +569,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
- i = queue->tx.req_prod_pvt;
-
- id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
- queue->tx_skbs[id].skb = skb;
-
- tx = RING_GET_REQUEST(&queue->tx, i);
+ /* First request for the linear area. */
+ first_tx = tx = xennet_make_one_txreq(queue, skb,
+ page, offset, len);
+ page++;
+ offset = 0;
+ len -= tx->size;
- tx->id = id;
- ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
- BUG_ON((signed short)ref < 0);
- mfn = virt_to_mfn(data);
- gnttab_grant_foreign_access_ref(
- ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
- queue->grant_tx_page[id] = virt_to_page(data);
- tx->gref = queue->grant_tx_ref[id] = ref;
- tx->offset = offset;
- tx->size = len;
-
- tx->flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL)
/* local packet? */
tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
@@ -643,11 +583,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* remote but checksummed. */
tx->flags |= XEN_NETTXF_data_validated;
+ /* Optional extra info after the first request. */
if (skb_shinfo(skb)->gso_size) {
struct xen_netif_extra_info *gso;
gso = (struct xen_netif_extra_info *)
- RING_GET_REQUEST(&queue->tx, ++i);
+ RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
tx->flags |= XEN_NETTXF_extra_info;
@@ -662,10 +603,19 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
gso->flags = 0;
}
- queue->tx.req_prod_pvt = i + 1;
+ /* Requests for the rest of the linear area. */
+ tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+
+ /* Requests for all the frags. */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ tx = xennet_make_txreqs(queue, tx, skb,
+ skb_frag_page(frag), frag->page_offset,
+ skb_frag_size(frag));
+ }
- xennet_make_frags(skb, queue, tx);
- tx->size = skb->len;
+ /* First request has the packet length. */
+ first_tx->size = skb->len;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 6ab43a814ad2..6c80154e8bff 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -141,7 +141,7 @@ struct miphy365x_phy {
bool pcie_tx_pol_inv;
bool sata_tx_pol_inv;
u32 sata_gen;
- u64 ctrlreg;
+ u32 ctrlreg;
u8 type;
};
@@ -179,7 +179,7 @@ static int miphy365x_set_path(struct miphy365x_phy *miphy_phy,
bool sata = (miphy_phy->type == MIPHY_TYPE_SATA);
return regmap_update_bits(miphy_dev->regmap,
- (unsigned int)miphy_phy->ctrlreg,
+ miphy_phy->ctrlreg,
SYSCFG_SELECT_SATA_MASK,
sata << SYSCFG_SELECT_SATA_POS);
}
@@ -445,7 +445,6 @@ int miphy365x_get_addr(struct device *dev, struct miphy365x_phy *miphy_phy,
{
struct device_node *phynode = miphy_phy->phy->dev.of_node;
const char *name;
- const __be32 *taddr;
int type = miphy_phy->type;
int ret;
@@ -455,22 +454,6 @@ int miphy365x_get_addr(struct device *dev, struct miphy365x_phy *miphy_phy,
return ret;
}
- if (!strncmp(name, "syscfg", 6)) {
- taddr = of_get_address(phynode, index, NULL, NULL);
- if (!taddr) {
- dev_err(dev, "failed to fetch syscfg address\n");
- return -EINVAL;
- }
-
- miphy_phy->ctrlreg = of_translate_address(phynode, taddr);
- if (miphy_phy->ctrlreg == OF_BAD_ADDR) {
- dev_err(dev, "failed to translate syscfg address\n");
- return -EINVAL;
- }
-
- return 0;
- }
-
if (!((!strncmp(name, "sata", 4) && type == MIPHY_TYPE_SATA) ||
(!strncmp(name, "pcie", 4) && type == MIPHY_TYPE_PCIE)))
return 0;
@@ -606,7 +589,15 @@ static int miphy365x_probe(struct platform_device *pdev)
return ret;
phy_set_drvdata(phy, miphy_dev->phys[port]);
+
port++;
+ /* sysconfig offsets are indexed from 1 */
+ ret = of_property_read_u32_index(np, "st,syscfg", port,
+ &miphy_phy->ctrlreg);
+ if (ret) {
+ dev_err(&pdev->dev, "No sysconfig offset found\n");
+ return ret;
+ }
}
provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
diff --git a/drivers/phy/phy-stih407-usb.c b/drivers/phy/phy-stih407-usb.c
index 74f0fab3cd8a..1d5ae5f8ef69 100644
--- a/drivers/phy/phy-stih407-usb.c
+++ b/drivers/phy/phy-stih407-usb.c
@@ -22,6 +22,9 @@
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
+#define PHYPARAM_REG 1
+#define PHYCTRL_REG 2
+
/* Default PHY_SEL and REFCLKSEL configuration */
#define STIH407_USB_PICOPHY_CTRL_PORT_CONF 0x6
#define STIH407_USB_PICOPHY_CTRL_PORT_MASK 0x1f
@@ -93,7 +96,7 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct phy_provider *phy_provider;
struct phy *phy;
- struct resource *res;
+ int ret;
phy_dev = devm_kzalloc(dev, sizeof(*phy_dev), GFP_KERNEL);
if (!phy_dev)
@@ -123,19 +126,19 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
return PTR_ERR(phy_dev->regmap);
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
- if (!res) {
- dev_err(dev, "No ctrl reg found\n");
- return -ENXIO;
+ ret = of_property_read_u32_index(np, "st,syscfg", PHYPARAM_REG,
+ &phy_dev->param);
+ if (ret) {
+ dev_err(dev, "can't get phyparam offset (%d)\n", ret);
+ return ret;
}
- phy_dev->ctrl = res->start;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "param");
- if (!res) {
- dev_err(dev, "No param reg found\n");
- return -ENXIO;
+ ret = of_property_read_u32_index(np, "st,syscfg", PHYCTRL_REG,
+ &phy_dev->ctrl);
+ if (ret) {
+ dev_err(dev, "can't get phyctrl offset (%d)\n", ret);
+ return ret;
}
- phy_dev->param = res->start;
phy = devm_phy_create(dev, NULL, &stih407_usb2_picophy_data);
if (IS_ERR(phy)) {
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 625227ad16ee..dd4ab8d73d34 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2800,12 +2800,12 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
* before we're going to overwrite this location with next hop ip.
* v6 uses passthrough, v4 sets the tag in the QDIO header.
*/
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
else
hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
- hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
+ hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
}
hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
@@ -2986,7 +2986,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_pull(new_skb, ETH_HLEN);
}
- if (ipv != 4 && vlan_tx_tag_present(new_skb)) {
+ if (ipv != 4 && skb_vlan_tag_present(new_skb)) {
skb_push(new_skb, VLAN_HLEN);
skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
skb_copy_to_linear_data_offset(new_skb, 4,
@@ -2995,7 +2995,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
new_skb->data + 12, 4);
tag = (u16 *)(new_skb->data + 12);
*tag = __constant_htons(ETH_P_8021Q);
- *(tag + 1) = htons(vlan_tx_tag_get(new_skb));
+ *(tag + 1) = htons(skb_vlan_tag_get(new_skb));
}
}
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 9ab997e18b20..5c31fa603de4 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -188,9 +188,9 @@ void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
unsigned int mask, unsigned int val)
{
- csio_wr_reg32(hw, addr, TP_PIO_ADDR);
- val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
- csio_wr_reg32(hw, val, TP_PIO_DATA);
+ csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
+ val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
+ csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}
void
@@ -256,7 +256,7 @@ csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
}
pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
- *data = le32_to_cpu(*data);
+ *data = le32_to_cpu(*(__le32 *)data);
return 0;
}
@@ -421,17 +421,15 @@ csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+ if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
return -EBUSY;
- cont = cont ? SF_CONT : 0;
- lock = lock ? SF_LOCK : 0;
-
- csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
- ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
- 10, NULL);
+ csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
+ BYTECNT_V(byte_cnt - 1), SF_OP_A);
+ ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
+ 10, NULL);
if (!ret)
- *valp = csio_rd_reg32(hw, SF_DATA);
+ *valp = csio_rd_reg32(hw, SF_DATA_A);
return ret;
}
@@ -453,16 +451,14 @@ csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
{
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+ if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
return -EBUSY;
- cont = cont ? SF_CONT : 0;
- lock = lock ? SF_LOCK : 0;
-
- csio_wr_reg32(hw, val, SF_DATA);
- csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);
+ csio_wr_reg32(hw, val, SF_DATA_A);
+ csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
+ OP_V(1) | SF_LOCK_V(lock), SF_OP_A);
- return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
+ return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
10, NULL);
}
@@ -533,11 +529,11 @@ csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
for ( ; nwords; nwords--, data++) {
ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
if (nwords == 1)
- csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
if (ret)
return ret;
if (byte_oriented)
- *data = htonl(*data);
+ *data = (__force __u32) htonl(*data);
}
return 0;
}
@@ -586,7 +582,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
if (ret)
goto unlock;
- csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
/* Read the page to verify the write succeeded */
ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
@@ -603,7 +599,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
return 0;
unlock:
- csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
return ret;
}
@@ -641,7 +637,7 @@ out:
if (ret)
csio_err(hw, "erase of flash sector %d failed, error %d\n",
start, ret);
- csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
return 0;
}
@@ -686,43 +682,6 @@ csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
}
/*
- * csio_hw_check_fw_version - check if the FW is compatible with
- * this driver
- * @hw: HW module
- *
- * Checks if an adapter's FW is compatible with the driver. Returns 0
- * if there's exact match, a negative error if the version could not be
- * read or there's a major/minor version mismatch/minor.
- */
-static int
-csio_hw_check_fw_version(struct csio_hw *hw)
-{
- int ret, major, minor, micro;
-
- ret = csio_hw_get_fw_version(hw, &hw->fwrev);
- if (!ret)
- ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
- if (ret)
- return ret;
-
- major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev);
- minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev);
- micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev);
-
- if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */
- csio_err(hw, "card FW has major version %u, driver wants %u\n",
- major, FW_VERSION_MAJOR(hw));
- return -EINVAL;
- }
-
- if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
- return 0; /* perfect match */
-
- /* Minor/micro version mismatch */
- return -EINVAL;
-}
-
-/*
* csio_hw_fw_dload - download firmware.
* @hw: HW module
* @fw_data: firmware image to write.
@@ -833,7 +792,7 @@ csio_hw_get_flash_params(struct csio_hw *hw)
ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
if (!ret)
ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
- csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
if (ret != 0)
return ret;
@@ -861,17 +820,17 @@ csio_hw_dev_ready(struct csio_hw *hw)
uint32_t reg;
int cnt = 6;
- while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
- (--cnt != 0))
+ while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
+ (--cnt != 0))
mdelay(100);
- if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
- (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
+ if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
+ (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
return -EIO;
}
- hw->pfn = SOURCEPF_GET(reg);
+ hw->pfn = SOURCEPF_G(reg);
return 0;
}
@@ -959,8 +918,8 @@ retry:
* timeout ... and then retry if we haven't exhausted
* our retries ...
*/
- pcie_fw = csio_rd_reg32(hw, PCIE_FW);
- if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+ pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
+ if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
if (waiting <= 0) {
if (retries-- > 0)
goto retry;
@@ -976,10 +935,10 @@ retry:
* report errors preferentially.
*/
if (state) {
- if (pcie_fw & PCIE_FW_ERR) {
+ if (pcie_fw & PCIE_FW_ERR_F) {
*state = CSIO_DEV_STATE_ERR;
rv = -ETIMEDOUT;
- } else if (pcie_fw & PCIE_FW_INIT)
+ } else if (pcie_fw & PCIE_FW_INIT_F)
*state = CSIO_DEV_STATE_INIT;
}
@@ -988,9 +947,9 @@ retry:
* there's not a valid Master PF, grab its identity
* for our caller.
*/
- if (mpfn == PCIE_FW_MASTER_MASK &&
- (pcie_fw & PCIE_FW_MASTER_VLD))
- mpfn = PCIE_FW_MASTER_GET(pcie_fw);
+ if (mpfn == PCIE_FW_MASTER_M &&
+ (pcie_fw & PCIE_FW_MASTER_VLD_F))
+ mpfn = PCIE_FW_MASTER_G(pcie_fw);
break;
}
hw->flags &= ~CSIO_HWF_MASTER;
@@ -1078,7 +1037,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst)
if (!fw_rst) {
/* PIO reset */
- csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
mdelay(2000);
return 0;
}
@@ -1090,7 +1049,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst)
}
csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
- PIORSTMODE | PIORST, 0, NULL);
+ PIORSTMODE_F | PIORST_F, 0, NULL);
if (csio_mb_issue(hw, mbp)) {
csio_err(hw, "Issue of RESET command failed.n");
@@ -1156,7 +1115,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
* If a legitimate mailbox is provided, issue a RESET command
* with a HALT indication.
*/
- if (mbox <= PCIE_FW_MASTER_MASK) {
+ if (mbox <= PCIE_FW_MASTER_M) {
struct csio_mb *mbp;
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
@@ -1166,7 +1125,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
}
csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
- PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F,
+ PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
NULL);
if (csio_mb_issue(hw, mbp)) {
@@ -1193,8 +1152,9 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
* rather than a RESET ... if it's new enough to understand that ...
*/
if (retval == 0 || force) {
- csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
- csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
+ csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
+ csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
+ PCIE_FW_HALT_F);
}
/*
@@ -1234,7 +1194,7 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
* doing it automatically, we need to clear the PCIE_FW.HALT
* bit.
*/
- csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);
+ csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);
/*
* If we've been given a valid mailbox, first try to get the
@@ -1243,21 +1203,21 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
* valid mailbox or the RESET command failed, fall back to
* hitting the chip with a hammer.
*/
- if (mbox <= PCIE_FW_MASTER_MASK) {
- csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+ if (mbox <= PCIE_FW_MASTER_M) {
+ csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
msleep(100);
if (csio_do_reset(hw, true) == 0)
return 0;
}
- csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
msleep(2000);
} else {
int ms;
- csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+ csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
- if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
+ if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
return 0;
msleep(100);
ms += 100;
@@ -1970,6 +1930,170 @@ out:
return rv;
}
+/* Is the given firmware API compatible with the one the driver was compiled
+ * with?
+ */
+static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
+{
+
+ /* short circuit if it's the exact same firmware version */
+ if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
+ return 1;
+
+#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
+ if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
+ SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
+ return 1;
+#undef SAME_INTF
+
+ return 0;
+}
+
+/* The firmware in the filesystem is usable, but should it be installed?
+ * This routine explains itself in detail if it indicates the filesystem
+ * firmware should be installed.
+ */
+static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
+ int k, int c)
+{
+ const char *reason;
+
+ if (!card_fw_usable) {
+ reason = "incompatible or unusable";
+ goto install;
+ }
+
+ if (k > c) {
+ reason = "older than the version supported with this driver";
+ goto install;
+ }
+
+ return 0;
+
+install:
+ csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
+ "installing firmware %u.%u.%u.%u on card.\n",
+ FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+ FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
+ FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+
+ return 1;
+}
+
+static struct fw_info fw_info_array[] = {
+ {
+ .chip = CHELSIO_T5,
+ .fs_name = FW_CFG_NAME_T5,
+ .fw_mod_name = FW_FNAME_T5,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T5,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
+ .intfver_nic = FW_INTFVER(T5, NIC),
+ .intfver_vnic = FW_INTFVER(T5, VNIC),
+ .intfver_ri = FW_INTFVER(T5, RI),
+ .intfver_iscsi = FW_INTFVER(T5, ISCSI),
+ .intfver_fcoe = FW_INTFVER(T5, FCOE),
+ },
+ }
+};
+
+static struct fw_info *find_fw_info(int chip)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
+ if (fw_info_array[i].chip == chip)
+ return &fw_info_array[i];
+ }
+ return NULL;
+}
+
+static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
+ const u8 *fw_data, unsigned int fw_size,
+ struct fw_hdr *card_fw, enum csio_dev_state state,
+ int *reset)
+{
+ int ret, card_fw_usable, fs_fw_usable;
+ const struct fw_hdr *fs_fw;
+ const struct fw_hdr *drv_fw;
+
+ drv_fw = &fw_info->fw_hdr;
+
+ /* Read the header of the firmware on the card */
+ ret = csio_hw_read_flash(hw, FLASH_FW_START,
+ sizeof(*card_fw) / sizeof(uint32_t),
+ (uint32_t *)card_fw, 1);
+ if (ret == 0) {
+ card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
+ } else {
+ csio_err(hw,
+ "Unable to read card's firmware header: %d\n", ret);
+ card_fw_usable = 0;
+ }
+
+ if (fw_data != NULL) {
+ fs_fw = (const void *)fw_data;
+ fs_fw_usable = fw_compatible(drv_fw, fs_fw);
+ } else {
+ fs_fw = NULL;
+ fs_fw_usable = 0;
+ }
+
+ if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
+ (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
+ /* Common case: the firmware on the card is an exact match and
+ * the filesystem one is an exact match too, or the filesystem
+ * one is absent/incompatible.
+ */
+ } else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
+ csio_should_install_fs_fw(hw, card_fw_usable,
+ be32_to_cpu(fs_fw->fw_ver),
+ be32_to_cpu(card_fw->fw_ver))) {
+ ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
+ fw_size, 0);
+ if (ret != 0) {
+ csio_err(hw,
+ "failed to install firmware: %d\n", ret);
+ goto bye;
+ }
+
+ /* Installed successfully, update the cached header too. */
+ memcpy(card_fw, fs_fw, sizeof(*card_fw));
+ card_fw_usable = 1;
+ *reset = 0; /* already reset as part of load_fw */
+ }
+
+ if (!card_fw_usable) {
+ uint32_t d, c, k;
+
+ d = be32_to_cpu(drv_fw->fw_ver);
+ c = be32_to_cpu(card_fw->fw_ver);
+ k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
+
+ csio_err(hw, "Cannot find a usable firmware: "
+ "chip state %d, "
+ "driver compiled with %d.%d.%d.%d, "
+ "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
+ state,
+ FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
+ FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
+ FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+ FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
+ FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+ ret = EINVAL;
+ goto bye;
+ }
+
+ /* We're using whatever's on the card and it's known to be good. */
+ hw->fwrev = be32_to_cpu(card_fw->fw_ver);
+ hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
+
+bye:
+ return ret;
+}
+
/*
* Returns -EINVAL if attempts to flash the firmware failed
* else returns 0,
@@ -1977,14 +2101,27 @@ out:
* latest firmware ECANCELED is returned
*/
static int
-csio_hw_flash_fw(struct csio_hw *hw)
+csio_hw_flash_fw(struct csio_hw *hw, int *reset)
{
int ret = -ECANCELED;
const struct firmware *fw;
- const struct fw_hdr *hdr;
- u32 fw_ver;
+ struct fw_info *fw_info;
+ struct fw_hdr *card_fw;
struct pci_dev *pci_dev = hw->pdev;
struct device *dev = &pci_dev->dev ;
+ const u8 *fw_data = NULL;
+ unsigned int fw_size = 0;
+
+ /* This is the firmware whose headers the driver was compiled
+ * against
+ */
+ fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
+ if (fw_info == NULL) {
+ csio_err(hw,
+ "unable to get firmware info for chip %d.\n",
+ CHELSIO_CHIP_VERSION(hw->chip_id));
+ return -EINVAL;
+ }
if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
csio_err(hw, "could not find firmware image %s, err: %d\n",
@@ -1992,33 +2129,25 @@ csio_hw_flash_fw(struct csio_hw *hw)
return -EINVAL;
}
- hdr = (const struct fw_hdr *)fw->data;
- fw_ver = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw))
- return -EINVAL; /* wrong major version, won't do */
-
- /*
- * If the flash FW is unusable or we found something newer, load it.
+ /* allocate memory to read the header of the firmware on the
+ * card
*/
- if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
- fw_ver > hw->fwrev) {
- ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
- /*force=*/false);
- if (!ret)
- csio_info(hw,
- "firmware upgraded to version %pI4 from %s\n",
- &hdr->fw_ver, CSIO_FW_FNAME(hw));
- else
- csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
- } else
- ret = -EINVAL;
+ card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
- release_firmware(fw);
+ fw_data = fw->data;
+ fw_size = fw->size;
+ /* upgrade FW logic */
+ ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
+ hw->fw_state, reset);
+
+ /* Cleaning up */
+ if (fw != NULL)
+ release_firmware(fw);
+ kfree(card_fw);
return ret;
}
-
/*
* csio_hw_configure - Configure HW
* @hw - HW module
@@ -2039,7 +2168,7 @@ csio_hw_configure(struct csio_hw *hw)
}
/* HW version */
- hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);
+ hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);
/* Needed for FW download */
rv = csio_hw_get_flash_params(hw);
@@ -2074,25 +2203,18 @@ csio_hw_configure(struct csio_hw *hw)
if (rv != 0)
goto out;
+ csio_hw_get_fw_version(hw, &hw->fwrev);
+ csio_hw_get_tp_version(hw, &hw->tp_vers);
if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
- rv = csio_hw_check_fw_version(hw);
- if (rv == -EINVAL) {
/* Do firmware update */
- spin_unlock_irq(&hw->lock);
- rv = csio_hw_flash_fw(hw);
- spin_lock_irq(&hw->lock);
+ spin_unlock_irq(&hw->lock);
+ rv = csio_hw_flash_fw(hw, &reset);
+ spin_lock_irq(&hw->lock);
+
+ if (rv != 0)
+ goto out;
- if (rv == 0) {
- reset = 0;
- /*
- * Note that the chip was reset as part of the
- * firmware upgrade so we don't reset it again
- * below and grab the new firmware version.
- */
- rv = csio_hw_check_fw_version(hw);
- }
- }
/*
* If the firmware doesn't support Configuration
* Files, use the old Driver-based, hard-wired
@@ -2217,7 +2339,7 @@ out:
return;
}
-#define PF_INTR_MASK (PFSW | PFCIM)
+#define PF_INTR_MASK (PFSW_F | PFCIM_F)
/*
* csio_hw_intr_enable - Enable HW interrupts
@@ -2229,21 +2351,21 @@ static void
csio_hw_intr_enable(struct csio_hw *hw)
{
uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
- uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
- uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);
+ uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+ uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
/*
* Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
* by FW, so do nothing for INTX.
*/
if (hw->intr_mode == CSIO_IM_MSIX)
- csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
- AIVEC(AIVEC_MASK), vec);
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+ AIVEC_V(AIVEC_M), vec);
else if (hw->intr_mode == CSIO_IM_MSI)
- csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
- AIVEC(AIVEC_MASK), 0);
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+ AIVEC_V(AIVEC_M), 0);
- csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));
+ csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));
/* Turn on MB interrupts - this will internally flush PIO as well */
csio_mb_intr_enable(hw);
@@ -2253,19 +2375,19 @@ csio_hw_intr_enable(struct csio_hw *hw)
/*
* Disable the Serial FLASH interrupt, if enabled!
*/
- pl &= (~SF);
- csio_wr_reg32(hw, pl, PL_INT_ENABLE);
+ pl &= (~SF_F);
+ csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);
- csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
- EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
- ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
- ERR_DATA_CPL_ON_HIGH_QID1 |
- ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
- ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
- ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
- ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
- SGE_INT_ENABLE3);
- csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
+ csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
+ EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
+ ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
+ ERR_DATA_CPL_ON_HIGH_QID1_F |
+ ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
+ ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
+ ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
+ ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
+ SGE_INT_ENABLE3_A);
+ csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
}
hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
@@ -2281,16 +2403,16 @@ csio_hw_intr_enable(struct csio_hw *hw)
void
csio_hw_intr_disable(struct csio_hw *hw)
{
- uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
+ uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
return;
hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
- csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
+ csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
if (csio_is_hw_master(hw))
- csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);
+ csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);
/* Turn off MB interrupts */
csio_mb_intr_disable(hw);
@@ -2300,7 +2422,7 @@ csio_hw_intr_disable(struct csio_hw *hw)
void
csio_hw_fatal_err(struct csio_hw *hw)
{
- csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
+ csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
csio_hw_intr_disable(hw);
/* Do not reset HW, we may need FW state for debugging */
@@ -2594,7 +2716,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
* register directly.
*/
csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
- csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
mdelay(2000);
break;
@@ -2682,11 +2804,11 @@ static void csio_tp_intr_handler(struct csio_hw *hw)
{
static struct intr_info tp_intr_info[] = {
{ 0x3fffffff, "TP parity error", -1, 1 },
- { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+ { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
+ if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2698,52 +2820,52 @@ static void csio_sge_intr_handler(struct csio_hw *hw)
uint64_t v;
static struct intr_info sge_intr_info[] = {
- { ERR_CPL_EXCEED_IQE_SIZE,
+ { ERR_CPL_EXCEED_IQE_SIZE_F,
"SGE received CPL exceeding IQE size", -1, 1 },
- { ERR_INVALID_CIDX_INC,
+ { ERR_INVALID_CIDX_INC_F,
"SGE GTS CIDX increment too large", -1, 0 },
- { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
- { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
- { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+ { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+ { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
+ { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
- { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
0 },
- { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+ { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
0 },
- { ERR_ING_CTXT_PRIO,
+ { ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 },
- { ERR_EGR_CTXT_PRIO,
+ { ERR_EGR_CTXT_PRIO_F,
"SGE too many priority egress contexts", -1, 0 },
- { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
- { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+ { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+ { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0, NULL, 0, 0 }
};
- v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
- ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
+ v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
+ ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
if (v) {
csio_fatal(hw, "SGE parity error (%#llx)\n",
(unsigned long long)v);
csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
- SGE_INT_CAUSE1);
- csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
+ SGE_INT_CAUSE1_A);
+ csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
}
- v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);
+ v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
- if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) ||
+ if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
v != 0)
csio_hw_fatal_err(hw);
}
-#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
- OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
-#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
- IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+ OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+ IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
/*
* CIM interrupt handler.
@@ -2751,53 +2873,53 @@ static void csio_sge_intr_handler(struct csio_hw *hw)
static void csio_cim_intr_handler(struct csio_hw *hw)
{
static struct intr_info cim_intr_info[] = {
- { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+ { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
- { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
- { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
- { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
- { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+ { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
+ { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
+ { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
+ { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info cim_upintr_info[] = {
- { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
- { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
- { ILLWRINT, "CIM illegal write", -1, 1 },
- { ILLRDINT, "CIM illegal read", -1, 1 },
- { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
- { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
- { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
- { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
- { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
- { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
- { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
- { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
- { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
- { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
- { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
- { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
- { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
- { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
- { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
- { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
- { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
- { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
- { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
- { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
- { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
- { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
- { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
- { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+ { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
+ { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
+ { ILLWRINT_F, "CIM illegal write", -1, 1 },
+ { ILLRDINT_F, "CIM illegal read", -1, 1 },
+ { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
+ { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
+ { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
+ { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
+ { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
+ { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
+ { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
+ { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
+ { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
+ { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
+ { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
+ { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
+ { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
+ { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
+ { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
+ { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
+ { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
+ { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
+ { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
+ { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
+ { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
+ { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
+ { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
+ { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
{ 0, NULL, 0, 0 }
};
int fat;
- fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
- cim_intr_info) +
- csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
- cim_upintr_info);
+ fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
+ cim_intr_info) +
+ csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
+ cim_upintr_info);
if (fat)
csio_hw_fatal_err(hw);
}
@@ -2813,7 +2935,7 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw)
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
+ if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2823,19 +2945,19 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw)
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
static struct intr_info ulptx_intr_info[] = {
- { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
0 },
- { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+ { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
0 },
{ 0xfffffff, "ULPTX parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
+ if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2845,20 +2967,20 @@ static void csio_ulptx_intr_handler(struct csio_hw *hw)
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
static struct intr_info pmtx_intr_info[] = {
- { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
- { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
- { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
- { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+ { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
+ { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
{ 0xffffff0, "PMTX framing error", -1, 1 },
- { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
- { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
+ { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
1 },
- { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
- { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+ { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
+ { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
+ if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2868,17 +2990,17 @@ static void csio_pmtx_intr_handler(struct csio_hw *hw)
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
static struct intr_info pmrx_intr_info[] = {
- { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+ { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
{ 0x3ffff0, "PMRX framing error", -1, 1 },
- { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
- { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
+ { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
1 },
- { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
- { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+ { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
+ { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
+ if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2888,16 +3010,16 @@ static void csio_pmrx_intr_handler(struct csio_hw *hw)
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
static struct intr_info cplsw_intr_info[] = {
- { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
- { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
- { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
- { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
- { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
- { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+ { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
+ { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
+ { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
+ { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
+ { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
+ { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
+ if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2907,15 +3029,15 @@ static void csio_cplsw_intr_handler(struct csio_hw *hw)
static void csio_le_intr_handler(struct csio_hw *hw)
{
static struct intr_info le_intr_info[] = {
- { LIPMISS, "LE LIP miss", -1, 0 },
- { LIP0, "LE 0 LIP error", -1, 0 },
- { PARITYERR, "LE parity error", -1, 1 },
- { UNKNOWNCMD, "LE unknown command", -1, 1 },
- { REQQPARERR, "LE request queue parity error", -1, 1 },
+ { LIPMISS_F, "LE LIP miss", -1, 0 },
+ { LIP0_F, "LE 0 LIP error", -1, 0 },
+ { PARITYERR_F, "LE parity error", -1, 1 },
+ { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+ { REQQPARERR_F, "LE request queue parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
+ if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
csio_hw_fatal_err(hw);
}
@@ -2929,19 +3051,22 @@ static void csio_mps_intr_handler(struct csio_hw *hw)
{ 0, NULL, 0, 0 }
};
static struct intr_info mps_tx_intr_info[] = {
- { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
- { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
- { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
- { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
- { BUBBLE, "MPS Tx underflow", -1, 1 },
- { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
- { FRMERR, "MPS Tx framing error", -1, 1 },
+ { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
+ { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+ { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
+ -1, 1 },
+ { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
+ -1, 1 },
+ { BUBBLE_F, "MPS Tx underflow", -1, 1 },
+ { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
+ { FRMERR_F, "MPS Tx framing error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info mps_trc_intr_info[] = {
- { FILTMEM, "MPS TRC filter parity error", -1, 1 },
- { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
- { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+ { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
+ { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
+ -1, 1 },
+ { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info mps_stat_sram_intr_info[] = {
@@ -2957,36 +3082,37 @@ static void csio_mps_intr_handler(struct csio_hw *hw)
{ 0, NULL, 0, 0 }
};
static struct intr_info mps_cls_intr_info[] = {
- { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
- { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
- { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+ { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
+ { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
+ { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
int fat;
- fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
- mps_rx_intr_info) +
- csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
- mps_tx_intr_info) +
- csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
- mps_trc_intr_info) +
- csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
- mps_stat_sram_intr_info) +
- csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
- mps_stat_tx_intr_info) +
- csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
- mps_stat_rx_intr_info) +
- csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
- mps_cls_intr_info);
-
- csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
- csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */
+ fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
+ mps_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
+ mps_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
+ mps_trc_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
+ mps_stat_sram_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
+ mps_stat_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
+ mps_stat_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
+ mps_cls_intr_info);
+
+ csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
+ csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */
if (fat)
csio_hw_fatal_err(hw);
}
-#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
+ ECC_UE_INT_CAUSE_F)
/*
* EDC/MC interrupt handler.
@@ -2998,28 +3124,28 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
unsigned int addr, cnt_addr, v;
if (idx <= MEM_EDC1) {
- addr = EDC_REG(EDC_INT_CAUSE, idx);
- cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+ addr = EDC_REG(EDC_INT_CAUSE_A, idx);
+ cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
} else {
- addr = MC_INT_CAUSE;
- cnt_addr = MC_ECC_STATUS;
+ addr = MC_INT_CAUSE_A;
+ cnt_addr = MC_ECC_STATUS_A;
}
v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
- if (v & PERR_INT_CAUSE)
+ if (v & PERR_INT_CAUSE_F)
csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
- if (v & ECC_CE_INT_CAUSE) {
- uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));
+ if (v & ECC_CE_INT_CAUSE_F) {
+ uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));
- csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
+ csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
csio_warn(hw, "%u %s correctable ECC data error%s\n",
cnt, name[idx], cnt > 1 ? "s" : "");
}
- if (v & ECC_UE_INT_CAUSE)
+ if (v & ECC_UE_INT_CAUSE_F)
csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
csio_wr_reg32(hw, v, addr);
- if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+ if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
csio_hw_fatal_err(hw);
}
@@ -3028,18 +3154,18 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
*/
static void csio_ma_intr_handler(struct csio_hw *hw)
{
- uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);
+ uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);
- if (status & MEM_PERR_INT_CAUSE)
+ if (status & MEM_PERR_INT_CAUSE_F)
csio_fatal(hw, "MA parity error, parity status %#x\n",
- csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
- if (status & MEM_WRAP_INT_CAUSE) {
- v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
+ csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
+ if (status & MEM_WRAP_INT_CAUSE_F) {
+ v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
csio_fatal(hw,
"MA address wrap-around error by client %u to address %#x\n",
- MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
+ MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
}
- csio_wr_reg32(hw, status, MA_INT_CAUSE);
+ csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
csio_hw_fatal_err(hw);
}
@@ -3049,13 +3175,13 @@ static void csio_ma_intr_handler(struct csio_hw *hw)
static void csio_smb_intr_handler(struct csio_hw *hw)
{
static struct intr_info smb_intr_info[] = {
- { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
- { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
- { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+ { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
+ { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
+ { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
+ if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
csio_hw_fatal_err(hw);
}
@@ -3065,14 +3191,14 @@ static void csio_smb_intr_handler(struct csio_hw *hw)
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
static struct intr_info ncsi_intr_info[] = {
- { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
- { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
- { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
- { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+ { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
+ { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
+ { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
+ { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
+ if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
csio_hw_fatal_err(hw);
}
@@ -3083,13 +3209,13 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
- v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+ v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
if (!v)
return;
- if (v & TXFIFO_PRTY_ERR)
+ if (v & TXFIFO_PRTY_ERR_F)
csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
- if (v & RXFIFO_PRTY_ERR)
+ if (v & RXFIFO_PRTY_ERR_F)
csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
csio_hw_fatal_err(hw);
@@ -3101,12 +3227,12 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
static void csio_pl_intr_handler(struct csio_hw *hw)
{
static struct intr_info pl_intr_info[] = {
- { FATALPERR, "T4 fatal parity error", -1, 1 },
- { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+ { FATALPERR_F, "T4 fatal parity error", -1, 1 },
+ { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
+ if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
csio_hw_fatal_err(hw);
}
@@ -3121,7 +3247,7 @@ static void csio_pl_intr_handler(struct csio_hw *hw)
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
- uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);
+ uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);
if (!(cause & CSIO_GLBL_INTR_MASK)) {
CSIO_INC_STATS(hw, n_plint_unexp);
@@ -3132,75 +3258,75 @@ csio_hw_slow_intr_handler(struct csio_hw *hw)
CSIO_INC_STATS(hw, n_plint_cnt);
- if (cause & CIM)
+ if (cause & CIM_F)
csio_cim_intr_handler(hw);
- if (cause & MPS)
+ if (cause & MPS_F)
csio_mps_intr_handler(hw);
- if (cause & NCSI)
+ if (cause & NCSI_F)
csio_ncsi_intr_handler(hw);
- if (cause & PL)
+ if (cause & PL_F)
csio_pl_intr_handler(hw);
- if (cause & SMB)
+ if (cause & SMB_F)
csio_smb_intr_handler(hw);
- if (cause & XGMAC0)
+ if (cause & XGMAC0_F)
csio_xgmac_intr_handler(hw, 0);
- if (cause & XGMAC1)
+ if (cause & XGMAC1_F)
csio_xgmac_intr_handler(hw, 1);
- if (cause & XGMAC_KR0)
+ if (cause & XGMAC_KR0_F)
csio_xgmac_intr_handler(hw, 2);
- if (cause & XGMAC_KR1)
+ if (cause & XGMAC_KR1_F)
csio_xgmac_intr_handler(hw, 3);
- if (cause & PCIE)
+ if (cause & PCIE_F)
hw->chip_ops->chip_pcie_intr_handler(hw);
- if (cause & MC)
+ if (cause & MC_F)
csio_mem_intr_handler(hw, MEM_MC);
- if (cause & EDC0)
+ if (cause & EDC0_F)
csio_mem_intr_handler(hw, MEM_EDC0);
- if (cause & EDC1)
+ if (cause & EDC1_F)
csio_mem_intr_handler(hw, MEM_EDC1);
- if (cause & LE)
+ if (cause & LE_F)
csio_le_intr_handler(hw);
- if (cause & TP)
+ if (cause & TP_F)
csio_tp_intr_handler(hw);
- if (cause & MA)
+ if (cause & MA_F)
csio_ma_intr_handler(hw);
- if (cause & PM_TX)
+ if (cause & PM_TX_F)
csio_pmtx_intr_handler(hw);
- if (cause & PM_RX)
+ if (cause & PM_RX_F)
csio_pmrx_intr_handler(hw);
- if (cause & ULP_RX)
+ if (cause & ULP_RX_F)
csio_ulprx_intr_handler(hw);
- if (cause & CPL_SWITCH)
+ if (cause & CPL_SWITCH_F)
csio_cplsw_intr_handler(hw);
- if (cause & SGE)
+ if (cause & SGE_F)
csio_sge_intr_handler(hw);
- if (cause & ULP_TX)
+ if (cause & ULP_TX_F)
csio_ulptx_intr_handler(hw);
/* Clear the interrupts just processed for which we are the master. */
- csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
- csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */
+ csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
+ csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */
return 1;
}
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 68248da1b9af..1fe8fdee70fa 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -117,10 +117,10 @@ extern int csio_msi;
#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
-#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
- EDC1 | LE | TP | MA | PM_TX | PM_RX | \
- ULP_RX | CPL_SWITCH | SGE | \
- ULP_TX | SF)
+#define CSIO_GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | \
+ EDC0_F | EDC1_F | LE_F | TP_F | MA_F | \
+ PM_TX_F | PM_RX_F | ULP_RX_F | \
+ CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
/*
* Hard parameters used to initialize the card in the absence of a
@@ -201,9 +201,8 @@ enum {
SF_ERASE_SECTOR = 0xd8, /* erase sector */
FW_START_SEC = 8, /* first flash sector for FW */
- FW_END_SEC = 15, /* last flash sector for FW */
FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
- FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
+ FW_MAX_SIZE = 16 * SF_SEC_SIZE,
FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/
FLASH_CFG_OFFSET = 0x1f0000,
@@ -221,7 +220,7 @@ enum {
* Location of firmware image in FLASH.
*/
FLASH_FW_START_SEC = 8,
- FLASH_FW_NSECS = 8,
+ FLASH_FW_NSECS = 16,
FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h
index 4752fed476df..eec98f523ec9 100644
--- a/drivers/scsi/csiostor/csio_hw_chip.h
+++ b/drivers/scsi/csiostor/csio_hw_chip.h
@@ -50,6 +50,36 @@
#define FW_CFG_NAME_T4 "cxgb4/t4-config.txt"
#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt"
+#define T4FW_VERSION_MAJOR 0x01
+#define T4FW_VERSION_MINOR 0x0B
+#define T4FW_VERSION_MICRO 0x1B
+#define T4FW_VERSION_BUILD 0x00
+
+#define T5FW_VERSION_MAJOR 0x01
+#define T5FW_VERSION_MINOR 0x0B
+#define T5FW_VERSION_MICRO 0x1B
+#define T5FW_VERSION_BUILD 0x00
+
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_FPGA 0x100
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 12) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A2,
+
+ T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+ T5_FIRST_REV = T5_A0,
+ T5_LAST_REV = T5_A1,
+};
+
/* Define static functions */
static inline int csio_is_t4(uint16_t chip)
{
@@ -66,24 +96,35 @@ static inline int csio_is_t5(uint16_t chip)
{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
#define CSIO_HW_PIDX(hw, index) \
- (csio_is_t4(hw->chip_id) ? (PIDX(index)) : \
- (PIDX_T5(index) | DBTYPE(1U)))
+ (csio_is_t4(hw->chip_id) ? (PIDX_V(index)) : \
+ (PIDX_T5_G(index) | DBTYPE_F))
#define CSIO_HW_LP_INT_THRESH(hw, val) \
- (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) : \
- (V_LP_INT_THRESH_T5(val)))
+ (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_V(val)) : \
+ (LP_INT_THRESH_T5_V(val)))
#define CSIO_HW_M_LP_INT_THRESH(hw) \
- (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5))
+ (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_M) : (LP_INT_THRESH_T5_M))
#define CSIO_MAC_INT_CAUSE_REG(hw, port) \
- (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \
- (T5_PORT_REG(port, MAC_PORT_INT_CAUSE)))
-
-#define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0)
-#define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0)
-#define FW_VERSION_MICRO(hw) (csio_is_t4(hw->chip_id) ? 8 : 0)
-
+ (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE_A)) : \
+ (T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)))
+
+#include "t4fw_api.h"
+
+#define FW_VERSION(chip) ( \
+ FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
+ FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
+ FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
+ FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
+#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
+
+struct fw_info {
+ u8 chip;
+ char *fs_name;
+ char *fw_mod_name;
+ struct fw_hdr fw_hdr;
+};
#define CSIO_FW_FNAME(hw) \
(csio_is_t4(hw->chip_id) ? FW_FNAME_T4 : FW_FNAME_T5)
diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c
index 95d831857640..14884e46fd99 100644
--- a/drivers/scsi/csiostor/csio_hw_t4.c
+++ b/drivers/scsi/csiostor/csio_hw_t4.c
@@ -96,11 +96,11 @@ csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win)
* back MA register to ensure that changes propagate before we attempt
* to use the new values.)
*/
- csio_wr_reg32(hw, mem_win_base | BIR(0) |
- WINDOW(ilog2(MEMWIN_APERTURE) - 10),
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ csio_wr_reg32(hw, mem_win_base | BIR_V(0) |
+ WINDOW_V(ilog2(MEMWIN_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
return 0;
}
@@ -111,69 +111,69 @@ static void
csio_t4_pcie_intr_handler(struct csio_hw *hw)
{
static struct intr_info sysbus_intr_info[] = {
- { RNPP, "RXNP array parity error", -1, 1 },
- { RPCP, "RXPC array parity error", -1, 1 },
- { RCIP, "RXCIF array parity error", -1, 1 },
- { RCCP, "Rx completions control array parity error", -1, 1 },
- { RFTP, "RXFT array parity error", -1, 1 },
+ { RNPP_F, "RXNP array parity error", -1, 1 },
+ { RPCP_F, "RXPC array parity error", -1, 1 },
+ { RCIP_F, "RXCIF array parity error", -1, 1 },
+ { RCCP_F, "Rx completions control array parity error", -1, 1 },
+ { RFTP_F, "RXFT array parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info pcie_port_intr_info[] = {
- { TPCP, "TXPC array parity error", -1, 1 },
- { TNPP, "TXNP array parity error", -1, 1 },
- { TFTP, "TXFT array parity error", -1, 1 },
- { TCAP, "TXCA array parity error", -1, 1 },
- { TCIP, "TXCIF array parity error", -1, 1 },
- { RCAP, "RXCA array parity error", -1, 1 },
- { OTDD, "outbound request TLP discarded", -1, 1 },
- { RDPE, "Rx data parity error", -1, 1 },
- { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { TPCP_F, "TXPC array parity error", -1, 1 },
+ { TNPP_F, "TXNP array parity error", -1, 1 },
+ { TFTP_F, "TXFT array parity error", -1, 1 },
+ { TCAP_F, "TXCA array parity error", -1, 1 },
+ { TCIP_F, "TXCIF array parity error", -1, 1 },
+ { RCAP_F, "RXCA array parity error", -1, 1 },
+ { OTDD_F, "outbound request TLP discarded", -1, 1 },
+ { RDPE_F, "Rx data parity error", -1, 1 },
+ { TDUE_F, "Tx uncorrectable data error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info pcie_intr_info[] = {
- { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
- { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
- { MSIDATAPERR, "MSI data parity error", -1, 1 },
- { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
- { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
- { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
- { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
- { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
- { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
- { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
- { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
- { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
- { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
- { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
- { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
- { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
- { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
- { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
- { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
- { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
- { FIDPERR, "PCI FID parity error", -1, 1 },
- { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
- { MATAGPERR, "PCI MA tag parity error", -1, 1 },
- { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
- { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
- { RXWRPERR, "PCI Rx write parity error", -1, 1 },
- { RPLPERR, "PCI replay buffer parity error", -1, 1 },
- { PCIESINT, "PCI core secondary fault", -1, 1 },
- { PCIEPINT, "PCI core primary fault", -1, 1 },
- { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
+ { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
+ { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
+ { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
+ { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
+ { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
+ { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
+ { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+ { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
+ { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+ { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR_F, "PCI FID parity error", -1, 1 },
+ { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
+ { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
+ { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+ { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
+ { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
+ { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
+ { PCIESINT_F, "PCI core secondary fault", -1, 1 },
+ { PCIEPINT_F, "PCI core primary fault", -1, 1 },
+ { UNXSPLCPLERR_F, "PCI unexpected split completion error", -1,
0 },
{ 0, NULL, 0, 0 }
};
int fat;
fat = csio_handle_intr_status(hw,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
sysbus_intr_info) +
csio_handle_intr_status(hw,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
pcie_port_intr_info) +
- csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+ csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
if (fat)
csio_hw_fatal_err(hw);
}
@@ -209,19 +209,19 @@ csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
{
int i;
- if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
+ if (csio_rd_reg32(hw, MC_BIST_CMD_A) & START_BIST_F)
return -EBUSY;
- csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
- csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
- csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
- csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
- MC_BIST_CMD);
- i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
+ csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR_A);
+ csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN_A);
+ csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN_A);
+ csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
+ MC_BIST_CMD_A);
+ i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD_A, START_BIST_F,
0, 10, 1, NULL);
if (i)
return i;
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i)
for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
@@ -250,19 +250,19 @@ csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
int i;
idx *= EDC_STRIDE;
- if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
+ if (csio_rd_reg32(hw, EDC_BIST_CMD_A + idx) & START_BIST_F)
return -EBUSY;
- csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
- csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
- csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
- csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
- EDC_BIST_CMD + idx);
- i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
+ csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR_A + idx);
+ csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN_A + idx);
+ csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN_A + idx);
+ csio_wr_reg32(hw, BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F,
+ EDC_BIST_CMD_A + idx);
+ i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD_A + idx, START_BIST_F,
0, 10, 1, NULL);
if (i)
return i;
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx)
for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
@@ -329,9 +329,9 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* the address is relative to BAR0.
*/
mem_reg = csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
- mem_aperture = 1 << (WINDOW(mem_reg) + 10);
- mem_base = GET_PCIEOFST(mem_reg) << 10;
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
+ mem_aperture = 1 << (WINDOW_V(mem_reg) + 10);
+ mem_base = PCIEOFST_G(mem_reg) << 10;
bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
@@ -356,9 +356,9 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* before we attempt to use the new value.
*/
csio_wr_reg32(hw, pos,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
while (offset < mem_aperture && len > 0) {
if (dir)
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
index 66e180a58718..3267f4f627c9 100644
--- a/drivers/scsi/csiostor/csio_hw_t5.c
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -56,11 +56,11 @@ csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
* back MA register to ensure that changes propagate before we attempt
* to use the new values.)
*/
- csio_wr_reg32(hw, mem_win_base | BIR(0) |
- WINDOW(ilog2(MEMWIN_APERTURE) - 10),
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ csio_wr_reg32(hw, mem_win_base | BIR_V(0) |
+ WINDOW_V(ilog2(MEMWIN_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
return 0;
}
@@ -72,74 +72,74 @@ static void
csio_t5_pcie_intr_handler(struct csio_hw *hw)
{
static struct intr_info sysbus_intr_info[] = {
- { RNPP, "RXNP array parity error", -1, 1 },
- { RPCP, "RXPC array parity error", -1, 1 },
- { RCIP, "RXCIF array parity error", -1, 1 },
- { RCCP, "Rx completions control array parity error", -1, 1 },
- { RFTP, "RXFT array parity error", -1, 1 },
+ { RNPP_F, "RXNP array parity error", -1, 1 },
+ { RPCP_F, "RXPC array parity error", -1, 1 },
+ { RCIP_F, "RXCIF array parity error", -1, 1 },
+ { RCCP_F, "Rx completions control array parity error", -1, 1 },
+ { RFTP_F, "RXFT array parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info pcie_port_intr_info[] = {
- { TPCP, "TXPC array parity error", -1, 1 },
- { TNPP, "TXNP array parity error", -1, 1 },
- { TFTP, "TXFT array parity error", -1, 1 },
- { TCAP, "TXCA array parity error", -1, 1 },
- { TCIP, "TXCIF array parity error", -1, 1 },
- { RCAP, "RXCA array parity error", -1, 1 },
- { OTDD, "outbound request TLP discarded", -1, 1 },
- { RDPE, "Rx data parity error", -1, 1 },
- { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { TPCP_F, "TXPC array parity error", -1, 1 },
+ { TNPP_F, "TXNP array parity error", -1, 1 },
+ { TFTP_F, "TXFT array parity error", -1, 1 },
+ { TCAP_F, "TXCA array parity error", -1, 1 },
+ { TCIP_F, "TXCIF array parity error", -1, 1 },
+ { RCAP_F, "RXCA array parity error", -1, 1 },
+ { OTDD_F, "outbound request TLP discarded", -1, 1 },
+ { RDPE_F, "Rx data parity error", -1, 1 },
+ { TDUE_F, "Tx uncorrectable data error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info pcie_intr_info[] = {
- { MSTGRPPERR, "Master Response Read Queue parity error",
+ { MSTGRPPERR_F, "Master Response Read Queue parity error",
-1, 1 },
- { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
- { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
- { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
- { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
- { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
- { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
- { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+ { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
+ { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
+ { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
-1, 1 },
- { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+ { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
-1, 1 },
- { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
- { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
- { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
- { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
- { DREQWRPERR, "PCI DMA channel write request parity error",
+ { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
+ { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+ { DREQWRPERR_F, "PCI DMA channel write request parity error",
-1, 1 },
- { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
- { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
- { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
- { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
- { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
- { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
- { FIDPERR, "PCI FID parity error", -1, 1 },
- { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
- { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
- { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
- { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+ { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+ { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR_F, "PCI FID parity error", -1, 1 },
+ { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
+ { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
+ { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+ { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
-1, 1 },
- { IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
+ { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
-1, 1 },
- { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
- { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
- { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
- { READRSPERR, "Outbound read error", -1, 0 },
+ { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
+ { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
+ { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { READRSPERR_F, "Outbound read error", -1, 0 },
{ 0, NULL, 0, 0 }
};
int fat;
fat = csio_handle_intr_status(hw,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
sysbus_intr_info) +
csio_handle_intr_status(hw,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
pcie_port_intr_info) +
- csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+ csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
if (fat)
csio_hw_fatal_err(hw);
}
@@ -177,25 +177,25 @@ csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
- mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD, idx);
- mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR, idx);
- mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN, idx);
- mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
- mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+ mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD_A, idx);
+ mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
+ mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
+ mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
+ mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
- if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST)
+ if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST_F)
return -EBUSY;
csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg);
csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg);
csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg);
- csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
mc_bist_cmd_reg);
- i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST,
+ i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST_F,
0, 10, 1, NULL);
if (i)
return i;
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i)
for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
@@ -231,27 +231,27 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
- edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD, idx);
- edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
- edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
- edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
- edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+ edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
+ edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
+ edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
+ edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
+ edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
- if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST)
+ if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST_F)
return -EBUSY;
csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
- csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
edc_bist_cmd_reg);
- i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST,
+ i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST_F,
0, 10, 1, NULL);
if (i)
return i;
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx)
for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
@@ -320,13 +320,13 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* the address is relative to BAR0.
*/
mem_reg = csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
- mem_aperture = 1 << (WINDOW(mem_reg) + 10);
- mem_base = GET_PCIEOFST(mem_reg) << 10;
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
+ mem_aperture = 1 << (WINDOW_V(mem_reg) + 10);
+ mem_base = PCIEOFST_G(mem_reg) << 10;
start = addr & ~(mem_aperture-1);
offset = addr - start;
- win_pf = V_PFNUM(hw->pfn);
+ win_pf = PFNUM_V(hw->pfn);
csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
mem_reg, mem_aperture);
@@ -344,9 +344,9 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* before we attempt to use the new value.
*/
csio_wr_reg32(hw, pos | win_pf,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
csio_rd_reg32(hw,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
while (offset < mem_aperture && len > 0) {
if (dir)
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
index a8c748a35f9c..2fb71c6c3b37 100644
--- a/drivers/scsi/csiostor/csio_isr.c
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -317,7 +317,7 @@ csio_fcoe_isr(int irq, void *dev_id)
/* Disable the interrupt for this PCI function. */
if (hw->intr_mode == CSIO_IM_INTX)
- csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
+ csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));
/*
* The read in the following function will flush the
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index 87f9280d9b43..c00b2ff72b55 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1758,7 +1758,7 @@ csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
else {
/* Program DSGL to dma payload */
dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
- ULPTX_MORE | ULPTX_NSGE(1));
+ ULPTX_MORE_F | ULPTX_NSGE_V(1));
dsgl.len0 = cpu_to_be32(pld_len);
dsgl.addr0 = cpu_to_be64(pld->paddr);
csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 08c265c0f353..1132c41d99ce 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1104,8 +1104,8 @@ csio_mb_process_portparams_rsp(struct csio_hw *hw,
void
csio_mb_intr_enable(struct csio_hw *hw)
{
- csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
- csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+ csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
}
/*
@@ -1117,8 +1117,9 @@ csio_mb_intr_enable(struct csio_hw *hw)
void
csio_mb_intr_disable(struct csio_hw *hw)
{
- csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
- csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+ csio_wr_reg32(hw, MBMSGRDYINTEN_V(0),
+ MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
}
static void
@@ -1153,8 +1154,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
{
int i;
__be64 cmd[CSIO_MB_MAX_REGS];
- uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
- uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size = sizeof(struct fw_debug_cmd);
/* Copy mailbox data */
@@ -1164,8 +1165,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
csio_mb_dump_fw_dbg(hw, cmd);
/* Notify FW of mailbox by setting owner as UP */
- csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
- ctl_reg);
+ csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
+ MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
csio_rd_reg32(hw, ctl_reg);
wmb();
@@ -1187,8 +1188,8 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
__be64 *cmd = mbp->mb;
__be64 hdr;
struct csio_mbm *mbm = &hw->mbm;
- uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
- uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size = mbp->mb_size;
int rv = -EINVAL;
struct fw_cmd_hdr *fw_hdr;
@@ -1224,12 +1225,12 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
}
/* Now get ownership of mailbox */
- owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+ owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
if (!csio_mb_is_host_owner(owner)) {
for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
- owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+ owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
/*
* Mailbox unavailable. In immediate mode, fail the command.
* In other modes, enqueue the request.
@@ -1271,10 +1272,10 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
if (mbp->mb_cbfn != NULL) {
mbm->mcurrent = mbp;
mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
- csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
- MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
+ csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
+ MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
} else
- csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
+ csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW),
ctl_reg);
/* Flush posted writes */
@@ -1294,9 +1295,9 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
/* Check for response */
ctl = csio_rd_reg32(hw, ctl_reg);
- if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+ if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
- if (!(ctl & MBMSGVALID)) {
+ if (!(ctl & MBMSGVALID_F)) {
csio_wr_reg32(hw, 0, ctl_reg);
continue;
}
@@ -1457,16 +1458,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
__be64 *cmd;
uint32_t ctl, cim_cause, pl_cause;
int i;
- uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
- uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size;
__be64 hdr;
struct fw_cmd_hdr *fw_hdr;
- pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
- cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
+ pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A));
+ cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
- if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
+ if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) {
CSIO_INC_STATS(hw, n_mbint_unexp);
return -EINVAL;
}
@@ -1477,16 +1478,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
* the upper level cause register. In other words, CIM-cause
* first followed by PL-Cause next.
*/
- csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
- csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));
+ csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
+ csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A));
ctl = csio_rd_reg32(hw, ctl_reg);
- if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+ if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
CSIO_DUMP_MB(hw, hw->pfn, data_reg);
- if (!(ctl & MBMSGVALID)) {
+ if (!(ctl & MBMSGVALID_F)) {
csio_warn(hw,
"Stray mailbox interrupt recvd,"
" mailbox data not valid\n");
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 3987284e0d2a..2c4562d82dc0 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -298,8 +298,8 @@ csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
struct csio_dma_buf *dma_buf;
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
- sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE |
- ULPTX_NSGE(req->nsge));
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
+ ULPTX_NSGE_V(req->nsge));
/* Now add the data SGLs */
if (likely(!req->dcopy)) {
scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 773da14cfa14..b47ea336e912 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -51,12 +51,12 @@ int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */
static int csio_sge_timer_reg = 1;
#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
- csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)
+ csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)
static void
csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
{
- sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
+ sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
reg * sizeof(uint32_t));
}
@@ -71,7 +71,7 @@ csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
static inline uint32_t
csio_wr_qstat_pgsz(struct csio_hw *hw)
{
- return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64;
+ return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
}
/* Ring freelist doorbell */
@@ -84,9 +84,9 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
* 8 freelist buffer pointers (since each pointer is 8 bytes).
*/
if (flq->inc_idx >= 8) {
- csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
+ csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
CSIO_HW_PIDX(hw, flq->inc_idx / 8),
- MYPF_REG(SGE_PF_KDOORBELL));
+ MYPF_REG(SGE_PF_KDOORBELL_A));
flq->inc_idx &= 7;
}
}
@@ -95,10 +95,10 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
static void
csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
{
- csio_wr_reg32(hw, CIDXINC(0) |
- INGRESSQID(iqid) |
- TIMERREG(X_TIMERREG_RESTART_COUNTER),
- MYPF_REG(SGE_PF_GTS));
+ csio_wr_reg32(hw, CIDXINC_V(0) |
+ INGRESSQID_V(iqid) |
+ TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
+ MYPF_REG(SGE_PF_GTS_A));
}
/*
@@ -982,9 +982,9 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
wmb();
/* Ring SGE Doorbell writing q->pidx into it */
- csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
+ csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
CSIO_HW_PIDX(hw, q->inc_idx),
- MYPF_REG(SGE_PF_KDOORBELL));
+ MYPF_REG(SGE_PF_KDOORBELL_A));
q->inc_idx = 0;
return 0;
@@ -1242,10 +1242,10 @@ csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
restart:
/* Now inform SGE about our incremental index value */
- csio_wr_reg32(hw, CIDXINC(q->inc_idx) |
- INGRESSQID(q->un.iq.physiqid) |
- TIMERREG(csio_sge_timer_reg),
- MYPF_REG(SGE_PF_GTS));
+ csio_wr_reg32(hw, CIDXINC_V(q->inc_idx) |
+ INGRESSQID_V(q->un.iq.physiqid) |
+ TIMERREG_V(csio_sge_timer_reg),
+ MYPF_REG(SGE_PF_GTS_A));
q->stats.n_tot_rsps += q->inc_idx;
q->inc_idx = 0;
@@ -1310,22 +1310,23 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
uint32_t ingpad = 0;
uint32_t stat_len = clsz > 64 ? 128 : 64;
- csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
- HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
- HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
- HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
- SGE_HOST_PAGE_SIZE);
+ csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
+ HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
+ HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) |
+ HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
+ SGE_HOST_PAGE_SIZE_A);
sge->csio_fl_align = clsz < 32 ? 32 : clsz;
ingpad = ilog2(sge->csio_fl_align) - 5;
- csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
- EGRSTATUSPAGESIZE(1),
- INGPADBOUNDARY(ingpad) |
- EGRSTATUSPAGESIZE(stat_len != 64));
+ csio_set_reg_field(hw, SGE_CONTROL_A,
+ INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+ EGRSTATUSPAGESIZE_F,
+ INGPADBOUNDARY_V(ingpad) |
+ EGRSTATUSPAGESIZE_V(stat_len != 64));
/* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
- csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
+ csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
/*
* If using hard params, the following will get set correctly
@@ -1333,23 +1334,24 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
*/
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
csio_wr_reg32(hw,
- (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
- SGE_FL_BUFFER_SIZE2);
+ SGE_FL_BUFFER_SIZE2_A);
csio_wr_reg32(hw,
- (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
- SGE_FL_BUFFER_SIZE3);
+ SGE_FL_BUFFER_SIZE3_A);
}
- csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
+ csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);
/* default value of rx_dma_offset of the NIC driver */
- csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
- PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
+ csio_set_reg_field(hw, SGE_CONTROL_A,
+ PKTSHIFT_V(PKTSHIFT_M),
+ PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET));
- csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG,
- CSUM_HAS_PSEUDO_HDR, 0);
+ csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A,
+ CSUM_HAS_PSEUDO_HDR_F, 0);
}
static void
@@ -1384,9 +1386,9 @@ csio_wr_get_sge(struct csio_hw *hw)
u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
u32 ingress_rx_threshold;
- sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
- ingpad = INGPADBOUNDARY_GET(sge->sge_control);
+ ingpad = INGPADBOUNDARY_G(sge->sge_control);
switch (ingpad) {
case X_INGPCIEBOUNDARY_32B:
@@ -1410,28 +1412,28 @@ csio_wr_get_sge(struct csio_hw *hw)
for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
csio_get_flbuf_size(hw, sge, i);
- timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
- timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
- timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);
+ timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A);
+ timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A);
+ timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A);
sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE0_GET(timer_value_0_and_1));
+ TIMERVALUE0_G(timer_value_0_and_1));
sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE1_GET(timer_value_0_and_1));
+ TIMERVALUE1_G(timer_value_0_and_1));
sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE2_GET(timer_value_2_and_3));
+ TIMERVALUE2_G(timer_value_2_and_3));
sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE3_GET(timer_value_2_and_3));
+ TIMERVALUE3_G(timer_value_2_and_3));
sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE4_GET(timer_value_4_and_5));
+ TIMERVALUE4_G(timer_value_4_and_5));
sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
- TIMERVALUE5_GET(timer_value_4_and_5));
+ TIMERVALUE5_G(timer_value_4_and_5));
- ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
- sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
- sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
- sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
- sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+ ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A);
+ sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
+ sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
+ sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
+ sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
csio_init_intr_coalesce_parms(hw);
}
@@ -1454,9 +1456,9 @@ csio_wr_set_sge(struct csio_hw *hw)
* Set up our basic SGE mode to deliver CPL messages to our Ingress
* Queue and Packet Date to the Free List.
*/
- csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));
+ csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);
- sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
@@ -1464,22 +1466,24 @@ csio_wr_set_sge(struct csio_hw *hw)
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover.
*/
- csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
- HP_INT_THRESH(HP_INT_THRESH_MASK) |
- CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)),
- HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
- CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH));
+ csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A,
+ HP_INT_THRESH_V(HP_INT_THRESH_M) |
+ CSIO_HW_LP_INT_THRESH(hw,
+ CSIO_HW_M_LP_INT_THRESH(hw)),
+ HP_INT_THRESH_V(CSIO_SGE_DBFIFO_INT_THRESH) |
+ CSIO_HW_LP_INT_THRESH(hw,
+ CSIO_SGE_DBFIFO_INT_THRESH));
- csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
- ENABLE_DROP);
+ csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
+ ENABLE_DROP_F);
/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
- & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2);
+ & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
- & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3);
+ & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
@@ -1502,26 +1506,26 @@ csio_wr_set_sge(struct csio_hw *hw)
sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
- csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
- THRESHOLD_1(sge->counter_val[1]) |
- THRESHOLD_2(sge->counter_val[2]) |
- THRESHOLD_3(sge->counter_val[3]),
- SGE_INGRESS_RX_THRESHOLD);
+ csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
+ THRESHOLD_1_V(sge->counter_val[1]) |
+ THRESHOLD_2_V(sge->counter_val[2]) |
+ THRESHOLD_3_V(sge->counter_val[3]),
+ SGE_INGRESS_RX_THRESHOLD_A);
csio_wr_reg32(hw,
- TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
- TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
- SGE_TIMER_VALUE_0_AND_1);
+ TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
+ TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])),
+ SGE_TIMER_VALUE_0_AND_1_A);
csio_wr_reg32(hw,
- TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
- TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
- SGE_TIMER_VALUE_2_AND_3);
+ TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
+ TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])),
+ SGE_TIMER_VALUE_2_AND_3_A);
csio_wr_reg32(hw,
- TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
- TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
- SGE_TIMER_VALUE_4_AND_5);
+ TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
+ TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])),
+ SGE_TIMER_VALUE_4_AND_5_A);
csio_init_intr_coalesce_parms(hw);
}
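The csiostor conversion above follows the cxgb4 t4_regs.h macro convention: register offsets gain an _A suffix, and per-field accessors are split into _S (shift), _M (mask), _V(x) (place a value in the field), _G(x) (extract the field) and _F (single-bit flag) forms, replacing the older FOO(x)/FOO_GET(x)/FOO_MASK names. A minimal sketch of how one such field is typically declared; the shift and mask values here are illustrative assumptions, not taken from this diff:

/* hypothetical field definitions following the _S/_M/_V/_G/_F pattern */
#define INGPADBOUNDARY_S	4
#define INGPADBOUNDARY_M	0x7U
#define INGPADBOUNDARY_V(x)	((x) << INGPADBOUNDARY_S)
#define INGPADBOUNDARY_G(x)	(((x) >> INGPADBOUNDARY_S) & INGPADBOUNDARY_M)

#define EGRSTATUSPAGESIZE_S	0
#define EGRSTATUSPAGESIZE_V(x)	((x) << EGRSTATUSPAGESIZE_S)
#define EGRSTATUSPAGESIZE_F	EGRSTATUSPAGESIZE_V(1U)

This is why, for example, EGRSTATUSPAGESIZE(1) becomes EGRSTATUSPAGESIZE_F and the old INGPADBOUNDARY_MASK is expressed as INGPADBOUNDARY_V(INGPADBOUNDARY_M) in csio_wr_fixup_host_params() above.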
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index a83d2ceded83..37d7191a3c38 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -704,7 +704,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
unsigned short tcp_opt = ntohs(req->tcp_opt);
unsigned int tid = GET_TID(req);
- unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+ unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct tid_info *t = lldi->tids;
u32 rcv_isn = be32_to_cpu(req->rcv_isn);
@@ -752,15 +752,15 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
- csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
- if (GET_TCPOPT_TSTAMP(tcp_opt))
+ csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
+ if (TCPOPT_TSTAMP_G(tcp_opt))
csk->advmss -= 12;
if (csk->advmss < 128)
csk->advmss = 128;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, mss_idx %u, advmss %u.\n",
- csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);
+ csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);
cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
@@ -856,8 +856,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
unsigned int tid = GET_TID(rpl);
unsigned int atid =
- GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
- unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+ TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
+ unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct tid_info *t = lldi->tids;
@@ -1112,7 +1112,7 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
hlen = ntohs(cpl->len);
dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
- plen = ISCSI_PDU_LEN(pdu_len_ddp);
+ plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
if (is_t4(lldi->adapter_type))
plen -= 40;
@@ -1619,7 +1619,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
- req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 8);
req->val = cpu_to_be64(pg_idx << 8);
@@ -1651,7 +1651,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 4);
req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index d415d69dc237..6906f76332f4 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -469,7 +469,7 @@ static int peek_head_len(struct sock *sk)
head = skb_peek(&sk->sk_receive_queue);
if (likely(head)) {
len = head->len;
- if (vlan_tx_tag_present(head))
+ if (skb_vlan_tag_present(head))
len += VLAN_HLEN;
}
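The vhost/net.c hunk is part of the tree-wide rename of the vlan_tx_tag_* helpers to skb_vlan_tag_*; the behaviour is unchanged, the new names simply make clear that the tag lives in the skb rather than being specific to transmit. A minimal sketch of the resulting usage pattern, assuming a valid struct sk_buff (the helper name frame_len_with_vlan is hypothetical):

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* hypothetical helper: length of the frame including an offloaded VLAN tag */
static unsigned int frame_len_with_vlan(const struct sk_buff *skb)
{
	unsigned int len = skb->len;

	/* account for the 4-byte VLAN header if a hardware-accelerated tag is present */
	if (skb_vlan_tag_present(skb))
		len += VLAN_HLEN;

	return len;
}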